New Upstream Release - golang-github-klauspost-reedsolomon

Ready changes

Summary

Merged new upstream version: 1.11.8 (was: 1.9.13).
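
The release pulls in, among other things, Leopard GF8/GF16 modes, GFNI acceleration, aligned allocations and a `ReconstructSome()` call (see the README portion of the diff below). As a quick orientation, here is a minimal sketch of the partial-reconstruction usage described in that README; shard counts and sizes are illustrative, and the `required` slice length (one flag per data shard) follows the upstream documentation rather than anything verified against this packaging.

```Go
package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 10 data + 3 parity shards, matching the README examples.
	enc, err := reedsolomon.New(10, 3)
	if err != nil {
		log.Fatal(err)
	}

	// Allocate 13 zero-filled shards of 1 KiB; real data would go into the first 10.
	shards := make([][]byte, 13)
	for i := range shards {
		shards[i] = make([]byte, 1024)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}

	// Lose one data shard, then ask for only that shard back.
	shards[3] = nil
	required := make([]bool, 10) // one flag per data shard (illustrative)
	required[3] = true
	if err := enc.ReconstructSome(shards, required); err != nil {
		log.Fatal(err)
	}
}
```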

Diff

diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000..80a1488
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,72 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ "master" ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ "master" ]
+  schedule:
+    - cron: '36 20 * * 3'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'go' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v3
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v2
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        
+        # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+        # queries: security-extended,security-and-quality
+
+        
+    # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v2
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # πŸ“š See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+
+    #   If the Autobuild fails above, remove it and uncomment the following three lines. 
+    #   modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
+
+    # - run: |
+    #   echo "Run, Build Application using script"
+    #   ./location_of_script_within_repo/buildscript.sh
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 0000000..5dee2e5
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,131 @@
+name: Go
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        go-version: [1.18.x, 1.19.x, 1.20.x]
+        os: [ubuntu-latest, macos-latest, windows-latest]
+    env:
+      CGO_ENABLED: 0
+    runs-on: ${{ matrix.os }}
+    steps:
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go-version }}
+
+    - name: CPU support
+      run: go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest&&cpuid
+
+    - name: Checkout code
+      uses: actions/checkout@v2
+
+    - name: Vet
+      run: go vet ./...
+
+    - name: Test
+      run: go test ./...
+
+    - name: Test Noasm
+      run: go test -tags=noasm -short&&go test -short -no-avx512&&go test -short -no-avx512 -no-avx2&&go test -no-avx512 -no-avx2 -no-ssse3 -short
+
+    - name: Test Race
+      env:
+        CGO_ENABLED: 1
+      run: go test -cpu="1,4" -short -race -v -timeout 20m .
+
+  build-special:
+    env:
+      CGO_ENABLED: 0
+    runs-on: ubuntu-latest
+    steps:
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.19.x
+
+    - name: Checkout code
+      uses: actions/checkout@v2
+
+    - name: fmt
+      run: diff <(gofmt -d .) <(printf "")
+
+    - name: Test 386
+      run: GOOS=linux GOARCH=386 go test -short ./...
+
+    - name: Build examples
+      run: go build examples/simple-decoder.go&&go build examples/simple-encoder.go&&go build examples/stream-decoder.go&&go build examples/stream-encoder.go
+
+    - name: Test Races, noasm, 1 cpu
+      env:
+        CGO_ENABLED: 1
+      run: go test -tags=noasm -cpu=1 -short -race -timeout 20m .
+
+    - name: Test Races, noasm, 4 cpu
+      env:
+        CGO_ENABLED: 1
+      run: go test -tags=noasm -cpu=4 -short -race -timeout 20m .
+
+    - name: Test Races, no gfni
+      env:
+        CGO_ENABLED: 1
+      run: go test -no-gfni -short -race
+
+    - name: Test Races, no avx512
+      env:
+        CGO_ENABLED: 1
+      run: go test -no-avx512 -short -race .
+
+    - name: Test Races, no avx2
+      env:
+        CGO_ENABLED: 1
+      run: go test -no-avx512 -no-avx2 -short -race .
+
+    - name: Test Races, no ssse3
+      env:
+        CGO_ENABLED: 1
+      run: go test -no-avx512 -no-avx2 -no-ssse3 -short -race .
+
+    - name: Test Microarch v4
+      shell: bash {0}
+      run: go run testlevel.go 4;if [ $? -eq 0 ]; then GOAMD64=v4 go test -no-avx512 ./...; else true; fi
+
+    - name: Builds on ARM64
+      env:
+        GOOS: linux
+        GOARCH: arm64
+      run: go build .&&go build examples/simple-decoder.go&&go build examples/simple-encoder.go&&go build examples/stream-decoder.go&&go build examples/stream-encoder.go
+
+    - name: Build on PPC64LE
+      env:
+        GOOS: linux
+        GOARCH: ppc64le
+      run: go build .&&go build examples/simple-decoder.go&&go build examples/simple-encoder.go&&go build examples/stream-decoder.go&&go build examples/stream-encoder.go
+
+  generate:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19.x
+
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Generate
+        working-directory: ./_gen
+        run: go generate -v -x
+
+      - name: Git Status
+        run: |
+          git diff
+          test -z "$(git status --porcelain)"
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index fdd619c..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-language: go
-
-os:
-  - linux
-  - osx
-  - windows
-
-arch:
-  - amd64
-  - arm64
-  - ppc64le
-  - s390x
-
-go:
-  - 1.14.x
-  - 1.15.x
-  - 1.16.x
-  - master
-
-env:
-  - GO111MODULE=off CGO_ENABLED=0
-
-install:
- - go get ./...
-
-script:
- - go vet ./...
- - go test -cpu=1,2 .
- - go test -tags=noasm -cpu=1,2 .
- - go build examples/simple-decoder.go
- - go build examples/simple-encoder.go
- - go build examples/stream-decoder.go
- - go build examples/stream-encoder.go
-
-jobs:
-  allow_failures:
-    - go: 'master'
-    - arch: s390x
-  fast_finish: true
-  include:
-    - stage: other
-      go: 1.16.x
-      os: linux
-      arch: amd64
-      script:
-        - diff <(gofmt -d .) <(printf "")
-        - diff <(gofmt -d ./examples) <(printf "")
-        - go get github.com/klauspost/asmfmt&&go install github.com/klauspost/asmfmt/cmd/asmfmt
-        - diff <(asmfmt -d .) <(printf "")
-        - CGO_ENABLED=1 go test -cpu=1 -short -race .
-        - CGO_ENABLED=1 go test -cpu=2 -short -race .
-        - CGO_ENABLED=1 go test -tags=noasm -cpu=1 -short -race .
-        - CGO_ENABLED=1 go test -tags=noasm -cpu=4 -short -race .
-        - CGO_ENABLED=1 go test -no-avx512 -short -race .
-        - CGO_ENABLED=1 go test -no-avx512 -no-avx2 -short -race .
-        - CGO_ENABLED=1 go test -no-avx512 -no-avx2 -no-ssse3 -short -race .
-        - GOOS=linux GOARCH=386 go test -short .
-    - stage: other
-      go: 1.15.x
-      os: linux
-      arch: amd64
-      script:
-        - go test -no-avx512
-        - go test -no-avx512 -no-avx2
-        - go test -no-avx512 -no-avx2 -no-ssse3
diff --git a/README.md b/README.md
index ff50f43..e9c148f 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,5 @@
 # Reed-Solomon
-[![GoDoc][1]][2] [![Build Status][3]][4]
-
-[1]: https://godoc.org/github.com/klauspost/reedsolomon?status.svg
-[2]: https://pkg.go.dev/github.com/klauspost/reedsolomon?tab=doc
-[3]: https://travis-ci.org/klauspost/reedsolomon.svg?branch=master
-[4]: https://travis-ci.org/klauspost/reedsolomon
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/reedsolomon.svg)](https://pkg.go.dev/github.com/klauspost/reedsolomon) [![Go](https://github.com/klauspost/reedsolomon/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/reedsolomon/actions/workflows/go.yml)
 
 Reed-Solomon Erasure Coding in Go, with speeds exceeding 1GB/s/cpu core implemented in pure Go.
 
@@ -13,9 +8,12 @@ This is a Go port of the [JavaReedSolomon](https://github.com/Backblaze/JavaReed
 
 For an introduction on erasure coding, see the post on the [Backblaze blog](https://www.backblaze.com/blog/reed-solomon/).
 
+For encoding high shard counts (>256) a Leopard implementation is used.
+For most platforms this performs close to the original Leopard implementation in terms of speed. 
+
 Package home: https://github.com/klauspost/reedsolomon
 
-Godoc: https://pkg.go.dev/github.com/klauspost/reedsolomon?tab=doc
+Godoc: https://pkg.go.dev/github.com/klauspost/reedsolomon
 
 # Installation
 To get the package use the standard:
@@ -23,10 +21,30 @@ To get the package use the standard:
 go get -u github.com/klauspost/reedsolomon
 ```
 
-Using Go modules recommended.
+Using Go modules is recommended.
 
 # Changes
 
+## 2022
+
+* [GFNI](https://github.com/klauspost/reedsolomon/pull/224) support for amd64, for up to 3x faster processing.
+* [Leopard GF8](https://github.com/klauspost/reedsolomon#leopard-gf8) mode added, for faster processing of medium shard counts.
+* [Leopard GF16](https://github.com/klauspost/reedsolomon#leopard-compatible-gf16) mode added, for up to 65536 shards. 
+* [WithJerasureMatrix](https://pkg.go.dev/github.com/klauspost/reedsolomon?tab=doc#WithJerasureMatrix) allows constructing a [Jerasure](https://github.com/tsuraan/Jerasure) compatible matrix.
+
+## 2021
+
+* Use `GOAMD64=v4` to enable faster AVX2.
+* Add progressive shard encoding.
+* Wider AVX2 loops
+* Limit concurrency on AVX2, since we are likely memory bound.
+* Allow 0 parity shards.
+* Allow disabling inversion cache.
+* Faster AVX2 encoding.
+
+<details>
+	<summary>See older changes</summary>
+
 ## May 2020
 
 * ARM64 optimizations, up to 2.5x faster.
@@ -89,6 +107,8 @@ The [`StreamEncoder`](https://godoc.org/github.com/klauspost/reedsolomon#StreamE
 handles this without modifying the interface. 
 This is a good lesson on why returning interfaces is not a good design.
 
+</details>
+
 # Usage
 
 This section assumes you know the basics of Reed-Solomon encoding. 
@@ -98,23 +118,19 @@ This package performs the calculation of the parity sets. The usage is therefore
 
 First of all, you need to choose your distribution of data and parity shards. 
 A 'good' distribution is very subjective, and will depend a lot on your usage scenario. 
-A good starting point is above 5 and below 257 data shards (the maximum supported number), 
-and the number of parity shards to be 2 or above, and below the number of data shards.
 
 To create an encoder with 10 data shards (where your data goes) and 3 parity shards (calculated):
 ```Go
     enc, err := reedsolomon.New(10, 3)
 ```
 This encoder will work for all parity sets with this distribution of data and parity shards. 
-The error will only be set if you specify 0 or negative values in any of the parameters, 
-or if you specify more than 256 data shards.
 
 If you will primarily be using it with one shard size it is recommended to use 
 [`WithAutoGoroutines(shardSize)`](https://pkg.go.dev/github.com/klauspost/reedsolomon?tab=doc#WithAutoGoroutines)
 as an additional parameter. This will attempt to calculate the optimal number of goroutines to use for the best speed.
 It is not required that all shards are this size. 
 
-The you send and receive data  is a simple slice of byte slices; `[][]byte`. 
+Then you send and receive data that is a simple slice of byte slices; `[][]byte`. 
 In the example above, the top slice must have a length of 13.
 
 ```Go
@@ -130,8 +146,10 @@ but you could for instance also use [mmap](https://github.com/edsrzf/mmap-go) to
       data[i] := make([]byte, 50000)
     }
     
+    // The above allocations can also be done by the encoder:
+    // data := enc.(reedsolomon.Extended).AllocAligned(50000)
     
-  // Fill some data into the data shards
+    // Fill some data into the data shards
     for i, in := range data[:10] {
       for j:= range in {
          in[j] = byte((i+j)&0xff)
@@ -180,6 +198,17 @@ If you are only interested in the data shards (for reading purposes) you can cal
     err := enc.ReconstructData(data)
 ```
 
+If you don't need all data shards you can use `ReconstructSome()`:
+
+```Go
+    // Delete two data shards
+    data[3] = nil
+    data[7] = nil
+    
+    // Reconstruct just the shard 3
+    err := enc.ReconstructSome(data, []bool{false, false, false, true, false, false, false, false})
+```
+
 So to sum up reconstruction:
 * The number of data/parity shards must match the numbers used for encoding.
 * The order of shards must be the same as used when encoding.
@@ -211,6 +240,72 @@ To join a data set, use the `Join()` function, which will join the shards and wr
    err = enc.Join(io.Discard, data, len(bigfile))
 ```
 
+## Aligned Allocations
+
+For AMD64 aligned inputs can make a big speed difference.
+
+This is an example of the speed difference when inputs are unaligned/aligned:
+
+```
+BenchmarkEncode100x20x10000-32    	    7058	    172648 ns/op	6950.57 MB/s
+BenchmarkEncode100x20x10000-32    	    8406	    137911 ns/op	8701.24 MB/s
+```
+
+This is mostly the case when dealing with odd-sized shards. 
+
+To facilitate this the package provides an `AllocAligned(shards, each int) [][]byte`. 
+This will allocate a number of shards, each with the size `each`.
+Each shard will then be aligned to a 64 byte boundary.
+
+Each encoder also has a `AllocAligned(each int) [][]byte` as an extended interface which will return the same, 
+but with the shard count configured in the encoder.   
+
+It is not possible to re-aligned already allocated slices, for example when using `Split`.
+When it is not possible to write to aligned shards, you should not copy to them.
+
+# Progressive encoding
+
+It is possible to encode individual shards using EncodeIdx:
+
+```Go
+	// EncodeIdx will add parity for a single data shard.
+	// Parity shards should start out as 0. The caller must zero them.
+	// Data shards must be delivered exactly once. There is no check for this.
+	// The parity shards will always be updated and the data shards will remain the same.
+	EncodeIdx(dataShard []byte, idx int, parity [][]byte) error
+```
+
+This allows progressively encoding the parity by sending individual data shards.
+There is no requirement on shards being delivered in order, 
+but when sent in order it allows encoding shards one at the time,
+effectively allowing the operation to be streaming. 
+
+The result will be the same as encoding all shards at once.
+There is a minor speed penalty using this method, so send 
+shards at once if they are available.
+
+## Example
+
+```Go
+func test() {
+    // Create an encoder with 7 data and 3 parity slices.
+    enc, _ := reedsolomon.New(7, 3)
+
+    // This will be our output parity.
+    parity := make([][]byte, 3)
+    for i := range parity {
+        parity[i] = make([]byte, 10000)
+    }
+
+    for i := 0; i < 7; i++ {
+        // Send data shards one at the time.
+        _ = enc.EncodeIdx(make([]byte, 10000), i, parity)
+    }
+
+    // parity now contains parity, as if all data was sent in one call.
+}
+```
+
 # Streaming/Merging
 
 It might seem like a limitation that all data should be in memory, 
@@ -283,6 +378,8 @@ There is no buffering or timeouts/retry specified. If you want to add that, you
 For complete examples of a streaming encoder and decoder see the 
 [examples folder](https://github.com/klauspost/reedsolomon/tree/master/examples).
 
+GF16 (more than 256 shards) is not supported by the streaming interface. 
+
 # Advanced Options
 
 You can modify internal options which affects how jobs are split between and processed by goroutines.
@@ -296,8 +393,88 @@ Example of how to supply options:
      enc, err := reedsolomon.New(10, 3, WithMaxGoroutines(25))
  ```
 
+# Leopard Compatible GF16
+
+When you encode more than 256 shards the library will switch to a [Leopard-RS](https://github.com/catid/leopard) implementation.
+
+This allows encoding up to 65536 shards (data+parity) with the following limitations, similar to leopard:
+
+* The original and recovery data must not exceed 65536 pieces.
+* The shard size *must*  each be a multiple of 64 bytes.
+* Each buffer should have the same number of bytes.
+* Even the last shard must be rounded up to the block size.
+
+|                 | Regular | Leopard |
+|-----------------|---------|---------|
+| Encode          | βœ“       | βœ“       |
+| EncodeIdx       | βœ“       | -       |
+| Verify          | βœ“       | βœ“       |
+| Reconstruct     | βœ“       | βœ“       |
+| ReconstructData | βœ“       | βœ“       |
+| ReconstructSome | βœ“       | βœ“ (+)   |
+| Update          | βœ“       | -       |
+| Split           | βœ“       | βœ“       |
+| Join            | βœ“       | βœ“       |
+
+* (+) Same as calling `ReconstructData`.
+
+The Split/Join functions will help to split an input to the proper sizes.
+
+Speed can be expected to be `O(N*log(N))`, compared to the `O(N*N)`. 
+Reconstruction matrix calculation is more time-consuming, 
+so be sure to include that as part of any benchmark you run.  
+
+For now SSSE3, AVX2 and AVX512 assembly are available on AMD64 platforms.
+
+Leopard mode currently always runs as a single goroutine, since multiple 
+goroutines doesn't provide any worthwhile speedup.
+
+## Leopard GF8
+
+It is possible to replace the default reed-solomon encoder with a leopard compatible one.
+This will typically be faster when dealing with more than 20-30 shards.
+Note that the limitations listed above also applies to this mode. 
+See table below for speed with different number of shards.
+
+To enable Leopard GF8 mode use `WithLeopardGF(true)`.
+
+Benchmark Encoding and Reconstructing *1KB* shards with variable number of shards.
+All implementation use inversion cache when available.
+Speed is total shard size for each operation. Data shard throughput is speed/2.
+AVX2 is used.
+
+| Encoder      | Shards      | Encode         | Recover All  | Recover One    |
+|--------------|-------------|----------------|--------------|----------------|
+| Cauchy       | 4+4         | 23076.83 MB/s  | 5444.02 MB/s | 10834.67 MB/s  |
+| Cauchy       | 8+8         | 15206.87 MB/s  | 4223.42 MB/s | 16181.62  MB/s |
+| Cauchy       | 16+16       | 7427.47 MB/s   | 3305.84 MB/s | 22480.41  MB/s |
+| Cauchy       | 32+32       | 3785.64 MB/s   | 2300.07 MB/s | 26181.31  MB/s |
+| Cauchy       | 64+64       | 1911.93 MB/s   | 1368.51 MB/s | 27992.93 MB/s  |
+| Cauchy       | 128+128     | 963.83 MB/s    | 1327.56 MB/s | 32866.86 MB/s  |
+| Leopard GF8  | 4+4         | 17061.28 MB/s  | 3099.06 MB/s | 4096.78 MB/s   |
+| Leopard GF8  | 8+8         | 10546.67 MB/s  | 2925.92 MB/s | 3964.00 MB/s   |
+| Leopard GF8  | 16+16       | 10961.37  MB/s | 2328.40 MB/s | 3110.22 MB/s   |
+| Leopard GF8  | 32+32       | 7111.47 MB/s   | 2374.61 MB/s | 3220.75 MB/s   |
+| Leopard GF8  | 64+64       | 7468.57 MB/s   | 2055.41 MB/s | 3061.81 MB/s   |
+| Leopard GF8  | 128+128     | 5479.99 MB/s   | 1953.21 MB/s | 2815.15 MB/s   |
+| Leopard GF16 | 256+256     | 6158.66 MB/s   | 454.14 MB/s  | 506.70 MB/s    |
+| Leopard GF16 | 512+512     | 4418.58 MB/s   | 685.75 MB/s  | 801.63 MB/s    |
+| Leopard GF16 | 1024+1024   | 4778.05 MB/s   | 814.51 MB/s  | 1080.19 MB/s   |
+| Leopard GF16 | 2048+2048   | 3417.05 MB/s   | 911.64 MB/s  | 1179.48 MB/s   |
+| Leopard GF16 | 4096+4096   | 3209.41 MB/s   | 729.13 MB/s  | 1135.06 MB/s   |
+| Leopard GF16 | 8192+8192   | 2034.11 MB/s   | 604.52 MB/s  | 842.13 MB/s    |
+| Leopard GF16 | 16384+16384 | 1525.88 MB/s   | 486.74 MB/s  | 750.01 MB/s    |
+| Leopard GF16 | 32768+32768 | 1138.67 MB/s   | 482.81 MB/s  | 712.73 MB/s    |
+
+"Traditional" encoding is faster until somewhere between 16 and 32 shards.
+Leopard provides fast encoding in all cases, but shows a significant overhead for reconstruction.
+
+Calculating the reconstruction matrix takes a significant amount of computation. 
+With bigger shards that will be smaller. Arguably, fewer shards typically also means bigger shards.
+Due to the high shard count caching reconstruction matrices generally isn't feasible for Leopard. 
 
 # Performance
+
 Performance depends mainly on the number of parity shards. 
 In rough terms, doubling the number of parity shards will double the encoding time.
 
@@ -306,27 +483,16 @@ For reference each shard is 1MB random data, and 16 CPU cores are used for encod
 
 | Data | Parity | Go MB/s | SSSE3 MB/s | AVX2 MB/s |
 |------|--------|---------|------------|-----------|
-| 5    | 2      | 14287   | 66355      | 108755    |
-| 8    | 8      | 5569    | 34298      | 70516     |
-| 10   | 4      | 6766    | 48237      | 93875     |
-| 50   | 20     | 1540    | 12130      | 22090     |
+| 5    | 2      | 20,772  | 66,355     | 108,755   |
+| 8    | 8      | 6,815   | 38,338     | 70,516    |
+| 10   | 4      | 9,245   | 48,237     | 93,875    |
+| 50   | 20     | 2,063   | 12,130     | 22,828    |
 
 The throughput numbers here is the size of the encoded data and parity shards.
 
 If `runtime.GOMAXPROCS()` is set to a value higher than 1, 
 the encoder will use multiple goroutines to perform the calculations in `Verify`, `Encode` and `Reconstruct`.
 
-Example of performance scaling on AMD Ryzen 3950X - 16 physical cores, 32 logical cores, AVX 2.
-The example uses 10 blocks with 1MB data each and 4 parity blocks.
-
-| Threads | Speed      |
-|---------|------------|
-| 1       | 9979 MB/s  |
-| 2       | 18870 MB/s |
-| 4       | 33697 MB/s |
-| 8       | 51531 MB/s |
-| 16      | 59204 MB/s |
-
 
 Benchmarking `Reconstruct()` followed by a `Verify()` (=`all`) versus just calling `ReconstructData()` (=`data`) gives the following result:
 ```
@@ -340,22 +506,10 @@ BenchmarkReconstruct50x20x1M-8       1364.35      4189.79      3.07x
 BenchmarkReconstruct10x4x16M-8       1484.35      5779.53      3.89x
 ```
 
-# Performance on AVX512
+The package will use [GFNI](https://en.wikipedia.org/wiki/AVX-512#GFNI) instructions combined with AVX512 when these are available.
+This further improves speed by up to 3x over AVX2 code paths.
 
-The performance on AVX512 has been accelerated for Intel CPUs. 
-This gives speedups on a per-core basis typically up to 2x compared to 
-AVX2 as can be seen in the following table:
-
-```
-[...]
-```
-
-This speedup has been achieved by computing multiple parity blocks in parallel as opposed to one after the other. 
-In doing so it is possible to minimize the memory bandwidth required for loading all data shards. 
-At the same time the calculations are performed in the 512-bit wide ZMM registers and the surplus of ZMM 
-registers (32 in total) is used to keep more data around (most notably the matrix coefficients).
-
-# Performance on ARM64 NEON
+## ARM64 NEON
 
 By exploiting NEON instructions the performance for ARM has been accelerated. 
 Below are the performance numbers for a single core on an EC2 m6g.16xlarge (Graviton2) instance (Amazon Linux 2):
@@ -370,7 +524,7 @@ BenchmarkGaloisXor1M-64        10000    100322 ns/op        10452.13 MB/s
 # Performance on ppc64le
 
 The performance for ppc64le has been accelerated. 
-This gives roughly a 10x performance improvement on this architecture as can been seen below:
+This gives roughly a 10x performance improvement on this architecture as can be seen below:
 
 ```
 benchmark                      old MB/s     new MB/s     speedup
@@ -380,9 +534,6 @@ BenchmarkGaloisXor128K-160     862.02       7905.00      9.17x
 BenchmarkGaloisXor1M-160       784.60       6296.65      8.03x
 ```
 
-# asm2plan9s
-
-[asm2plan9s](https://github.com/fwessels/asm2plan9s) is used for assembling the AVX2 instructions into their BYTE/WORD/LONG equivalents.
 
 # Links
 * [Backblaze Open Sources Reed-Solomon Erasure Coding Source Code](https://www.backblaze.com/blog/reed-solomon/).
@@ -393,6 +544,7 @@ BenchmarkGaloisXor1M-160       784.60       6296.65      8.03x
 * [reed-solomon-erasure](https://github.com/darrenldl/reed-solomon-erasure). Compatible Rust implementation.
 * [go-erasure](https://github.com/somethingnew2-0/go-erasure). A similar library using cgo, slower in my tests.
 * [Screaming Fast Galois Field Arithmetic](http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf). Basis for SSE3 optimizations.
+* [Leopard-RS](https://github.com/catid/leopard) C library used as basis for GF16 implementation.
 
 # License
 
diff --git a/_gen/cleanup.go b/_gen/cleanup.go
new file mode 100644
index 0000000..afdde01
--- /dev/null
+++ b/_gen/cleanup.go
@@ -0,0 +1,48 @@
+//go:build custom
+// +build custom
+
+// Copyright 2022+, Klaus Post. See LICENSE for details.
+
+package main
+
+import (
+	"bytes"
+	"flag"
+	"io/ioutil"
+	"log"
+	"os"
+
+	"github.com/klauspost/asmfmt"
+)
+
+func main() {
+	flag.Parse()
+	args := flag.Args()
+	for _, file := range args {
+		data, err := ioutil.ReadFile(file)
+		if err != nil {
+			log.Fatalln(err)
+		}
+		data = bytes.ReplaceAll(data, []byte("\t// #"), []byte("#"))
+		data = bytes.ReplaceAll(data, []byte("\t// @"), []byte(""))
+		data = bytes.ReplaceAll(data, []byte("VPTERNLOGQ"), []byte("XOR3WAY("))
+		split := bytes.Split(data, []byte("\n"))
+		// Add closing ')'
+		want := []byte("\tXOR3WAY(")
+		for i, b := range split {
+			if bytes.Contains(b, want) {
+				b = []byte(string(b) + ")")
+				split[i] = b
+			}
+		}
+		data = bytes.Join(split, []byte("\n"))
+		data, err = asmfmt.Format(bytes.NewBuffer(data))
+		if err != nil {
+			log.Fatalln(err)
+		}
+		err = ioutil.WriteFile(file, data, os.ModePerm)
+		if err != nil {
+			log.Fatalln(err)
+		}
+	}
+}
diff --git a/_gen/gen.go b/_gen/gen.go
index c544390..b2f2507 100644
--- a/_gen/gen.go
+++ b/_gen/gen.go
@@ -1,7 +1,12 @@
-//+build generate
+//go:build generate
+// +build generate
 
-//go:generate go run gen.go -out ../galois_gen_amd64.s -stubs ../galois_gen_amd64.go -pkg=reedsolomon
-//go:generate gofmt -w ../galois_gen_switch_amd64.go
+// Copyright 2022+, Klaus Post. See LICENSE for details.
+
+//go:generate go run -tags=generate . -out ../galois_gen_amd64.s -stubs ../galois_gen_amd64.go -pkg=reedsolomon
+//go:generate go fmt ../galois_gen_switch_amd64.go
+//go:generate go fmt ../galois_gen_amd64.go
+//go:generate go run cleanup.go ../galois_gen_amd64.s
 
 package main
 
@@ -24,6 +29,9 @@ const outputMax = 10
 var switchDefs [inputMax][outputMax]string
 var switchDefsX [inputMax][outputMax]string
 
+var switchDefs512 [inputMax][outputMax]string
+var switchDefsX512 [inputMax][outputMax]string
+
 // Prefetch offsets, set to 0 to disable.
 // Disabled since they appear to be consistently slower.
 const prefetchSrc = 0
@@ -35,14 +43,28 @@ func main() {
 	Constraint(buildtags.Not("nogen").ToConstraint())
 	Constraint(buildtags.Term("gc").ToConstraint())
 
-	const perLoopBits = 5
+	TEXT("_dummy_", 0, "func()")
+	Comment("#ifdef GOAMD64_v4")
+	Comment("#define XOR3WAY(ignore, a, b, dst)\\")
+	Comment("@\tVPTERNLOGD $0x96, a, b, dst")
+	Comment("#else")
+	Comment("#define XOR3WAY(ignore, a, b, dst)\\")
+	Comment("@\tVPXOR a, dst, dst\\")
+	Comment("@\tVPXOR b, dst, dst")
+	Comment("#endif")
+	RET()
+
+	const perLoopBits = 6
 	const perLoop = 1 << perLoopBits
 
 	for i := 1; i <= inputMax; i++ {
 		for j := 1; j <= outputMax; j++ {
-			//genMulAvx2(fmt.Sprintf("mulAvxTwoXor_%dx%d", i, j), i, j, true)
 			genMulAvx2(fmt.Sprintf("mulAvxTwo_%dx%d", i, j), i, j, false)
 			genMulAvx2Sixty64(fmt.Sprintf("mulAvxTwo_%dx%d_64", i, j), i, j, false)
+			genMulAvx512GFNI(fmt.Sprintf("mulGFNI_%dx%d_64", i, j), i, j, false)
+			genMulAvx512GFNI(fmt.Sprintf("mulGFNI_%dx%d_64Xor", i, j), i, j, true)
+			genMulAvx2(fmt.Sprintf("mulAvxTwo_%dx%dXor", i, j), i, j, true)
+			genMulAvx2Sixty64(fmt.Sprintf("mulAvxTwo_%dx%d_64Xor", i, j), i, j, true)
 		}
 	}
 	f, err := os.Create("../galois_gen_switch_amd64.go")
@@ -61,19 +83,26 @@ func main() {
 
 package reedsolomon
 
-import "fmt"
+import (
+	"fmt"
+)
 
 `)
 
-	w.WriteString("const avx2CodeGen = true\n")
-	w.WriteString(fmt.Sprintf("const maxAvx2Inputs = %d\nconst maxAvx2Outputs = %d\n", inputMax, outputMax))
+	w.WriteString(fmt.Sprintf(`const (
+avx2CodeGen = true
+maxAvx2Inputs = %d
+maxAvx2Outputs = %d
+minAvx2Size = %d
+avxSizeMask = maxInt - (minAvx2Size-1)
+)`, inputMax, outputMax, perLoop))
 	w.WriteString(`
 
 func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
-	n := stop-start
+	n := (stop-start) & avxSizeMask
+
 `)
 
-	w.WriteString(fmt.Sprintf("n = (n>>%d)<<%d\n\n", perLoopBits, perLoopBits))
 	w.WriteString(`switch len(in) {
 `)
 	for in, defs := range switchDefs[:] {
@@ -87,10 +116,79 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	w.WriteString(`}
 	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
 }
+
+func galMulSlicesAvx2Xor(matrix []byte, in, out [][]byte, start, stop int) int {
+	n := (stop-start) & avxSizeMask
+
+`)
+
+	w.WriteString(`switch len(in) {
+`)
+	for in, defs := range switchDefsX[:] {
+		w.WriteString(fmt.Sprintf("		case %d:\n			switch len(out) {\n", in+1))
+		for out, def := range defs[:] {
+			w.WriteString(fmt.Sprintf("				case %d:\n", out+1))
+			w.WriteString(def)
+		}
+		w.WriteString("}\n")
+	}
+	w.WriteString(`}
+	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
+}
+`)
+
+	w.WriteString(`
+
+func galMulSlicesGFNI(matrix []uint64, in, out [][]byte, start, stop int) int {
+	n := (stop-start) & avxSizeMask
+
 `)
+
+	w.WriteString(`switch len(in) {
+`)
+	for in, defs := range switchDefs512[:] {
+		w.WriteString(fmt.Sprintf("		case %d:\n			switch len(out) {\n", in+1))
+		for out, def := range defs[:] {
+			w.WriteString(fmt.Sprintf("				case %d:\n", out+1))
+			w.WriteString(def)
+		}
+		w.WriteString("}\n")
+	}
+	w.WriteString(`}
+	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
+}
+
+func galMulSlicesGFNIXor(matrix []uint64, in, out [][]byte, start, stop int) int {
+	n := (stop-start) & avxSizeMask
+
+`)
+
+	w.WriteString(`switch len(in) {
+`)
+	for in, defs := range switchDefsX512[:] {
+		w.WriteString(fmt.Sprintf("		case %d:\n			switch len(out) {\n", in+1))
+		for out, def := range defs[:] {
+			w.WriteString(fmt.Sprintf("				case %d:\n", out+1))
+			w.WriteString(def)
+		}
+		w.WriteString("}\n")
+	}
+	w.WriteString(`}
+	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
+}
+`)
+
+	genGF16()
+	genGF8()
 	Generate()
 }
 
+// VPXOR3way will 3-way xor a and b and dst.
+func VPXOR3way(a, b, dst reg.VecVirtual) {
+	// VPTERNLOGQ is replaced by XOR3WAY - we just use an equivalent operation
+	VPTERNLOGQ(U8(0), a, b, dst)
+}
+
 func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 	const perLoopBits = 5
 	const perLoop = 1 << perLoopBits
@@ -128,12 +226,21 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 		}
 	}
 
+	x := ""
+	if xor {
+		x = "Xor"
+	}
+
 	TEXT(name, attr.NOSPLIT, fmt.Sprintf("func(matrix []byte, in [][]byte, out [][]byte, start, n int)"))
 
 	// SWITCH DEFINITION:
-	s := fmt.Sprintf("			mulAvxTwo_%dx%d(matrix, in, out, start, n)\n", inputs, outputs)
+	s := fmt.Sprintf("			mulAvxTwo_%dx%d%s(matrix, in, out, start, n)\n", inputs, outputs, x)
 	s += fmt.Sprintf("\t\t\t\treturn n\n")
-	switchDefs[inputs-1][outputs-1] = s
+	if xor {
+		switchDefsX[inputs-1][outputs-1] = s
+	} else {
+		switchDefs[inputs-1][outputs-1] = s
+	}
 
 	if loadNone {
 		Comment("Loading no tables to registers")
@@ -196,7 +303,6 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 	if err != nil {
 		panic(err)
 	}
-	outBase := addr.Addr
 	outSlicePtr := GP64()
 	MOVQ(addr.Addr, outSlicePtr)
 	for i := range dst {
@@ -240,13 +346,13 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 		SHRQ(U8(perLoopBits), length)
 	}
 	Label(name + "_loop")
-	if xor {
+
+	// Load data before loop or during first iteration?
+	// No clear winner.
+	preloadInput := xor && false
+	if preloadInput {
 		Commentf("Load %d outputs", outputs)
-	} else {
-		Commentf("Clear %d outputs", outputs)
-	}
-	for i := range dst {
-		if xor {
+		for i := range dst {
 			if regDst {
 				VMOVDQU(Mem{Base: dstPtr[i]}, dst[i])
 				if prefetchDst > 0 {
@@ -255,13 +361,11 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 				continue
 			}
 			ptr := GP64()
-			MOVQ(outBase, ptr)
+			MOVQ(Mem{Base: outSlicePtr, Disp: i * 24}, ptr)
 			VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1}, dst[i])
 			if prefetchDst > 0 {
 				PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
 			}
-		} else {
-			VPXOR(dst[i], dst[i], dst[i])
 		}
 	}
 
@@ -278,6 +382,22 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 		VPAND(lowMask, inLow, inLow)
 		VPAND(lowMask, inHigh, inHigh)
 		for j := range dst {
+			//Commentf(" xor:%v i: %v", xor, i)
+			if !preloadInput && xor && i == 0 {
+				if regDst {
+					VMOVDQU(Mem{Base: dstPtr[j]}, dst[j])
+					if prefetchDst > 0 {
+						PREFETCHT0(Mem{Base: dstPtr[j], Disp: prefetchDst})
+					}
+				} else {
+					ptr := GP64()
+					MOVQ(Mem{Base: outSlicePtr, Disp: j * 24}, ptr)
+					VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1}, dst[j])
+					if prefetchDst > 0 {
+						PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
+					}
+				}
+			}
 			if loadNone {
 				VMOVDQU(Mem{Base: matrixBase, Disp: 64 * (i*outputs + j)}, lookLow)
 				VMOVDQU(Mem{Base: matrixBase, Disp: 32 + 64*(i*outputs+j)}, lookHigh)
@@ -287,8 +407,12 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 				VPSHUFB(inLow, inLo[i*outputs+j], lookLow)
 				VPSHUFB(inHigh, inHi[i*outputs+j], lookHigh)
 			}
-			VPXOR(lookLow, lookHigh, lookLow)
-			VPXOR(lookLow, dst[j], dst[j])
+			if i == 0 && !xor {
+				// We don't have any existing data, write directly.
+				VPXOR(lookLow, lookHigh, dst[j])
+			} else {
+				VPXOR3way(lookLow, lookHigh, dst[j])
+			}
 		}
 	}
 	Commentf("Store %d outputs", outputs)
@@ -339,35 +463,42 @@ func genMulAvx2Sixty64(name string, inputs int, outputs int, xor bool) {
 	// Load shuffle masks on every use.
 	var loadNone bool
 	// Use registers for destination registers.
-	var regDst = false
+	var regDst = true
 	var reloadLength = false
 
 	// lo, hi, 1 in, 1 out, 2 tmp, 1 mask
-	est := total*2 + outputs + 5
+	est := total*4 + outputs + 7
 	if outputs == 1 {
 		// We don't need to keep a copy of the input if only 1 output.
 		est -= 2
 	}
 
-	if true || est > 16 {
+	if est > 16 {
 		loadNone = true
 		// We run out of GP registers first, now.
 		if inputs+outputs > 13 {
 			regDst = false
 		}
 		// Save one register by reloading length.
-		if true || inputs+outputs > 12 && regDst {
+		if inputs+outputs > 12 && regDst {
 			reloadLength = true
 		}
 	}
 
 	TEXT(name, 0, fmt.Sprintf("func(matrix []byte, in [][]byte, out [][]byte, start, n int)"))
-
+	x := ""
+	if xor {
+		x = "Xor"
+	}
 	// SWITCH DEFINITION:
-	s := fmt.Sprintf("n = (n>>%d)<<%d\n", perLoopBits, perLoopBits)
-	s += fmt.Sprintf("			mulAvxTwo_%dx%d_64(matrix, in, out, start, n)\n", inputs, outputs)
+	//s := fmt.Sprintf("n = (n>>%d)<<%d\n", perLoopBits, perLoopBits)
+	s := fmt.Sprintf("			mulAvxTwo_%dx%d_64%s(matrix, in, out, start, n)\n", inputs, outputs, x)
 	s += fmt.Sprintf("\t\t\t\treturn n\n")
-	switchDefs[inputs-1][outputs-1] = s
+	if xor {
+		switchDefsX[inputs-1][outputs-1] = s
+	} else {
+		switchDefs[inputs-1][outputs-1] = s
+	}
 
 	if loadNone {
 		Comment("Loading no tables to registers")
@@ -473,33 +604,31 @@ func genMulAvx2Sixty64(name string, inputs int, outputs int, xor bool) {
 	VPBROADCASTB(lowMask.AsX(), lowMask)
 
 	if reloadLength {
+		Commentf("Reload length to save a register")
 		length = Load(Param("n"), GP64())
 		SHRQ(U8(perLoopBits), length)
 	}
 	Label(name + "_loop")
+
 	if xor {
 		Commentf("Load %d outputs", outputs)
-	} else {
-		Commentf("Clear %d outputs", outputs)
-	}
-	for i := range dst {
-		if xor {
+		for i := range dst {
 			if regDst {
 				VMOVDQU(Mem{Base: dstPtr[i]}, dst[i])
+				VMOVDQU(Mem{Base: dstPtr[i], Disp: 32}, dst2[i])
 				if prefetchDst > 0 {
 					PREFETCHT0(Mem{Base: dstPtr[i], Disp: prefetchDst})
 				}
 				continue
 			}
 			ptr := GP64()
-			MOVQ(outBase, ptr)
+			MOVQ(Mem{Base: outSlicePtr, Disp: i * 24}, ptr)
 			VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1}, dst[i])
+			VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1, Disp: 32}, dst2[i])
+
 			if prefetchDst > 0 {
 				PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
 			}
-		} else {
-			VPXOR(dst[i], dst[i], dst[i])
-			VPXOR(dst2[i], dst2[i], dst2[i])
 		}
 	}
 
@@ -526,19 +655,23 @@ func genMulAvx2Sixty64(name string, inputs int, outputs int, xor bool) {
 				VMOVDQU(Mem{Base: matrixBase, Disp: 64 * (i*outputs + j)}, lookLow)
 				VMOVDQU(Mem{Base: matrixBase, Disp: 32 + 64*(i*outputs+j)}, lookHigh)
 				VPSHUFB(in2Low, lookLow, lookLow2)
-				VPSHUFB(inLow, lookLow, lookLow)
+				VPSHUFB(inLow, lookLow, lookLow) // Reuse lookLow to save a reg
 				VPSHUFB(in2High, lookHigh, lookHigh2)
-				VPSHUFB(inHigh, lookHigh, lookHigh)
+				VPSHUFB(inHigh, lookHigh, lookHigh) // Reuse lookHigh to save a reg
 			} else {
 				VPSHUFB(inLow, inLo[i*outputs+j], lookLow)
 				VPSHUFB(in2Low, inLo[i*outputs+j], lookLow2)
 				VPSHUFB(inHigh, inHi[i*outputs+j], lookHigh)
 				VPSHUFB(in2High, inHi[i*outputs+j], lookHigh2)
 			}
-			VPXOR(lookLow, lookHigh, lookLow)
-			VPXOR(lookLow2, lookHigh2, lookLow2)
-			VPXOR(lookLow, dst[j], dst[j])
-			VPXOR(lookLow2, dst2[j], dst2[j])
+			if i == 0 && !xor {
+				// We don't have any existing data, write directly.
+				VPXOR(lookLow, lookHigh, dst[j])
+				VPXOR(lookLow2, lookHigh2, dst2[j])
+			} else {
+				VPXOR3way(lookLow, lookHigh, dst[j])
+				VPXOR3way(lookLow2, lookHigh2, dst2[j])
+			}
 		}
 	}
 	Commentf("Store %d outputs", outputs)
@@ -571,3 +704,229 @@ func genMulAvx2Sixty64(name string, inputs int, outputs int, xor bool) {
 	Label(name + "_end")
 	RET()
 }
+
+func genMulAvx512GFNI(name string, inputs int, outputs int, xor bool) {
+	const perLoopBits = 6
+	const perLoop = 1 << perLoopBits
+
+	total := inputs * outputs
+
+	doc := []string{
+		fmt.Sprintf("%s takes %d inputs and produces %d outputs.", name, inputs, outputs),
+	}
+	if !xor {
+		doc = append(doc, "The output is initialized to 0.")
+	}
+
+	// Load shuffle masks on every use.
+	var loadNone bool
+	// Use registers for destination registers.
+	var regDst = true
+	var reloadLength = false
+
+	est := total + outputs + 2
+	// When we can't hold all, keep this many in registers.
+	inReg := 0
+	if est > 32 {
+		loadNone = true
+		inReg = 32 - outputs - 2
+		// We run out of GP registers first, now.
+		if inputs+outputs > 13 {
+			regDst = false
+		}
+		// Save one register by reloading length.
+		if inputs+outputs > 12 && regDst {
+			reloadLength = true
+		}
+	}
+
+	TEXT(name, 0, fmt.Sprintf("func(matrix []uint64, in [][]byte, out [][]byte, start, n int)"))
+	x := ""
+	if xor {
+		x = "Xor"
+	}
+	// SWITCH DEFINITION:
+	//s := fmt.Sprintf("n = (n>>%d)<<%d\n", perLoopBits, perLoopBits)
+	s := fmt.Sprintf("			mulGFNI_%dx%d_64%s(matrix, in, out, start, n)\n", inputs, outputs, x)
+	s += fmt.Sprintf("\t\t\t\treturn n\n")
+	if xor {
+		switchDefsX512[inputs-1][outputs-1] = s
+	} else {
+		switchDefs512[inputs-1][outputs-1] = s
+	}
+
+	if loadNone {
+		Commentf("Loading %d of %d tables to registers", inReg, inputs*outputs)
+	} else {
+		// loadNone == false
+		Comment("Loading all tables to registers")
+	}
+	if regDst {
+		Comment("Destination kept in GP registers")
+	} else {
+		Comment("Destination kept on stack")
+	}
+
+	Doc(doc...)
+	Pragma("noescape")
+	Commentf("Full registers estimated %d YMM used", est)
+
+	length := Load(Param("n"), GP64())
+	matrixBase := GP64()
+	addr, err := Param("matrix").Base().Resolve()
+	if err != nil {
+		panic(err)
+	}
+	MOVQ(addr.Addr, matrixBase)
+	SHRQ(U8(perLoopBits), length)
+	TESTQ(length, length)
+	JZ(LabelRef(name + "_end"))
+
+	matrix := make([]reg.VecVirtual, total)
+
+	for i := range matrix {
+		if loadNone && i >= inReg {
+			break
+		}
+		table := ZMM()
+		VBROADCASTF32X2(Mem{Base: matrixBase, Disp: i * 8}, table)
+		matrix[i] = table
+	}
+
+	inPtrs := make([]reg.GPVirtual, inputs)
+	inSlicePtr := GP64()
+	addr, err = Param("in").Base().Resolve()
+	if err != nil {
+		panic(err)
+	}
+	MOVQ(addr.Addr, inSlicePtr)
+	for i := range inPtrs {
+		ptr := GP64()
+		MOVQ(Mem{Base: inSlicePtr, Disp: i * 24}, ptr)
+		inPtrs[i] = ptr
+	}
+	// Destination
+	dst := make([]reg.VecVirtual, outputs)
+	dstPtr := make([]reg.GPVirtual, outputs)
+	addr, err = Param("out").Base().Resolve()
+	if err != nil {
+		panic(err)
+	}
+	outBase := addr.Addr
+	outSlicePtr := GP64()
+	MOVQ(addr.Addr, outSlicePtr)
+	MOVQ(outBase, outSlicePtr)
+	for i := range dst {
+		dst[i] = ZMM()
+		if !regDst {
+			continue
+		}
+		ptr := GP64()
+		MOVQ(Mem{Base: outSlicePtr, Disp: i * 24}, ptr)
+		dstPtr[i] = ptr
+	}
+
+	offset := GP64()
+	addr, err = Param("start").Resolve()
+	if err != nil {
+		panic(err)
+	}
+
+	MOVQ(addr.Addr, offset)
+	if regDst {
+		Comment("Add start offset to output")
+		for _, ptr := range dstPtr {
+			ADDQ(offset, ptr)
+		}
+	}
+
+	Comment("Add start offset to input")
+	for _, ptr := range inPtrs {
+		ADDQ(offset, ptr)
+	}
+	// Offset no longer needed unless not regdst
+
+	if reloadLength {
+		Commentf("Reload length to save a register")
+		length = Load(Param("n"), GP64())
+		SHRQ(U8(perLoopBits), length)
+	}
+	Label(name + "_loop")
+
+	if xor {
+		Commentf("Load %d outputs", outputs)
+		for i := range dst {
+			if regDst {
+				VMOVDQU64(Mem{Base: dstPtr[i]}, dst[i])
+				if prefetchDst > 0 {
+					PREFETCHT0(Mem{Base: dstPtr[i], Disp: prefetchDst})
+				}
+				continue
+			}
+			ptr := GP64()
+			MOVQ(Mem{Base: outSlicePtr, Disp: i * 24}, ptr)
+			VMOVDQU64(Mem{Base: ptr, Index: offset, Scale: 1}, dst[i])
+
+			if prefetchDst > 0 {
+				PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
+			}
+		}
+	}
+
+	in := ZMM()
+	look := ZMM()
+	for i := range inPtrs {
+		Commentf("Load and process 64 bytes from input %d to %d outputs", i, outputs)
+		VMOVDQU64(Mem{Base: inPtrs[i]}, in)
+		if prefetchSrc > 0 {
+			PREFETCHT0(Mem{Base: inPtrs[i], Disp: prefetchSrc})
+		}
+		ADDQ(U8(perLoop), inPtrs[i])
+
+		for j := range dst {
+			idx := i*outputs + j
+			if loadNone && idx >= inReg {
+				if i == 0 && !xor {
+					VGF2P8AFFINEQB_BCST(U8(0), Mem{Base: matrixBase, Disp: 8 * idx}, in, dst[j])
+				} else {
+					VGF2P8AFFINEQB_BCST(U8(0), Mem{Base: matrixBase, Disp: 8 * idx}, in, look)
+					VXORPD(dst[j], look, dst[j])
+				}
+			} else {
+				if i == 0 && !xor {
+					VGF2P8AFFINEQB(U8(0), matrix[i*outputs+j], in, dst[j])
+				} else {
+					VGF2P8AFFINEQB(U8(0), matrix[i*outputs+j], in, look)
+					VXORPD(dst[j], look, dst[j])
+				}
+			}
+		}
+	}
+	Commentf("Store %d outputs", outputs)
+	for i := range dst {
+		if regDst {
+			VMOVDQU64(dst[i], Mem{Base: dstPtr[i]})
+			if prefetchDst > 0 && !xor {
+				PREFETCHT0(Mem{Base: dstPtr[i], Disp: prefetchDst})
+			}
+			ADDQ(U8(perLoop), dstPtr[i])
+			continue
+		}
+		ptr := GP64()
+		MOVQ(Mem{Base: outSlicePtr, Disp: i * 24}, ptr)
+		VMOVDQU64(dst[i], Mem{Base: ptr, Index: offset, Scale: 1})
+		if prefetchDst > 0 && !xor {
+			PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
+		}
+	}
+	Comment("Prepare for next loop")
+	if !regDst {
+		ADDQ(U8(perLoop), offset)
+	}
+	DECQ(length)
+	JNZ(LabelRef(name + "_loop"))
+	VZEROUPPER()
+
+	Label(name + "_end")
+	RET()
+}
diff --git a/_gen/gf16.go b/_gen/gf16.go
new file mode 100644
index 0000000..1cbea53
--- /dev/null
+++ b/_gen/gf16.go
@@ -0,0 +1,854 @@
+//go:build generate
+// +build generate
+
+// Copyright 2022+, Klaus Post. See LICENSE for details.
+
+package main
+
+import (
+	"fmt"
+
+	"github.com/mmcloughlin/avo/attr"
+	. "github.com/mmcloughlin/avo/build"
+	. "github.com/mmcloughlin/avo/operand"
+	"github.com/mmcloughlin/avo/reg"
+)
+
+type table256 struct {
+	Lo, Hi               Op
+	loadLo128, loadHi128 *Mem
+	loadLo256, loadHi256 *Mem
+	useZmmLo, useZmmHi   *reg.VecPhysical
+}
+
+type table512 Op
+
+func (t *table256) prepare() {
+	t.prepareLo()
+	t.prepareHi()
+}
+
+func (t *table256) prepareHi() {
+	if t.loadHi128 != nil {
+		t.Hi = YMM()
+		// Load and expand tables
+		VBROADCASTI128(*t.loadHi128, t.Hi)
+	}
+	if t.loadHi256 != nil {
+		t.Hi = YMM()
+		// Load and expand tables
+		VMOVDQU(*t.loadHi256, t.Hi)
+	}
+	if t.useZmmHi != nil {
+		r := *t.useZmmHi
+		t.Hi = r.AsY()
+	}
+}
+
+func (t *table256) prepareLo() {
+	if t.loadLo128 != nil {
+		t.Lo = YMM()
+		// Load and expand tables
+		VBROADCASTI128(*t.loadLo128, t.Lo)
+	}
+	if t.loadLo256 != nil {
+		t.Lo = YMM()
+		// Load and expand tables
+		VMOVDQU(*t.loadLo256, t.Lo)
+	}
+	if t.useZmmLo != nil {
+		r := *t.useZmmLo
+		t.Lo = r.AsY()
+	}
+}
+
+// table128 contains memory pointers to tables
+type table128 struct {
+	Lo, Hi Op
+}
+
+type gf16ctx struct {
+	clrMask    reg.VecVirtual
+	clrMask128 reg.VecVirtual
+	avx512     bool
+}
+
+func genGF16() {
+	var ctx gf16ctx
+	// Ported from static void IFFT_DIT2
+	// https://github.com/catid/leopard/blob/master/LeopardFF16.cpp#L629
+	{
+		TEXT("ifftDIT2_avx2", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table  *[8*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		tables := [4]table256{}
+		for i, t := range tables {
+			t.Lo, t.Hi = YMM(), YMM()
+			// Load and expand tables
+			VBROADCASTI128(Mem{Base: tablePtr, Disp: i * 16}, t.Lo)
+			VBROADCASTI128(Mem{Base: tablePtr, Disp: i*16 + 16*4}, t.Hi)
+			tables[i] = t
+		}
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+		// Generate mask
+		ctx.clrMask = YMM()
+		tmpMask := GP64()
+		MOVQ(U32(15), tmpMask)
+		MOVQ(tmpMask, ctx.clrMask.AsX())
+		VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+		xLo, xHi, yLo, yHi := YMM(), YMM(), YMM(), YMM()
+		Label("loop")
+		VMOVDQU(Mem{Base: x, Disp: 0}, xLo)
+		VMOVDQU(Mem{Base: x, Disp: 32}, xHi)
+		VMOVDQU(Mem{Base: y, Disp: 0}, yLo)
+		VMOVDQU(Mem{Base: y, Disp: 32}, yHi)
+		VPXOR(yLo, xLo, yLo)
+		VPXOR(yHi, xHi, yHi)
+		VMOVDQU(yLo, Mem{Base: y, Disp: 0})
+		VMOVDQU(yHi, Mem{Base: y, Disp: 32})
+		leoMulAdd256(ctx, xLo, xHi, yLo, yHi, tables)
+		VMOVDQU(xLo, Mem{Base: x, Disp: 0})
+		VMOVDQU(xHi, Mem{Base: x, Disp: 32})
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JNZ(LabelRef("loop"))
+
+		VZEROUPPER()
+		RET()
+	}
+	{
+		TEXT("fftDIT2_avx2", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table  *[8*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		tables := [4]table256{}
+		for i, t := range tables {
+			t.Lo, t.Hi = YMM(), YMM()
+			// Load and expand tables
+			VBROADCASTI128(Mem{Base: tablePtr, Disp: i * 16}, t.Lo)
+			VBROADCASTI128(Mem{Base: tablePtr, Disp: i*16 + 16*4}, t.Hi)
+			tables[i] = t
+		}
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+		// Generate mask
+		ctx.clrMask = YMM()
+		tmpMask := GP64()
+		MOVQ(U32(15), tmpMask)
+		MOVQ(tmpMask, ctx.clrMask.AsX())
+		VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+		xLo, xHi, yLo, yHi := YMM(), YMM(), YMM(), YMM()
+		Label("loop")
+		VMOVDQU(Mem{Base: x, Disp: 0}, xLo)
+		VMOVDQU(Mem{Base: x, Disp: 32}, xHi)
+		VMOVDQU(Mem{Base: y, Disp: 0}, yLo)
+		VMOVDQU(Mem{Base: y, Disp: 32}, yHi)
+
+		leoMulAdd256(ctx, xLo, xHi, yLo, yHi, tables)
+		VMOVDQU(xLo, Mem{Base: x, Disp: 0})
+		VMOVDQU(xHi, Mem{Base: x, Disp: 32})
+
+		// Reload, or we go beyond 16 regs..
+		if true {
+			yLo, yHi = YMM(), YMM()
+			VMOVDQU(Mem{Base: y, Disp: 0}, yLo)
+			VMOVDQU(Mem{Base: y, Disp: 32}, yHi)
+		}
+
+		VPXOR(yLo, xLo, yLo)
+		VPXOR(yHi, xHi, yHi)
+		VMOVDQU(yLo, Mem{Base: y, Disp: 0})
+		VMOVDQU(yHi, Mem{Base: y, Disp: 32})
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JNZ(LabelRef("loop"))
+
+		VZEROUPPER()
+		RET()
+	}
+
+	{
+		TEXT("mulgf16_avx2", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table  *[8*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		tables := [4]table256{}
+		for i, t := range tables {
+			t.Lo, t.Hi = YMM(), YMM()
+			// Load and expand tables
+			VBROADCASTI128(Mem{Base: tablePtr, Disp: i * 16}, t.Lo)
+			VBROADCASTI128(Mem{Base: tablePtr, Disp: i*16 + 16*4}, t.Hi)
+			tables[i] = t
+		}
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+		// Generate mask
+		ctx.clrMask = YMM()
+		tmpMask := GP64()
+		MOVQ(U32(15), tmpMask)
+		MOVQ(tmpMask, ctx.clrMask.AsX())
+		VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+		dataLo, dataHi := YMM(), YMM()
+		Label("loop")
+		VMOVDQU(Mem{Base: y, Disp: 0}, dataLo)
+		VMOVDQU(Mem{Base: y, Disp: 32}, dataHi)
+
+		prodLo, prodHi := leoMul256(ctx, dataLo, dataHi, tables)
+		VMOVDQU(prodLo, Mem{Base: x, Disp: 0})
+		VMOVDQU(prodHi, Mem{Base: x, Disp: 32})
+
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JNZ(LabelRef("loop"))
+
+		VZEROUPPER()
+		RET()
+	}
+	for _, avx512 := range []bool{true, false} {
+		x := [8]int{}
+		for skipMask := range x[:] {
+			// AVX-512 only uses more registers for tables.
+			var suffix = "avx2_" + fmt.Sprint(skipMask)
+			if avx512 {
+				suffix = "avx512_" + fmt.Sprint(skipMask)
+			}
+			ctx.avx512 = avx512
+			extZMMs := []reg.VecPhysical{reg.Z16, reg.Z17, reg.Z18, reg.Z19, reg.Z20, reg.Z21, reg.Z22, reg.Z23, reg.Z24, reg.Z25, reg.Z26, reg.Z27, reg.Z28, reg.Z29, reg.Z30, reg.Z31}
+			{
+				TEXT("ifftDIT4_"+suffix, attr.NOSPLIT, fmt.Sprintf("func(work [][]byte, dist int, table01 *[8*16]uint8, table23 *[8*16]uint8, table02 *[8*16]uint8)"))
+				Pragma("noescape")
+				Comment("dist must be multiplied by 24 (size of slice header)")
+
+				// Unpack tables to stack. Slower.
+				const unpackTables = false
+
+				table01Ptr := Load(Param("table01"), GP64())
+				table23Ptr := Load(Param("table23"), GP64())
+				table02Ptr := Load(Param("table02"), GP64())
+
+				// Prepare table pointers.
+				table01 := [4]table256{}
+				table23 := [4]table256{}
+				table02 := [4]table256{}
+				if avx512 {
+					usedZmm := 0
+					fill := func(t *[4]table256, ptr reg.Register) {
+						for i := range table01 {
+							t := &t[i]
+							if len(extZMMs)-usedZmm >= 2 {
+								tmpLo, tmpHi := YMM(), YMM()
+								t.useZmmLo, t.useZmmHi = &extZMMs[usedZmm], &extZMMs[usedZmm+1]
+								usedZmm += 2
+								// Load and expand tables
+								VBROADCASTI128(Mem{Base: ptr, Disp: i * 16}, tmpLo)
+								VBROADCASTI128(Mem{Base: ptr, Disp: i*16 + 16*4}, tmpHi)
+								VMOVAPS(tmpLo.AsZ(), *t.useZmmLo)
+								VMOVAPS(tmpHi.AsZ(), *t.useZmmHi)
+							} else {
+								t.loadLo128 = &Mem{Base: ptr, Disp: i * 16}
+								t.loadHi128 = &Mem{Base: ptr, Disp: i*16 + 16*4}
+							}
+						}
+					}
+					if (skipMask & 4) == 0 {
+						fill(&table02, table02Ptr)
+					}
+					if (skipMask & 1) == 0 {
+						fill(&table01, table01Ptr)
+					}
+					if (skipMask & 2) == 0 {
+						fill(&table23, table23Ptr)
+					}
+				}
+				for i := range table01 {
+					if avx512 {
+						continue
+					}
+
+					if unpackTables {
+						toStack := func(m Mem) *Mem {
+							stack := AllocLocal(32)
+							y := YMM()
+							VBROADCASTI128(m, y)
+							VMOVDQU(y, stack)
+							return &stack
+						}
+
+						table01[i].loadLo256 = toStack(Mem{Base: table01Ptr, Disp: i * 16})
+						table23[i].loadLo256 = toStack(Mem{Base: table23Ptr, Disp: i * 16})
+						table02[i].loadLo256 = toStack(Mem{Base: table02Ptr, Disp: i * 16})
+
+						table01[i].loadHi256 = toStack(Mem{Base: table01Ptr, Disp: i*16 + 16*4})
+						table23[i].loadHi256 = toStack(Mem{Base: table23Ptr, Disp: i*16 + 16*4})
+						table02[i].loadHi256 = toStack(Mem{Base: table02Ptr, Disp: i*16 + 16*4})
+					} else {
+						table01[i].loadLo128 = &Mem{Base: table01Ptr, Disp: i * 16}
+						table23[i].loadLo128 = &Mem{Base: table23Ptr, Disp: i * 16}
+						table02[i].loadLo128 = &Mem{Base: table02Ptr, Disp: i * 16}
+
+						table01[i].loadHi128 = &Mem{Base: table01Ptr, Disp: i*16 + 16*4}
+						table23[i].loadHi128 = &Mem{Base: table23Ptr, Disp: i*16 + 16*4}
+						table02[i].loadHi128 = &Mem{Base: table02Ptr, Disp: i*16 + 16*4}
+					}
+				}
+				// Generate mask
+				ctx.clrMask = YMM()
+				tmpMask := GP64()
+				MOVQ(U32(15), tmpMask)
+				MOVQ(tmpMask, ctx.clrMask.AsX())
+				VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+				dist := Load(Param("dist"), GP64())
+
+				// Pointers to each "work"
+				var work [4]reg.GPVirtual
+				workTable := Load(Param("work").Base(), GP64()) // &work[0]
+				bytes := GP64()
+
+				// Load length of work[0]
+				MOVQ(Mem{Base: workTable, Disp: 8}, bytes)
+
+				offset := GP64()
+				XORQ(offset, offset)
+				for i := range work {
+					work[i] = GP64()
+					// work[i] = &workTable[dist*i]
+					MOVQ(Mem{Base: workTable, Index: offset, Scale: 1}, work[i])
+					if i < len(work)-1 {
+						ADDQ(dist, offset)
+					}
+				}
+				var workRegLo [4]reg.VecVirtual
+				var workRegHi [4]reg.VecVirtual
+
+				workRegLo[0], workRegHi[0] = YMM(), YMM()
+				workRegLo[1], workRegHi[1] = YMM(), YMM()
+
+				Label("loop")
+				VMOVDQU(Mem{Base: work[0], Disp: 0}, workRegLo[0])
+				VMOVDQU(Mem{Base: work[0], Disp: 32}, workRegHi[0])
+				VMOVDQU(Mem{Base: work[1], Disp: 0}, workRegLo[1])
+				VMOVDQU(Mem{Base: work[1], Disp: 32}, workRegHi[1])
+
+				// First layer:
+				VPXOR(workRegLo[0], workRegLo[1], workRegLo[1])
+				VPXOR(workRegHi[0], workRegHi[1], workRegHi[1])
+
+				// Test bit 0
+				if (skipMask & 1) == 0 {
+					leoMulAdd256(ctx, workRegLo[0], workRegHi[0], workRegLo[1], workRegHi[1], table01)
+				}
+				workRegLo[2], workRegHi[2] = YMM(), YMM()
+				workRegLo[3], workRegHi[3] = YMM(), YMM()
+				VMOVDQU(Mem{Base: work[2], Disp: 0}, workRegLo[2])
+				VMOVDQU(Mem{Base: work[2], Disp: 32}, workRegHi[2])
+				VMOVDQU(Mem{Base: work[3], Disp: 0}, workRegLo[3])
+				VMOVDQU(Mem{Base: work[3], Disp: 32}, workRegHi[3])
+
+				VPXOR(workRegLo[2], workRegLo[3], workRegLo[3])
+				VPXOR(workRegHi[2], workRegHi[3], workRegHi[3])
+
+				// Test bit 1
+				if (skipMask & 2) == 0 {
+					leoMulAdd256(ctx, workRegLo[2], workRegHi[2], workRegLo[3], workRegHi[3], table23)
+				}
+
+				// Second layer:
+				VPXOR(workRegLo[0], workRegLo[2], workRegLo[2])
+				VPXOR(workRegHi[0], workRegHi[2], workRegHi[2])
+				VPXOR(workRegLo[1], workRegLo[3], workRegLo[3])
+				VPXOR(workRegHi[1], workRegHi[3], workRegHi[3])
+
+				// Test bit 2
+				if (skipMask & 4) == 0 {
+					leoMulAdd256(ctx, workRegLo[0], workRegHi[0], workRegLo[2], workRegHi[2], table02)
+					leoMulAdd256(ctx, workRegLo[1], workRegHi[1], workRegLo[3], workRegHi[3], table02)
+				}
+
+				// Store + Next loop:
+				for i := range work {
+					VMOVDQU(workRegLo[i], Mem{Base: work[i], Disp: 0})
+					VMOVDQU(workRegHi[i], Mem{Base: work[i], Disp: 32})
+					ADDQ(U8(64), work[i])
+				}
+
+				SUBQ(U8(64), bytes)
+				JNZ(LabelRef("loop"))
+
+				VZEROUPPER()
+				RET()
+			}
+			{
+				TEXT("fftDIT4_"+suffix, attr.NOSPLIT, fmt.Sprintf("func(work [][]byte, dist int, table01 *[8*16]uint8, table23 *[8*16]uint8, table02 *[8*16]uint8)"))
+				Pragma("noescape")
+				Comment("dist must be multiplied by 24 (size of slice header)")
+
+				// Unpack tables to stack. Slower.
+				const unpackTables = false
+
+				table01Ptr := Load(Param("table01"), GP64())
+				table23Ptr := Load(Param("table23"), GP64())
+				table02Ptr := Load(Param("table02"), GP64())
+
+				// Prepare table pointers.
+				table01 := [4]table256{}
+				table23 := [4]table256{}
+				table02 := [4]table256{}
+				if avx512 {
+					usedZmm := 0
+					fill := func(t *[4]table256, ptr reg.Register) {
+						for i := range table01 {
+							t := &t[i]
+							if len(extZMMs)-usedZmm >= 2 {
+								tmpLo, tmpHi := YMM(), YMM()
+								t.useZmmLo, t.useZmmHi = &extZMMs[usedZmm], &extZMMs[usedZmm+1]
+								usedZmm += 2
+								// Load and expand tables
+								VBROADCASTI128(Mem{Base: ptr, Disp: i * 16}, tmpLo)
+								VBROADCASTI128(Mem{Base: ptr, Disp: i*16 + 16*4}, tmpHi)
+								VMOVAPS(tmpLo.AsZ(), *t.useZmmLo)
+								VMOVAPS(tmpHi.AsZ(), *t.useZmmHi)
+							} else {
+								t.loadLo128 = &Mem{Base: ptr, Disp: i * 16}
+								t.loadHi128 = &Mem{Base: ptr, Disp: i*16 + 16*4}
+							}
+						}
+					}
+					if (skipMask & 1) == 0 {
+						fill(&table02, table02Ptr)
+					}
+					if (skipMask & 2) == 0 {
+						fill(&table01, table01Ptr)
+					}
+					if (skipMask & 4) == 0 {
+						fill(&table23, table23Ptr)
+					}
+				}
+				for i := range table01 {
+					if avx512 {
+						continue
+					}
+					if unpackTables {
+						toStack := func(m Mem) *Mem {
+							stack := AllocLocal(32)
+							y := YMM()
+							VBROADCASTI128(m, y)
+							VMOVDQU(y, stack)
+							return &stack
+						}
+
+						table01[i].loadLo256 = toStack(Mem{Base: table01Ptr, Disp: i * 16})
+						table23[i].loadLo256 = toStack(Mem{Base: table23Ptr, Disp: i * 16})
+						table02[i].loadLo256 = toStack(Mem{Base: table02Ptr, Disp: i * 16})
+
+						table01[i].loadHi256 = toStack(Mem{Base: table01Ptr, Disp: i*16 + 16*4})
+						table23[i].loadHi256 = toStack(Mem{Base: table23Ptr, Disp: i*16 + 16*4})
+						table02[i].loadHi256 = toStack(Mem{Base: table02Ptr, Disp: i*16 + 16*4})
+					} else {
+						table01[i].loadLo128 = &Mem{Base: table01Ptr, Disp: i * 16}
+						table23[i].loadLo128 = &Mem{Base: table23Ptr, Disp: i * 16}
+						table02[i].loadLo128 = &Mem{Base: table02Ptr, Disp: i * 16}
+
+						table01[i].loadHi128 = &Mem{Base: table01Ptr, Disp: i*16 + 16*4}
+						table23[i].loadHi128 = &Mem{Base: table23Ptr, Disp: i*16 + 16*4}
+						table02[i].loadHi128 = &Mem{Base: table02Ptr, Disp: i*16 + 16*4}
+					}
+				}
+				// Generate mask
+				ctx.clrMask = YMM()
+				tmpMask := GP64()
+				MOVQ(U32(15), tmpMask)
+				MOVQ(tmpMask, ctx.clrMask.AsX())
+				VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+				dist := Load(Param("dist"), GP64())
+
+				// Pointers to each "work"
+				var work [4]reg.GPVirtual
+				workTable := Load(Param("work").Base(), GP64()) // &work[0]
+				bytes := GP64()
+
+				// Load length of work[0]
+				MOVQ(Mem{Base: workTable, Disp: 8}, bytes)
+
+				offset := GP64()
+				XORQ(offset, offset)
+				for i := range work {
+					work[i] = GP64()
+					// work[i] = &workTable[dist*i]
+					MOVQ(Mem{Base: workTable, Index: offset, Scale: 1}, work[i])
+					if i < len(work)-1 {
+						ADDQ(dist, offset)
+					}
+				}
+				var workRegLo [4]reg.VecVirtual
+				var workRegHi [4]reg.VecVirtual
+
+				workRegLo[0], workRegHi[0] = YMM(), YMM()
+				workRegLo[1], workRegHi[1] = YMM(), YMM()
+				workRegLo[2], workRegHi[2] = YMM(), YMM()
+				workRegLo[3], workRegHi[3] = YMM(), YMM()
+
+				Label("loop")
+				VMOVDQU(Mem{Base: work[0], Disp: 0}, workRegLo[0])
+				VMOVDQU(Mem{Base: work[0], Disp: 32}, workRegHi[0])
+				VMOVDQU(Mem{Base: work[2], Disp: 0}, workRegLo[2])
+				VMOVDQU(Mem{Base: work[2], Disp: 32}, workRegHi[2])
+
+				VMOVDQU(Mem{Base: work[1], Disp: 0}, workRegLo[1])
+				VMOVDQU(Mem{Base: work[1], Disp: 32}, workRegHi[1])
+				VMOVDQU(Mem{Base: work[3], Disp: 0}, workRegLo[3])
+				VMOVDQU(Mem{Base: work[3], Disp: 32}, workRegHi[3])
+
+				// First layer:
+
+				// Test bit 0
+				if (skipMask & 1) == 0 {
+					leoMulAdd256(ctx, workRegLo[0], workRegHi[0], workRegLo[2], workRegHi[2], table02)
+					leoMulAdd256(ctx, workRegLo[1], workRegHi[1], workRegLo[3], workRegHi[3], table02)
+				}
+
+				VPXOR(workRegLo[0], workRegLo[2], workRegLo[2])
+				VPXOR(workRegHi[0], workRegHi[2], workRegHi[2])
+				VPXOR(workRegLo[1], workRegLo[3], workRegLo[3])
+				VPXOR(workRegHi[1], workRegHi[3], workRegHi[3])
+
+				// Second layer:
+				// Test bit 1
+				if (skipMask & 2) == 0 {
+					leoMulAdd256(ctx, workRegLo[0], workRegHi[0], workRegLo[1], workRegHi[1], table01)
+				}
+				VPXOR(workRegLo[0], workRegLo[1], workRegLo[1])
+				VPXOR(workRegHi[0], workRegHi[1], workRegHi[1])
+
+				// Store...
+				for i := range work[:2] {
+					VMOVDQU(workRegLo[i], Mem{Base: work[i], Disp: 0})
+					VMOVDQU(workRegHi[i], Mem{Base: work[i], Disp: 32})
+					ADDQ(U8(64), work[i])
+				}
+
+				// Test bit 2
+				if (skipMask & 4) == 0 {
+					leoMulAdd256(ctx, workRegLo[2], workRegHi[2], workRegLo[3], workRegHi[3], table23)
+				}
+				VPXOR(workRegLo[2], workRegLo[3], workRegLo[3])
+				VPXOR(workRegHi[2], workRegHi[3], workRegHi[3])
+
+				// Store + Next loop:
+				for i := range work[2:] {
+					i := i + 2
+					VMOVDQU(workRegLo[i], Mem{Base: work[i], Disp: 0})
+					VMOVDQU(workRegHi[i], Mem{Base: work[i], Disp: 32})
+					ADDQ(U8(64), work[i])
+				}
+
+				SUBQ(U8(64), bytes)
+				JNZ(LabelRef("loop"))
+
+				VZEROUPPER()
+				RET()
+			}
+		}
+	}
+
+	// SSSE3:
+	ctx.avx512 = false
+	{
+		TEXT("ifftDIT2_ssse3", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table  *[8*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		tables := [4]table128{}
+		for i, t := range tables {
+			// We almost have enough space for all tables.
+			if i > 2 {
+				t.Lo, t.Hi = Mem{Base: tablePtr, Disp: i * 16}, Mem{Base: tablePtr, Disp: i*16 + 16*4}
+			} else {
+				t.Lo, t.Hi = XMM(), XMM()
+				MOVUPS(Mem{Base: tablePtr, Disp: i * 16}, t.Lo)
+				MOVUPS(Mem{Base: tablePtr, Disp: i*16 + 16*4}, t.Hi)
+			}
+			tables[i] = t
+		}
+		// Generate mask
+		zero := XMM()
+		XORPS(zero, zero) // All-zero shuffle indices, so PSHUFB below broadcasts byte 0 (0xf).
+		fifteen, mask := GP64(), XMM()
+		MOVQ(U32(0xf), fifteen)
+		MOVQ(fifteen, mask)
+		PSHUFB(zero, mask)
+		ctx.clrMask128 = mask
+
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+
+		Label("loop")
+		for i := 0; i < 2; i++ {
+			xLo, xHi, yLo, yHi := XMM(), XMM(), XMM(), XMM()
+			MOVUPS(Mem{Base: x, Disp: i*16 + 0}, xLo)
+			MOVUPS(Mem{Base: x, Disp: i*16 + 32}, xHi)
+			MOVUPS(Mem{Base: y, Disp: i*16 + 0}, yLo)
+			MOVUPS(Mem{Base: y, Disp: i*16 + 32}, yHi)
+			PXOR(xLo, yLo)
+			PXOR(xHi, yHi)
+			MOVUPS(yLo, Mem{Base: y, Disp: i*16 + 0})
+			MOVUPS(yHi, Mem{Base: y, Disp: i*16 + 32})
+			leoMulAdd128(ctx, xLo, xHi, yLo, yHi, tables)
+			MOVUPS(xLo, Mem{Base: x, Disp: i*16 + 0})
+			MOVUPS(xHi, Mem{Base: x, Disp: i*16 + 32})
+		}
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JNZ(LabelRef("loop"))
+
+		RET()
+	}
+	{
+		TEXT("fftDIT2_ssse3", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table  *[8*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		tables := [4]table128{}
+		for i, t := range tables {
+			// We almost have enough space for all tables.
+			if i > 2 {
+				t.Lo, t.Hi = Mem{Base: tablePtr, Disp: i * 16}, Mem{Base: tablePtr, Disp: i*16 + 16*4}
+			} else {
+				t.Lo, t.Hi = XMM(), XMM()
+				MOVUPS(Mem{Base: tablePtr, Disp: i * 16}, t.Lo)
+				MOVUPS(Mem{Base: tablePtr, Disp: i*16 + 16*4}, t.Hi)
+			}
+			tables[i] = t
+		}
+		// Generate mask
+		zero := XMM()
+		XORPS(zero, zero) // All-zero shuffle indices, so PSHUFB below broadcasts byte 0 (0xf).
+		fifteen, mask := GP64(), XMM()
+		MOVQ(U32(0xf), fifteen)
+		MOVQ(fifteen, mask)
+		PSHUFB(zero, mask)
+		ctx.clrMask128 = mask
+
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+
+		Label("loop")
+		for i := 0; i < 2; i++ {
+			xLo, xHi, yLo, yHi := XMM(), XMM(), XMM(), XMM()
+			MOVUPS(Mem{Base: y, Disp: i*16 + 0}, yLo)
+			MOVUPS(Mem{Base: y, Disp: i*16 + 32}, yHi)
+
+			prodLo, prodHi := leoMul128(ctx, yLo, yHi, tables)
+
+			MOVUPS(Mem{Base: x, Disp: i*16 + 0}, xLo)
+			MOVUPS(Mem{Base: x, Disp: i*16 + 32}, xHi)
+			PXOR(prodLo, xLo)
+			PXOR(prodHi, xHi)
+			MOVUPS(xLo, Mem{Base: x, Disp: i*16 + 0})
+			MOVUPS(xHi, Mem{Base: x, Disp: i*16 + 32})
+
+			PXOR(xLo, yLo)
+			PXOR(xHi, yHi)
+			MOVUPS(yLo, Mem{Base: y, Disp: i*16 + 0})
+			MOVUPS(yHi, Mem{Base: y, Disp: i*16 + 32})
+
+		}
+
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JNZ(LabelRef("loop"))
+
+		RET()
+	}
+	{
+		TEXT("mulgf16_ssse3", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table  *[8*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		tables := [4]table128{}
+		for i, t := range tables {
+			// We have enough space for all tables.
+			if i > 3 {
+				t.Lo, t.Hi = Mem{Base: tablePtr, Disp: i * 16}, Mem{Base: tablePtr, Disp: i*16 + 16*4}
+			} else {
+				t.Lo, t.Hi = XMM(), XMM()
+				MOVUPS(Mem{Base: tablePtr, Disp: i * 16}, t.Lo)
+				MOVUPS(Mem{Base: tablePtr, Disp: i*16 + 16*4}, t.Hi)
+			}
+			tables[i] = t
+		}
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+		// Generate mask
+		zero := XMM()
+		XORPS(zero, zero) // All-zero shuffle indices, so PSHUFB below broadcasts byte 0 (0xf).
+		fifteen, mask := GP64(), XMM()
+		MOVQ(U32(0xf), fifteen)
+		MOVQ(fifteen, mask)
+		PSHUFB(zero, mask)
+		ctx.clrMask128 = mask
+
+		Label("loop")
+		for i := 0; i < 2; i++ {
+			dataLo, dataHi := XMM(), XMM()
+			MOVUPS(Mem{Base: y, Disp: i*16 + 0}, dataLo)
+			MOVUPS(Mem{Base: y, Disp: i*16 + 32}, dataHi)
+
+			prodLo, prodHi := leoMul128(ctx, dataLo, dataHi, tables)
+			MOVUPS(prodLo, Mem{Base: x, Disp: i*16 + 0})
+			MOVUPS(prodHi, Mem{Base: x, Disp: i*16 + 32})
+		}
+
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JNZ(LabelRef("loop"))
+
+		RET()
+	}
+
+}
+
+// xLo, xHi updated, yLo, yHi preserved...
+func leoMulAdd256(ctx gf16ctx, xLo, xHi, yLo, yHi reg.VecVirtual, table [4]table256) {
+	// inlined:
+	// prodLo, prodHi := leoMul256(ctx, yLo, yHi, table)
+	lo := yLo
+	hi := yHi
+	data0, data1 := YMM(), YMM()
+	VPSRLQ(U8(4), lo, data1)         // data1 = lo >> 4
+	VPAND(ctx.clrMask, lo, data0)    // data0 = lo&0xf
+	VPAND(ctx.clrMask, data1, data1) // data1 = data1 & 0xf
+	prodLo, prodHi := YMM(), YMM()
+	table[0].prepare()
+	VPSHUFB(data0, table[0].Lo, prodLo)
+	VPSHUFB(data0, table[0].Hi, prodHi)
+	tmpLo, tmpHi := YMM(), YMM()
+	table[1].prepare()
+	VPSHUFB(data1, table[1].Lo, tmpLo)
+	VPSHUFB(data1, table[1].Hi, tmpHi)
+	VPXOR(prodLo, tmpLo, prodLo)
+	VPXOR(prodHi, tmpHi, prodHi)
+
+	// Now process high
+	data0, data1 = YMM(), YMM() // Realloc to break dep
+	VPAND(hi, ctx.clrMask, data0)
+	VPSRLQ(U8(4), hi, data1)
+	VPAND(ctx.clrMask, data1, data1)
+
+	tmpLo, tmpHi = YMM(), YMM() // Realloc to break dep
+	table[2].prepare()
+	VPSHUFB(data0, table[2].Lo, tmpLo)
+	VPSHUFB(data0, table[2].Hi, tmpHi)
+	VPXOR(prodLo, tmpLo, prodLo)
+	VPXOR(prodHi, tmpHi, prodHi)
+	table[3].prepare()
+	VPSHUFB(data1, table[3].Lo, tmpLo)
+	VPSHUFB(data1, table[3].Hi, tmpHi)
+	if ctx.avx512 {
+		VPTERNLOGD(U8(0x96), prodLo, tmpLo, xLo)
+		VPTERNLOGD(U8(0x96), prodHi, tmpHi, xHi)
+	} else {
+		VPXOR3way(prodLo, tmpLo, xLo)
+		VPXOR3way(prodHi, tmpHi, xHi)
+	}
+}
+
+// leoMul256 lo, hi preserved...
+func leoMul256(ctx gf16ctx, lo, hi reg.VecVirtual, table [4]table256) (prodLo, prodHi reg.VecVirtual) {
+	data0, data1 := YMM(), YMM()
+	VPSRLQ(U8(4), lo, data1)         // data1 = lo >> 4
+	VPAND(ctx.clrMask, lo, data0)    // data0 = lo&0xf
+	VPAND(ctx.clrMask, data1, data1) // data1 = data1 & 0xf
+	prodLo, prodHi = YMM(), YMM()
+	table[0].prepare()
+	VPSHUFB(data0, table[0].Lo, prodLo)
+	VPSHUFB(data0, table[0].Hi, prodHi)
+	tmpLo, tmpHi := YMM(), YMM()
+	table[1].prepare()
+	VPSHUFB(data1, table[1].Lo, tmpLo)
+	VPSHUFB(data1, table[1].Hi, tmpHi)
+	VPXOR(prodLo, tmpLo, prodLo)
+	VPXOR(prodHi, tmpHi, prodHi)
+
+	// Now process high
+	data0, data1 = YMM(), YMM() // Realloc to break dep
+	VPAND(hi, ctx.clrMask, data0)
+	VPSRLQ(U8(4), hi, data1)
+	VPAND(ctx.clrMask, data1, data1)
+
+	tmpLo, tmpHi = YMM(), YMM() // Realloc to break dep
+	table[2].prepare()
+	VPSHUFB(data0, table[2].Lo, tmpLo)
+	VPSHUFB(data0, table[2].Hi, tmpHi)
+	VPXOR(prodLo, tmpLo, prodLo)
+	VPXOR(prodHi, tmpHi, prodHi)
+	table[3].prepare()
+	VPSHUFB(data1, table[3].Lo, tmpLo)
+	VPSHUFB(data1, table[3].Hi, tmpHi)
+	VPXOR(prodLo, tmpLo, prodLo)
+	VPXOR(prodHi, tmpHi, prodHi)
+	return
+}
+
+func leoMulAdd128(ctx gf16ctx, xLo, xHi, yLo, yHi reg.VecVirtual, table [4]table128) {
+	prodLo, prodHi := leoMul128(ctx, yLo, yHi, table)
+	PXOR(prodLo, xLo)
+	PXOR(prodHi, xHi)
+}
+
+// leoMul128 lo, hi preserved (but likely will take extra regs to reuse)
+func leoMul128(ctx gf16ctx, lo, hi reg.VecVirtual, table [4]table128) (prodLo, prodHi reg.VecVirtual) {
+	data0, data1 := XMM(), XMM()
+	MOVAPS(lo, data1)
+	PSRLQ(U8(4), data1) // data1 = lo >> 4
+	MOVAPS(lo, data0)
+	PAND(ctx.clrMask128, data0) // data0 = lo&0xf
+	PAND(ctx.clrMask128, data1) // data1 = data1 & 0xf
+	prodLo, prodHi = XMM(), XMM()
+	MOVUPS(table[0].Lo, prodLo)
+	MOVUPS(table[0].Hi, prodHi)
+	PSHUFB(data0, prodLo)
+	PSHUFB(data0, prodHi)
+	tmpLo, tmpHi := XMM(), XMM()
+	MOVUPS(table[1].Lo, tmpLo)
+	MOVUPS(table[1].Hi, tmpHi)
+	PSHUFB(data1, tmpLo)
+	PSHUFB(data1, tmpHi)
+	PXOR(tmpLo, prodLo)
+	PXOR(tmpHi, prodHi)
+
+	// Now process high
+	data0, data1 = XMM(), XMM() // Realloc to break dep
+	MOVAPS(hi, data0)
+	MOVAPS(hi, data1)
+	PAND(ctx.clrMask128, data0)
+	PSRLQ(U8(4), data1)
+	PAND(ctx.clrMask128, data1)
+
+	tmpLo, tmpHi = XMM(), XMM() // Realloc to break dep
+	MOVUPS(table[2].Lo, tmpLo)
+	MOVUPS(table[2].Hi, tmpHi)
+	PSHUFB(data0, tmpLo)
+	PSHUFB(data0, tmpHi)
+	PXOR(tmpLo, prodLo)
+	PXOR(tmpHi, prodHi)
+	MOVUPS(table[3].Lo, tmpLo)
+	MOVUPS(table[3].Hi, tmpHi)
+	PSHUFB(data1, tmpLo)
+	PSHUFB(data1, tmpHi)
+	PXOR(tmpLo, prodLo)
+	PXOR(tmpHi, prodHi)
+	return
+}
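
For reference, leoMul128 and leoMul256 implement the same nibble-table GF(2^16) multiply at different vector widths. Below is a scalar Go sketch of the per-byte-lane operation; it is not part of the diff, the function name is illustrative, and the table layout (four 16-byte low-result tables at offsets 0-63 followed by four high-result tables at 64-127) is inferred from the Disp: i*16 and Disp: i*16+16*4 loads above.

	// mulgf16Scalar is a reference sketch of the product computed by
	// leoMul128/leoMul256: lo/hi hold the low and high bytes of each
	// GF(2^16) element, and every output byte is the xor of four
	// 16-entry nibble lookups (one PSHUFB each in the generated code).
	func mulgf16Scalar(prodLo, prodHi, lo, hi []byte, table *[8 * 16]uint8) {
		t0lo, t1lo := table[0:16], table[16:32]
		t2lo, t3lo := table[32:48], table[48:64]
		t0hi, t1hi := table[64:80], table[80:96]
		t2hi, t3hi := table[96:112], table[112:128]
		for i := range lo {
			l, h := lo[i], hi[i]
			prodLo[i] = t0lo[l&15] ^ t1lo[l>>4] ^ t2lo[h&15] ^ t3lo[h>>4]
			prodHi[i] = t0hi[l&15] ^ t1hi[l>>4] ^ t2hi[h&15] ^ t3hi[h>>4]
		}
	}
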
diff --git a/_gen/gf8.go b/_gen/gf8.go
new file mode 100644
index 0000000..d177a55
--- /dev/null
+++ b/_gen/gf8.go
@@ -0,0 +1,564 @@
+//go:build generate
+// +build generate
+
+// Copyright 2022+, Klaus Post. See LICENSE for details.
+
+package main
+
+import (
+	"fmt"
+
+	"github.com/mmcloughlin/avo/attr"
+	. "github.com/mmcloughlin/avo/build"
+	. "github.com/mmcloughlin/avo/operand"
+	"github.com/mmcloughlin/avo/reg"
+)
+
+type gf8ctx struct {
+	clrMask    reg.VecVirtual
+	clrMask128 reg.VecVirtual
+}
+
+func genGF8() {
+	var ctx gf8ctx
+	// Ported from static void IFFT_DIT2
+	// https://github.com/catid/leopard/blob/master/LeopardFF8.cpp#L599
+	if true {
+		TEXT("ifftDIT28_avx2", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table *[2*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		var tables table256
+		tables.Lo, tables.Hi = YMM(), YMM()
+		// Load and expand tables
+		VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, tables.Lo)
+		VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, tables.Hi)
+
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+		// Generate mask
+		ctx.clrMask = YMM()
+		tmpMask := GP64()
+		MOVQ(U32(15), tmpMask)
+		MOVQ(tmpMask, ctx.clrMask.AsX())
+		VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+		x0, x1, y0, y1 := YMM(), YMM(), YMM(), YMM()
+		Label("loop")
+		VMOVDQU(Mem{Base: x, Disp: 0}, x0)
+		VMOVDQU(Mem{Base: x, Disp: 32}, x1)
+		VMOVDQU(Mem{Base: y, Disp: 0}, y0)
+		VMOVDQU(Mem{Base: y, Disp: 32}, y1)
+
+		// Update y and store
+		VPXOR(y0, x0, y0)
+		VPXOR(y1, x1, y1)
+		VMOVDQU(y0, Mem{Base: y, Disp: 0})
+		VMOVDQU(y1, Mem{Base: y, Disp: 32})
+
+		// Update x and store
+		leo8MulAdd256(ctx, x0, y0, tables)
+		leo8MulAdd256(ctx, x1, y1, tables)
+		VMOVDQU(x0, Mem{Base: x, Disp: 0})
+		VMOVDQU(x1, Mem{Base: x, Disp: 32})
+
+		// Move on
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JA(LabelRef("loop"))
+
+		VZEROUPPER()
+		RET()
+	}
+	// https://github.com/catid/leopard/blob/master/LeopardFF8.cpp#L1323
+	if true {
+		TEXT("fftDIT28_avx2", attr.NOSPLIT, fmt.Sprintf("func(x, y []byte, table *[2*16]uint8)"))
+		Pragma("noescape")
+		tablePtr := Load(Param("table"), GP64())
+		var tables table256
+		tables.Lo, tables.Hi = YMM(), YMM()
+		// Load and expand tables
+		VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, tables.Lo)
+		VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, tables.Hi)
+
+		bytes := Load(Param("x").Len(), GP64())
+		x := Load(Param("x").Base(), GP64())
+		y := Load(Param("y").Base(), GP64())
+		// Generate mask
+		ctx.clrMask = YMM()
+		tmpMask := GP64()
+		MOVQ(U32(15), tmpMask)
+		MOVQ(tmpMask, ctx.clrMask.AsX())
+		VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+		x0, x1, y0, y1 := YMM(), YMM(), YMM(), YMM()
+		Label("loop")
+		VMOVDQU(Mem{Base: x, Disp: 0}, x0)
+		VMOVDQU(Mem{Base: x, Disp: 32}, x1)
+		VMOVDQU(Mem{Base: y, Disp: 0}, y0)
+		VMOVDQU(Mem{Base: y, Disp: 32}, y1)
+
+		leo8MulAdd256(ctx, x0, y0, tables)
+		leo8MulAdd256(ctx, x1, y1, tables)
+		VMOVDQU(x0, Mem{Base: x, Disp: 0})
+		VMOVDQU(x1, Mem{Base: x, Disp: 32})
+
+		VPXOR(y0, x0, y0)
+		VPXOR(y1, x1, y1)
+		VMOVDQU(y0, Mem{Base: y, Disp: 0})
+		VMOVDQU(y1, Mem{Base: y, Disp: 32})
+		ADDQ(U8(64), x)
+		ADDQ(U8(64), y)
+		SUBQ(U8(64), bytes)
+		JA(LabelRef("loop"))
+
+		VZEROUPPER()
+		RET()
+	}
+
+	x := [8]int{}
+	for skipMask := range x[:] {
+		{
+			var suffix = "avx2_" + fmt.Sprint(skipMask)
+			TEXT("ifftDIT48_"+suffix, attr.NOSPLIT, fmt.Sprintf("func(work [][]byte, dist int, t01, t23, t02 *[2*16]uint8)"))
+			Pragma("noescape")
+			var t01, t23, t02 table256
+			// Load and expand tables
+
+			if (skipMask & 1) == 0 {
+				tablePtr := Load(Param("t01"), GP64())
+				t01.Lo, t01.Hi = YMM(), YMM()
+				// We need one register when loading all.
+				if skipMask == 0 {
+					t01.loadLo128 = &Mem{Base: tablePtr, Disp: 0}
+				} else {
+					VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, t01.Lo)
+				}
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, t01.Hi)
+			}
+			if (skipMask & 2) == 0 {
+				tablePtr := Load(Param("t23"), GP64())
+				t23.Lo, t23.Hi = YMM(), YMM()
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, t23.Lo)
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, t23.Hi)
+			}
+			if (skipMask & 4) == 0 {
+				tablePtr := Load(Param("t02"), GP64())
+				t02.Lo, t02.Hi = YMM(), YMM()
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, t02.Lo)
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, t02.Hi)
+			}
+			dist := Load(Param("dist"), GP64())
+
+			var work [4]reg.GPVirtual
+			workTable := Load(Param("work").Base(), GP64()) // &work[0]
+			bytes := GP64()
+			MOVQ(Mem{Base: workTable, Disp: 8}, bytes)
+
+			offset := GP64()
+			XORQ(offset, offset)
+			for i := range work {
+				work[i] = GP64()
+				// work[i] = &workTable[dist*i]
+				MOVQ(Mem{Base: workTable, Index: offset, Scale: 1}, work[i])
+				if i < len(work)-1 {
+					ADDQ(dist, offset)
+				}
+			}
+
+			// Generate mask
+			ctx.clrMask = YMM()
+			tmpMask := GP64()
+			MOVQ(U32(15), tmpMask)
+			MOVQ(tmpMask, ctx.clrMask.AsX())
+			VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+			Label("loop")
+			var workReg [4]reg.VecVirtual
+			var workReg2 [4]reg.VecVirtual
+
+			workReg[0] = YMM()
+			workReg[1] = YMM()
+			workReg2[0] = YMM()
+			workReg2[1] = YMM()
+
+			VMOVDQU(Mem{Base: work[0], Disp: 0}, workReg[0])
+			VMOVDQU(Mem{Base: work[1], Disp: 0}, workReg[1])
+			VMOVDQU(Mem{Base: work[0], Disp: 32}, workReg2[0])
+			VMOVDQU(Mem{Base: work[1], Disp: 32}, workReg2[1])
+
+			// work1_reg = _mm256_xor_si256(work0_reg, work1_reg);
+			VPXOR(workReg[1], workReg[0], workReg[1])
+			VPXOR(workReg2[1], workReg2[0], workReg2[1])
+			if (skipMask & 1) == 0 {
+				t01.prepare()
+				leo8MulAdd256(ctx, workReg[0], workReg[1], t01)
+				leo8MulAdd256(ctx, workReg2[0], workReg2[1], t01)
+			}
+
+			workReg[2] = YMM()
+			workReg[3] = YMM()
+			workReg2[2] = YMM()
+			workReg2[3] = YMM()
+			VMOVDQU(Mem{Base: work[2], Disp: 0}, workReg[2])
+			VMOVDQU(Mem{Base: work[3], Disp: 0}, workReg[3])
+			VMOVDQU(Mem{Base: work[2], Disp: 32}, workReg2[2])
+			VMOVDQU(Mem{Base: work[3], Disp: 32}, workReg2[3])
+
+			//work3_reg = _mm256_xor_si256(work2_reg, work3_reg)
+			VPXOR(workReg[2], workReg[3], workReg[3])
+			VPXOR(workReg2[2], workReg2[3], workReg2[3])
+			if (skipMask & 2) == 0 {
+				leo8MulAdd256(ctx, workReg[2], workReg[3], t23)
+				leo8MulAdd256(ctx, workReg2[2], workReg2[3], t23)
+			}
+
+			// Second layer:
+			// work2_reg = _mm256_xor_si256(work0_reg, work2_reg);
+			// work3_reg = _mm256_xor_si256(work1_reg, work3_reg);
+			VPXOR(workReg[0], workReg[2], workReg[2])
+			VPXOR(workReg[1], workReg[3], workReg[3])
+			VPXOR(workReg2[0], workReg2[2], workReg2[2])
+			VPXOR(workReg2[1], workReg2[3], workReg2[3])
+
+			if (skipMask & 4) == 0 {
+				leo8MulAdd256(ctx, workReg[0], workReg[2], t02)
+				leo8MulAdd256(ctx, workReg[1], workReg[3], t02)
+				leo8MulAdd256(ctx, workReg2[0], workReg2[2], t02)
+				leo8MulAdd256(ctx, workReg2[1], workReg2[3], t02)
+			}
+
+			// Store + Next loop:
+			for i := range work {
+				VMOVDQU(workReg[i], Mem{Base: work[i], Disp: 0})
+				VMOVDQU(workReg2[i], Mem{Base: work[i], Disp: 32})
+				ADDQ(U8(64), work[i])
+			}
+
+			SUBQ(U8(64), bytes)
+			JA(LabelRef("loop"))
+
+			VZEROUPPER()
+			RET()
+		}
+		{
+			var suffix = "avx2_" + fmt.Sprint(skipMask)
+			TEXT("fftDIT48_"+suffix, attr.NOSPLIT, fmt.Sprintf("func(work [][]byte, dist int, t01, t23, t02 *[2*16]uint8)"))
+			Pragma("noescape")
+			var t01, t23, t02 table256
+			// Load and expand tables
+
+			if (skipMask & 2) == 0 {
+				tablePtr := Load(Param("t01"), GP64())
+				t01.Lo, t01.Hi = YMM(), YMM()
+				if skipMask == 0 {
+					t01.loadLo128 = &Mem{Base: tablePtr, Disp: 0}
+				} else {
+					// We need additional registers
+					VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, t01.Lo)
+				}
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, t01.Hi)
+			}
+			if (skipMask & 4) == 0 {
+				tablePtr := Load(Param("t23"), GP64())
+				t23.Lo, t23.Hi = YMM(), YMM()
+				if skipMask == 0 {
+					t23.loadLo128 = &Mem{Base: tablePtr, Disp: 0}
+				} else {
+					VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, t23.Lo)
+				}
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, t23.Hi)
+			}
+			if (skipMask & 1) == 0 {
+				tablePtr := Load(Param("t02"), GP64())
+
+				t02.Lo, t02.Hi = YMM(), YMM()
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 0}, t02.Lo)
+				VBROADCASTI128(Mem{Base: tablePtr, Disp: 16}, t02.Hi)
+			}
+			dist := Load(Param("dist"), GP64())
+
+			var work [4]reg.GPVirtual
+			workTable := Load(Param("work").Base(), GP64()) // &work[0]
+			bytes := GP64()
+			MOVQ(Mem{Base: workTable, Disp: 8}, bytes)
+
+			offset := GP64()
+			XORQ(offset, offset)
+			for i := range work {
+				work[i] = GP64()
+				// work[i] = &workTable[dist*i]
+				MOVQ(Mem{Base: workTable, Index: offset, Scale: 1}, work[i])
+				if i < len(work)-1 {
+					ADDQ(dist, offset)
+				}
+			}
+
+			// Generate mask
+			ctx.clrMask = YMM()
+			tmpMask := GP64()
+			MOVQ(U32(15), tmpMask)
+			MOVQ(tmpMask, ctx.clrMask.AsX())
+			VPBROADCASTB(ctx.clrMask.AsX(), ctx.clrMask)
+
+			Label("loop")
+			var workReg [4]reg.VecVirtual
+			var workReg2 [4]reg.VecVirtual
+
+			for i := range workReg {
+				workReg[i] = YMM()
+				workReg2[i] = YMM()
+			}
+
+			VMOVDQU(Mem{Base: work[0], Disp: 0}, workReg[0])
+			VMOVDQU(Mem{Base: work[0], Disp: 32}, workReg2[0])
+			VMOVDQU(Mem{Base: work[2], Disp: 0}, workReg[2])
+			VMOVDQU(Mem{Base: work[2], Disp: 32}, workReg2[2])
+			VMOVDQU(Mem{Base: work[1], Disp: 0}, workReg[1])
+			VMOVDQU(Mem{Base: work[1], Disp: 32}, workReg2[1])
+			VMOVDQU(Mem{Base: work[3], Disp: 0}, workReg[3])
+			VMOVDQU(Mem{Base: work[3], Disp: 32}, workReg2[3])
+
+			// work1_reg = _mm256_xor_si256(work0_reg, work1_reg);
+			if (skipMask & 1) == 0 {
+				leo8MulAdd256(ctx, workReg[0], workReg[2], t02)
+				leo8MulAdd256(ctx, workReg2[0], workReg2[2], t02)
+
+				leo8MulAdd256(ctx, workReg[1], workReg[3], t02)
+				leo8MulAdd256(ctx, workReg2[1], workReg2[3], t02)
+			}
+			// work2_reg = _mm256_xor_si256(work0_reg, work2_reg);
+			// work3_reg = _mm256_xor_si256(work1_reg, work3_reg);
+			VPXOR(workReg[0], workReg[2], workReg[2])
+			VPXOR(workReg[1], workReg[3], workReg[3])
+			VPXOR(workReg2[0], workReg2[2], workReg2[2])
+			VPXOR(workReg2[1], workReg2[3], workReg2[3])
+
+			// Second layer:
+			if (skipMask & 2) == 0 {
+				t01.prepare()
+				leo8MulAdd256(ctx, workReg[0], workReg[1], t01)
+				leo8MulAdd256(ctx, workReg2[0], workReg2[1], t01)
+			}
+			//work1_reg = _mm256_xor_si256(work0_reg, work1_reg);
+			VPXOR(workReg[1], workReg[0], workReg[1])
+			VPXOR(workReg2[1], workReg2[0], workReg2[1])
+
+			if (skipMask & 4) == 0 {
+				t23.prepare()
+				leo8MulAdd256(ctx, workReg[2], workReg[3], t23)
+				leo8MulAdd256(ctx, workReg2[2], workReg2[3], t23)
+			}
+			// work3_reg = _mm256_xor_si256(work2_reg, work3_reg);
+			VPXOR(workReg[2], workReg[3], workReg[3])
+			VPXOR(workReg2[2], workReg2[3], workReg2[3])
+
+			// Store + Next loop:
+			for i := range work {
+				VMOVDQU(workReg[i], Mem{Base: work[i], Disp: 0})
+				VMOVDQU(workReg2[i], Mem{Base: work[i], Disp: 32})
+				ADDQ(U8(64), work[i])
+			}
+
+			SUBQ(U8(64), bytes)
+			JA(LabelRef("loop"))
+
+			VZEROUPPER()
+			RET()
+		}
+	}
+
+	// GFNI
+	for skipMask := range x[:] {
+		{
+			var suffix = "gfni_" + fmt.Sprint(skipMask)
+			TEXT("ifftDIT48_"+suffix, attr.NOSPLIT, fmt.Sprintf("func(work [][]byte, dist int, t01, t23, t02 uint64)"))
+			Pragma("noescape")
+			var t01, t23, t02 table512 = ZMM(), ZMM(), ZMM()
+			// Load and expand tables
+
+			if (skipMask & 1) == 0 {
+				tablePtr, _ := Param("t01").Resolve()
+				VBROADCASTF32X2(tablePtr.Addr, t01)
+			}
+			if (skipMask & 2) == 0 {
+				tablePtr, _ := Param("t23").Resolve()
+				VBROADCASTF32X2(tablePtr.Addr, t23)
+			}
+			if (skipMask & 4) == 0 {
+				tablePtr, _ := Param("t02").Resolve()
+				VBROADCASTF32X2(tablePtr.Addr, t02)
+			}
+			dist := Load(Param("dist"), GP64())
+
+			var work [4]reg.GPVirtual
+			workTable := Load(Param("work").Base(), GP64()) // &work[0]
+			bytes := GP64()
+			MOVQ(Mem{Base: workTable, Disp: 8}, bytes)
+
+			offset := GP64()
+			XORQ(offset, offset)
+			for i := range work {
+				work[i] = GP64()
+				// work[i] = &workTable[dist*i]
+				MOVQ(Mem{Base: workTable, Index: offset, Scale: 1}, work[i])
+				if i < len(work)-1 {
+					ADDQ(dist, offset)
+				}
+			}
+
+			Label("loop")
+			var workReg [4]reg.VecVirtual
+			for i := range workReg[:] {
+				workReg[i] = ZMM()
+				VMOVDQU64(Mem{Base: work[i], Disp: 0}, workReg[i])
+			}
+
+			// work1_reg = _mm256_xor_si256(work0_reg, work1_reg);
+			VXORPD(workReg[1], workReg[0], workReg[1])
+			if (skipMask & 1) == 0 {
+				leo8MulAdd512(ctx, workReg[0], workReg[1], t01, nil)
+			}
+
+			//work3_reg = _mm256_xor_si256(work2_reg, work3_reg)
+			VXORPD(workReg[2], workReg[3], workReg[3])
+			if (skipMask & 2) == 0 {
+				leo8MulAdd512(ctx, workReg[2], workReg[3], t23, workReg[0])
+			} else {
+				// When the branch above runs, this xor is merged into leo8MulAdd512 via its z argument.
+				VXORPD(workReg[0], workReg[2], workReg[2])
+			}
+
+			// Second layer:
+			// work2_reg = _mm256_xor_si256(work0_reg, work2_reg);
+			// work3_reg = _mm256_xor_si256(work1_reg, work3_reg);
+			VXORPD(workReg[1], workReg[3], workReg[3])
+
+			if (skipMask & 4) == 0 {
+				leo8MulAdd512(ctx, workReg[0], workReg[2], t02, nil)
+				leo8MulAdd512(ctx, workReg[1], workReg[3], t02, nil)
+			}
+
+			// Store + Next loop:
+			for i := range work {
+				VMOVDQU64(workReg[i], Mem{Base: work[i], Disp: 0})
+				ADDQ(U8(64), work[i])
+			}
+
+			SUBQ(U8(64), bytes)
+			JA(LabelRef("loop"))
+
+			VZEROUPPER()
+			RET()
+		}
+		{
+			var suffix = "gfni_" + fmt.Sprint(skipMask)
+			TEXT("fftDIT48_"+suffix, attr.NOSPLIT, fmt.Sprintf("func(work [][]byte, dist int, t01, t23, t02 uint64)"))
+			Pragma("noescape")
+			var t01, t23, t02 table512 = ZMM(), ZMM(), ZMM()
+			// Load and expand tables
+
+			if (skipMask & 2) == 0 {
+				tablePtr, _ := Param("t01").Resolve()
+				VBROADCASTF32X2(tablePtr.Addr, t01)
+			}
+			if (skipMask & 4) == 0 {
+				tablePtr, _ := Param("t23").Resolve()
+				VBROADCASTF32X2(tablePtr.Addr, t23)
+			}
+			if (skipMask & 1) == 0 {
+				tablePtr, _ := Param("t02").Resolve()
+				VBROADCASTF32X2(tablePtr.Addr, t02)
+			}
+			dist := Load(Param("dist"), GP64())
+
+			var work [4]reg.GPVirtual
+			workTable := Load(Param("work").Base(), GP64()) // &work[0]
+			bytes := GP64()
+			MOVQ(Mem{Base: workTable, Disp: 8}, bytes)
+
+			offset := GP64()
+			XORQ(offset, offset)
+			for i := range work {
+				work[i] = GP64()
+				// work[i] = &workTable[dist*i]
+				MOVQ(Mem{Base: workTable, Index: offset, Scale: 1}, work[i])
+				if i < len(work)-1 {
+					ADDQ(dist, offset)
+				}
+			}
+
+			Label("loop")
+			var workReg [4]reg.VecVirtual
+
+			for i := range workReg {
+				workReg[i] = ZMM()
+				VMOVDQU64(Mem{Base: work[i], Disp: 0}, workReg[i])
+			}
+
+			// work1_reg = _mm256_xor_si256(work0_reg, work1_reg);
+			if (skipMask & 1) == 0 {
+				leo8MulAdd512(ctx, workReg[0], workReg[2], t02, nil)
+				leo8MulAdd512(ctx, workReg[1], workReg[3], t02, nil)
+			}
+			// work2_reg = _mm256_xor_si256(work0_reg, work2_reg);
+			// work3_reg = _mm256_xor_si256(work1_reg, work3_reg);
+			VXORPD(workReg[0], workReg[2], workReg[2])
+			VXORPD(workReg[1], workReg[3], workReg[3])
+
+			// Second layer:
+			if (skipMask & 2) == 0 {
+				leo8MulAdd512(ctx, workReg[0], workReg[1], t01, nil)
+			}
+			//work1_reg = _mm256_xor_si256(work0_reg, work1_reg);
+			VXORPD(workReg[1], workReg[0], workReg[1])
+
+			if (skipMask & 4) == 0 {
+				leo8MulAdd512(ctx, workReg[2], workReg[3], t23, nil)
+			}
+			// work3_reg = _mm256_xor_si256(work2_reg, work3_reg);
+			VXORPD(workReg[2], workReg[3], workReg[3])
+
+			// Store + Next loop:
+			for i := range work {
+				VMOVDQU64(workReg[i], Mem{Base: work[i], Disp: 0})
+				ADDQ(U8(64), work[i])
+			}
+
+			SUBQ(U8(64), bytes)
+			JA(LabelRef("loop"))
+
+			VZEROUPPER()
+			RET()
+		}
+	}
+
+}
+
+// x updated, y preserved...
+func leo8MulAdd256(ctx gf8ctx, x, y reg.VecVirtual, table table256) {
+	Comment("LEO_MULADD_256")
+	lo, hi := YMM(), YMM()
+
+	VPAND(y, ctx.clrMask, lo)
+	VPSRLQ(U8(4), y, hi)
+	VPSHUFB(lo, table.Lo, lo)
+
+	// Do high
+	VPAND(hi, ctx.clrMask, hi)
+	VPSHUFB(hi, table.Hi, hi)
+	VPXOR3way(lo, hi, x)
+}
+
+// leo8MulAdd512 multiplies y by table and xors the result into x. If z is non-nil, it is xored in as well (3-way xor).
+func leo8MulAdd512(ctx gf8ctx, x reg.VecVirtual, y reg.VecVirtual, table table512, z reg.VecVirtual) {
+	Comment("LEO_MULADD_512")
+	tmp := ZMM()
+	VGF2P8AFFINEQB(U8(0), table, y, tmp)
+	if z == nil {
+		VXORPD(x, tmp, x)
+	} else {
+		VPTERNLOGD(U8(0x96), tmp, z, x)
+	}
+}
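
The GF(2^8) kernels above reduce to a two-table nibble lookup per byte; the GFNI variants compute the same product through VGF2P8AFFINEQB instead of PSHUFB tables. Below is a scalar Go sketch of the multiply-add that leo8MulAdd256 generates; it is not part of the diff, the function name is illustrative, and the *[2*16]uint8 layout (low-nibble table at offsets 0-15, high-nibble table at 16-31) is inferred from the Disp: 0 and Disp: 16 broadcasts above.

	// leo8MulAddScalar is a reference sketch: x[i] ^= y[i] * c in GF(2^8),
	// with the constant c baked into the two 16-entry nibble tables.
	func leo8MulAddScalar(x, y []byte, table *[2 * 16]uint8) {
		tLo, tHi := table[0:16], table[16:32]
		for i := range x {
			b := y[i]
			x[i] ^= tLo[b&15] ^ tHi[b>>4]
		}
	}
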
diff --git a/_gen/go.mod b/_gen/go.mod
index d496f66..5185727 100644
--- a/_gen/go.mod
+++ b/_gen/go.mod
@@ -1,5 +1,8 @@
 module github.com/klauspost/reedsolomon/_gen
 
-go 1.14
+go 1.16
 
-require github.com/mmcloughlin/avo v0.2.0
+require (
+	github.com/klauspost/asmfmt v1.3.1
+	github.com/mmcloughlin/avo v0.5.1-0.20221128045730-bf1d05562091
+)
diff --git a/_gen/go.sum b/_gen/go.sum
index dae4777..5aa2531 100644
--- a/_gen/go.sum
+++ b/_gen/go.sum
@@ -1,31 +1,39 @@
-github.com/mmcloughlin/avo v0.2.0 h1:6vhoSaKtxb6f4RiH+LK2qL6GSMpFzhEwJYTTSZNy09w=
-github.com/mmcloughlin/avo v0.2.0/go.mod h1:5tidO2Z9Z7N6X7UMcGg+1KTj51O8OxYDCMHxCZTVpEA=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/arch v0.0.0-20210405154355-08b684f594a5/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
+github.com/klauspost/asmfmt v1.3.1 h1:7xZi1N7s9gTLbqiM8KUv8TLyysavbTRGBT5/ly0bRtw=
+github.com/klauspost/asmfmt v1.3.1/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/mmcloughlin/avo v0.5.1-0.20221128045730-bf1d05562091 h1:C2c8ttOBeyhs1SvyCXVPCFd0EqtPiTKGnMWQ+JkM0Lc=
+github.com/mmcloughlin/avo v0.5.1-0.20221128045730-bf1d05562091/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 9bb067f..0000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-os: Visual Studio 2015
-
-platform: x64
-
-clone_folder: c:\gopath\src\github.com\klauspost\reedsolomon
-
-# environment variables
-environment:
-  GOPATH: c:\gopath
-
-install:
-  - echo %PATH%
-  - echo %GOPATH%
-  - go version
-  - go env
-  - go get -d ./...
-
-build_script:
-  - go test -v -cpu=2 ./...
-  - go test -cpu=1,2,4 -short -race ./...
diff --git a/benchmark/main.go b/benchmark/main.go
new file mode 100644
index 0000000..436b19d
--- /dev/null
+++ b/benchmark/main.go
@@ -0,0 +1,437 @@
+// Copyright 2023+, Klaus Post, see LICENSE for details.
+
+package main
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"math"
+	"math/rand"
+	"os"
+	"runtime"
+	"runtime/debug"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unicode"
+
+	"github.com/klauspost/cpuid/v2"
+	"github.com/klauspost/reedsolomon"
+)
+
+var (
+	blockSize  = flag.String("size", "10MiB", "Size of each input block.")
+	blocks     = flag.Int("blocks", 1, "Total number of blocks")
+	kShards    = flag.Int("k", 12, "Data shards")
+	mShards    = flag.Int("m", 4, "Parity shards")
+	codec      = flag.String("codec", "vandermonde", "Encoder Algorithm")
+	codecs     = flag.Bool("codecs", false, "Display codecs and exit")
+	invCache   = flag.Bool("cache", true, "Enable inversion cache")
+	corrupt    = flag.Int("corrupt", 0, "Corrupt 1 to n shards. 0 means up to m shards.")
+	duration   = flag.Int("duration", 10, "Minimum number of seconds to run.")
+	progress   = flag.Bool("progress", true, "Display progress while running")
+	concurrent = flag.Bool("concurrent", false, "Run blocks in parallel")
+	cpu        = flag.Int("cpu", 16, "Set maximum number of cores to use")
+	csv        = flag.Bool("csv", false, "Output as CSV")
+
+	sSE2   = flag.Bool("sse2", cpuid.CPU.Has(cpuid.SSE2), "Use SSE2")
+	sSSE3  = flag.Bool("ssse3", cpuid.CPU.Has(cpuid.SSSE3), "Use SSSE3")
+	aVX2   = flag.Bool("avx2", cpuid.CPU.Has(cpuid.AVX2), "Use AVX2")
+	aVX512 = flag.Bool("avx512", cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512BW, cpuid.AVX512VL), "Use AVX512")
+	gNFI   = flag.Bool("gfni", cpuid.CPU.Supports(cpuid.AVX512F, cpuid.GFNI, cpuid.AVX512DQ), "Use AVX512+GFNI")
+)
+
+var codecDefinitions = map[string]struct {
+	Description string
+	MaxKM       int
+	MaxM        int
+	Opts        []reedsolomon.Option
+}{
+	"vandermonde": {Description: "Vandermonde style matrix", MaxKM: 256},
+	"cauchy":      {Description: "Cauchy style matrix", MaxKM: 256, Opts: []reedsolomon.Option{reedsolomon.WithCauchyMatrix()}},
+	"jerasure":    {Description: "Uses Vandermonde matrix in the same way as done by the Jerasure library", MaxKM: 256, Opts: []reedsolomon.Option{reedsolomon.WithJerasureMatrix()}},
+	"xor":         {Description: "XOR - supporting only one parity shard", MaxKM: 256, MaxM: 1, Opts: []reedsolomon.Option{reedsolomon.WithFastOneParityMatrix()}},
+	"par1":        {Description: "PAR1 style matrix (not reliable)", MaxKM: 256, MaxM: 1, Opts: []reedsolomon.Option{reedsolomon.WithPAR1Matrix()}},
+	"leopard":     {Description: "Progressive Leopard-RS encoding, automatically chooses 8 or 16 bits", MaxKM: 65536, Opts: []reedsolomon.Option{reedsolomon.WithLeopardGF(true)}},
+	"leopard8":    {Description: "Progressive Leopard-RS encoding, 8 bits", MaxKM: 256, Opts: []reedsolomon.Option{reedsolomon.WithLeopardGF(true)}},
+	"leopard16":   {Description: "Progressive Leopard-RS encoding, 16 bits", MaxKM: 65536, Opts: []reedsolomon.Option{reedsolomon.WithLeopardGF16(true)}},
+}
+
+func main() {
+	flag.Parse()
+	if *codecs {
+		printCodecs(0)
+	}
+	sz, err := toSize(*blockSize)
+	exitErr(err)
+	if *kShards <= 0 {
+		exitErr(errors.New("invalid k shard count"))
+	}
+	if sz <= 0 {
+		exitErr(errors.New("invalid block size"))
+	}
+	runtime.GOMAXPROCS(*cpu)
+	if sz > math.MaxInt || sz < 0 {
+		exitErr(errors.New("block size invalid"))
+	}
+	dataSz := int(sz)
+	each := (dataSz + *kShards - 1) / *kShards
+
+	opts := getOptions(each)
+	enc, err := reedsolomon.New(*kShards, *mShards, opts...)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "creating encoder returned: %s\n", err.Error())
+		os.Exit(1)
+	}
+
+	total := *kShards + *mShards
+	data := make([][][]byte, *blocks)
+	ext := enc.(reedsolomon.Extensions)
+	mulOf := ext.ShardSizeMultiple()
+	each = ((each + mulOf - 1) / mulOf) * mulOf
+	if *csv {
+		*progress = false
+	} else {
+		fmt.Printf("Benchmarking %d block(s) of %d data (K) and %d parity shards (M), each %d bytes using %d threads. Total %d bytes.\n\n", *blocks, *kShards, *mShards, each, *cpu, *blocks*each*total)
+	}
+
+	// Reduce GC overhead
+	debug.SetGCPercent(25)
+	for i := range data {
+		data[i] = reedsolomon.AllocAligned(total, each)
+	}
+	if *concurrent {
+		benchmarkEncodingConcurrent(enc, data)
+		benchmarkDecodingConcurrent(enc, data)
+	} else {
+		benchmarkEncoding(enc, data)
+		benchmarkDecoding(enc, data)
+	}
+}
+
+const updateFreq = time.Second / 3
+
+var spin = [...]byte{'|', '/', '-', '\\'}
+
+/*
+const speedDivisor = float64(1 << 30)
+const speedUnit = "Gbps"
+const speedBitMul = 8
+*/
+
+const speedDivisor = float64(1 << 20)
+const speedUnit = "MiB/s"
+const speedBitMul = 1
+
+func benchmarkEncoding(enc reedsolomon.Encoder, data [][][]byte) {
+	ext := enc.(reedsolomon.Extensions)
+	parityShards := ext.ParityShards()
+	dataShards := ext.DataShards()
+
+	start := time.Now()
+	finished := int64(0)
+	lastUpdate := start
+	end := start.Add(time.Second * time.Duration(*duration))
+	spinIdx := 0
+	for time.Now().Before(end) {
+		for _, shards := range data {
+			err := enc.Encode(shards)
+			exitErr(err)
+			finished += int64(len(shards[0]) * len(shards))
+			if *progress && time.Since(lastUpdate) > updateFreq {
+				encGB := float64(finished) * (1 / speedDivisor)
+				speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+				fmt.Printf("\r %s Encoded: %.02f GiB @%.02f %s.", string(spin[spinIdx]), encGB, speed*speedBitMul, speedUnit)
+				spinIdx = (spinIdx + 1) % len(spin)
+				lastUpdate = time.Now()
+			}
+		}
+	}
+	encGB := float64(finished) * (1 / speedDivisor)
+	speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+	if *csv {
+		fmt.Printf("encode\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", *kShards, *mShards, *blockSize, *blocks, *cpu, *codec, finished, time.Since(start).Microseconds(), speed)
+	} else {
+		fmt.Printf("\r * Encoded %.00f GiB in %v. Speed: %.02f %s (%d+%d:%d)\n", encGB, time.Since(start).Round(time.Millisecond), speedBitMul*speed, speedUnit, dataShards, parityShards, len(data[0][0]))
+	}
+}
+
+func benchmarkEncodingConcurrent(enc reedsolomon.Encoder, data [][][]byte) {
+	ext := enc.(reedsolomon.Extensions)
+	parityShards := ext.ParityShards()
+	dataShards := ext.DataShards()
+
+	start := time.Now()
+	finished := int64(0)
+	end := start.Add(time.Second * time.Duration(*duration))
+	spinIdx := 0
+	var wg sync.WaitGroup
+	var exit = make(chan struct{})
+	wg.Add(len(data))
+	for _, shards := range data {
+		go func(shards [][]byte) {
+			defer wg.Done()
+			for {
+				select {
+				case <-exit:
+					return
+				default:
+				}
+				err := enc.Encode(shards)
+				exitErr(err)
+				atomic.AddInt64(&finished, int64(len(shards[0])*len(shards)))
+			}
+		}(shards)
+	}
+
+	t := time.NewTicker(updateFreq)
+	defer t.Stop()
+
+	for range t.C {
+		if time.Now().After(end) {
+			break
+		}
+		if *progress {
+			encGB := float64(atomic.LoadInt64(&finished)) * (1 / speedDivisor)
+			speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+			fmt.Printf("\r %s Encoded: %.02f GiB @%.02f %s.", string(spin[spinIdx]), encGB, speed*speedBitMul, speedUnit)
+			spinIdx = (spinIdx + 1) % len(spin)
+		}
+	}
+	close(exit)
+	wg.Wait()
+	encGB := float64(finished) * (1 / speedDivisor)
+	speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+	if *csv {
+		fmt.Printf("encode conc\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", *kShards, *mShards, *blockSize, *blocks, *cpu, *codec, finished, time.Since(start).Microseconds(), speed)
+	} else {
+		fmt.Printf("\r * Encoded concurrent %.00f GiB in %v. Speed: %.02f %s (%d+%d:%d/%d)\n", encGB, time.Since(start).Round(time.Millisecond), speedBitMul*speed, speedUnit, dataShards, parityShards, len(data[0][0]), len(data))
+	}
+}
+
+func benchmarkDecoding(enc reedsolomon.Encoder, data [][][]byte) {
+	// Prepare
+	for _, shards := range data {
+		err := enc.Encode(shards)
+		exitErr(err)
+	}
+	ext := enc.(reedsolomon.Extensions)
+	parityShards := ext.ParityShards()
+	dataShards := ext.DataShards()
+	rng := rand.New(rand.NewSource(0))
+
+	start := time.Now()
+	finished := int64(0)
+	lastUpdate := start
+	end := start.Add(time.Second * time.Duration(*duration))
+	spinIdx := 0
+	for time.Now().Before(end) {
+		for _, shards := range data {
+			// Corrupt a random number of shards, up to what we can allow
+			cor := *corrupt
+			if cor == 0 {
+				cor = 1 + rng.Intn(parityShards)
+			}
+			for cor > 0 {
+				idx := rng.Intn(len(shards))
+				if len(shards[idx]) > 0 {
+					shards[idx] = shards[idx][:0]
+					cor--
+				}
+			}
+			err := enc.Reconstruct(shards)
+			exitErr(err)
+			finished += int64(len(shards[0]) * len(shards))
+			if *progress && time.Since(lastUpdate) > updateFreq {
+				encGB := float64(finished) * (1 / speedDivisor)
+				speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+				fmt.Printf("\r %s Repaired: %.02f GiB @%.02f %s.", string(spin[spinIdx]), encGB, speed*speedBitMul, speedUnit)
+				spinIdx = (spinIdx + 1) % len(spin)
+				lastUpdate = time.Now()
+			}
+		}
+	}
+	encGB := float64(finished) * (1 / speedDivisor)
+	speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+	if *csv {
+		fmt.Printf("decode\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", *kShards, *mShards, *blockSize, *blocks, *cpu, *codec, finished, time.Since(start).Microseconds(), speed)
+	} else {
+		fmt.Printf("\r * Repaired %.00f GiB in %v. Speed: %.02f %s (%d+%d:%d)\n", encGB, time.Since(start).Round(time.Millisecond), speedBitMul*speed, speedUnit, dataShards, parityShards, len(data[0][0]))
+	}
+}
+
+func benchmarkDecodingConcurrent(enc reedsolomon.Encoder, data [][][]byte) {
+	// Prepare
+	for _, shards := range data {
+		err := enc.Encode(shards)
+		exitErr(err)
+	}
+	ext := enc.(reedsolomon.Extensions)
+	parityShards := ext.ParityShards()
+	dataShards := ext.DataShards()
+
+	start := time.Now()
+	finished := int64(0)
+	end := start.Add(time.Second * time.Duration(*duration))
+	spinIdx := 0
+	var wg sync.WaitGroup
+	var exit = make(chan struct{})
+	wg.Add(len(data))
+	for _, shards := range data {
+		go func(shards [][]byte) {
+			rng := rand.New(rand.NewSource(0))
+			defer wg.Done()
+			for {
+				select {
+				case <-exit:
+					return
+				default:
+				}
+				// Corrupt a random number of shards, up to what we can allow
+				cor := *corrupt
+				if cor == 0 {
+					cor = 1 + rng.Intn(parityShards)
+				}
+				for cor > 0 {
+					idx := rng.Intn(len(shards))
+					if len(shards[idx]) > 0 {
+						shards[idx] = shards[idx][:0]
+						cor--
+					}
+				}
+				err := enc.Reconstruct(shards)
+				exitErr(err)
+				atomic.AddInt64(&finished, int64(len(shards[0])*len(shards)))
+			}
+		}(shards)
+	}
+	t := time.NewTicker(updateFreq)
+	defer t.Stop()
+	for range t.C {
+		if time.Now().After(end) {
+			break
+		}
+		if *progress {
+			encGB := float64(finished) * (1 / speedDivisor)
+			speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+			fmt.Printf("\r %s Repaired: %.02f GiB @%.02f %s.", string(spin[spinIdx]), encGB, speed*speedBitMul, speedUnit)
+			spinIdx = (spinIdx + 1) % len(spin)
+		}
+	}
+	encGB := float64(finished) * (1 / speedDivisor)
+	speed := encGB / (float64(time.Since(start)) / float64(time.Second))
+	if *csv {
+		fmt.Printf("decode conc\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", *kShards, *mShards, *blockSize, *blocks, *cpu, *codec, finished, time.Since(start).Microseconds(), speed)
+	} else {
+		fmt.Printf("\r * Repaired concurrent %.00f GiB in %v. Speed: %.02f %s (%d+%d:%d/%d)\n", encGB, time.Since(start).Round(time.Millisecond), speedBitMul*speed, speedUnit, dataShards, parityShards, len(data[0][0]), len(data))
+	}
+}
+
+func printCodecs(exitCode int) {
+	var keys []string
+	maxLen := 0
+	for k := range codecDefinitions {
+		keys = append(keys, k)
+		if len(k) > maxLen {
+			maxLen = len(k)
+		}
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		def := codecDefinitions[k]
+		k = k + strings.Repeat(" ", maxLen-len(k))
+		fmt.Printf("%s %s. Max K+M: %d.", k, def.Description, def.MaxKM)
+		if def.MaxM > 0 {
+			fmt.Printf(" Max M: %d.", def.MaxM)
+		}
+		fmt.Print("\n")
+	}
+	// Exit
+	if exitCode >= 0 {
+		os.Exit(exitCode)
+	}
+}
+
+func getOptions(shardSize int) []reedsolomon.Option {
+	var o []reedsolomon.Option
+	c, ok := codecDefinitions[*codec]
+	if !ok {
+		fmt.Fprintf(os.Stderr, "ERR: unknown codec: %q\n", *codec)
+		printCodecs(1)
+	}
+	total := *kShards + *mShards
+	if total > c.MaxKM {
+		fmt.Fprintf(os.Stderr, "ERR: maximum shards (k+m) %d exceeds maximum %d for codec %q\n", total, c.MaxKM, *codec)
+		os.Exit(1)
+	}
+	if c.MaxM > 0 && *mShards > c.MaxM {
+		fmt.Fprintf(os.Stderr, "ERR: maximum parity shards (m) %d exceeds maximum %d for codec %q\n", *mShards, c.MaxM, *codec)
+		os.Exit(1)
+	}
+	o = append(o, c.Opts...)
+	if !*sSSE3 {
+		o = append(o, reedsolomon.WithSSSE3(false))
+	}
+	if !*sSE2 {
+		o = append(o, reedsolomon.WithSSE2(false))
+	}
+	if !*aVX2 {
+		o = append(o, reedsolomon.WithAVX2(false))
+	}
+	if !*aVX512 {
+		o = append(o, reedsolomon.WithAVX512(false))
+	}
+	if !*gNFI {
+		o = append(o, reedsolomon.WithGFNI(false))
+	}
+	if !*invCache {
+		o = append(o, reedsolomon.WithInversionCache(false))
+	}
+	o = append(o, reedsolomon.WithAutoGoroutines(shardSize))
+	return o
+}
+
+func exitErr(err error) {
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "ERR: %s\n", err.Error())
+		os.Exit(1)
+	}
+}
+
+// toSize converts a size indication to bytes.
+func toSize(size string) (uint64, error) {
+	size = strings.ToUpper(strings.TrimSpace(size))
+	firstLetter := strings.IndexFunc(size, unicode.IsLetter)
+	if firstLetter == -1 {
+		firstLetter = len(size)
+	}
+
+	bytesString, multiple := size[:firstLetter], size[firstLetter:]
+	bytes, err := strconv.ParseUint(bytesString, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse size: %v", err)
+	}
+
+	switch multiple {
+	case "G", "GIB":
+		return bytes * 1 << 30, nil
+	case "GB":
+		return bytes * 1e9, nil
+	case "M", "MIB":
+		return bytes * 1 << 20, nil
+	case "MB":
+		return bytes * 1e6, nil
+	case "K", "KIB":
+		return bytes * 1 << 10, nil
+	case "KB":
+		return bytes * 1e3, nil
+	case "B", "":
+		return bytes, nil
+	default:
+		return 0, fmt.Errorf("unknown size suffix: %v", multiple)
+	}
+}
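
The new benchmark tool drives the public API used above (reedsolomon.New, AllocAligned, Encode, Reconstruct) in a timing loop. A minimal, self-contained sketch of that measurement core follows; it is not part of the diff, and the shard counts, shard size and duration are illustrative.

	package main

	import (
		"fmt"
		"time"

		"github.com/klauspost/reedsolomon"
	)

	func main() {
		const k, m, shardSize = 12, 4, 1 << 20 // 12 data + 4 parity shards of 1 MiB each
		enc, err := reedsolomon.New(k, m)
		if err != nil {
			panic(err)
		}
		shards := reedsolomon.AllocAligned(k+m, shardSize)
		start := time.Now()
		var processed int64
		for time.Since(start) < 3*time.Second {
			if err := enc.Encode(shards); err != nil {
				panic(err)
			}
			processed += int64(len(shards) * shardSize)
		}
		mib := float64(processed) / (1 << 20)
		fmt.Printf("Encoded %.0f MiB @ %.02f MiB/s\n", mib, mib/time.Since(start).Seconds())
	}

The real tool layers flags for codec selection, SIMD toggles, concurrency, corruption counts and CSV output on top of this loop.
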
diff --git a/debian/changelog b/debian/changelog
index 99cbbcd..cc211b3 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+golang-github-klauspost-reedsolomon (1.11.8-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sat, 15 Jul 2023 02:46:48 -0000
+
 golang-github-klauspost-reedsolomon (1.9.13-1) unstable; urgency=medium
 
   * Team upload.
diff --git a/examples/simple-decoder.go b/examples/simple-decoder.go
index c251104..19e91ca 100644
--- a/examples/simple-decoder.go
+++ b/examples/simple-decoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples/simple-encoder.go b/examples/simple-encoder.go
index 1f0ed66..d90904c 100644
--- a/examples/simple-encoder.go
+++ b/examples/simple-encoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples/stream-decoder.go b/examples/stream-decoder.go
index 1e27183..ffa890c 100644
--- a/examples/stream-decoder.go
+++ b/examples/stream-decoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples/stream-encoder.go b/examples/stream-encoder.go
index 9f18d9b..658dc87 100644
--- a/examples/stream-encoder.go
+++ b/examples/stream-encoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples_test.go b/examples_test.go
index 7ba7407..faf97d4 100644
--- a/examples_test.go
+++ b/examples_test.go
@@ -58,6 +58,63 @@ func ExampleEncoder() {
 	// ok
 }
 
+// Simple example of how to use EncodeIdx to encode one data shard at a time.
+// Note that all error checks have been removed to keep it short.
+func ExampleEncoder_EncodeIdx() {
+	const dataShards = 7
+	const erasureShards = 3
+
+	// Create some sample data
+	var data = make([]byte, 250000)
+	fillRandom(data)
+
+	// Create an encoder with 7 data and 3 parity slices.
+	enc, _ := reedsolomon.New(dataShards, erasureShards)
+
+	// Split the data into shards
+	shards, _ := enc.Split(data)
+
+	// Zero erasure shards.
+	for i := 0; i < erasureShards; i++ {
+		clear := shards[dataShards+i]
+		for j := range clear {
+			clear[j] = 0
+		}
+	}
+
+	for i := 0; i < dataShards; i++ {
+		// Encode one shard at a time.
+		// Note how this gives linear access.
+		// There is however no requirement on shards being delivered in order.
+		// All parity shards will be updated on each run.
+		_ = enc.EncodeIdx(shards[i], i, shards[dataShards:])
+	}
+
+	// Verify the parity set
+	ok, err := enc.Verify(shards)
+	if ok {
+		fmt.Println("ok")
+	} else {
+		fmt.Println(err)
+	}
+
+	// Delete two shards
+	shards[dataShards-2], shards[dataShards-1] = nil, nil
+
+	// Reconstruct the shards
+	_ = enc.Reconstruct(shards)
+
+	// Verify the data set
+	ok, err = enc.Verify(shards)
+	if ok {
+		fmt.Println("ok")
+	} else {
+		fmt.Println(err)
+	}
+	// Output: ok
+	// ok
+}
+
 // This demonstrates that shards can be arbitrary sliced and
 // merged and still remain valid.
 func ExampleEncoder_slicing() {
diff --git a/galois.go b/galois.go
index ff93d65..479fa44 100644
--- a/galois.go
+++ b/galois.go
@@ -6,6 +6,10 @@
 
 package reedsolomon
 
+import (
+	"encoding/binary"
+)
+
 const (
 	// The number of elements in the field.
 	fieldSize = 256
@@ -76,7 +80,7 @@ func galSub(a, b byte) byte {
 // Table from https://github.com/templexxx/reedsolomon
 var invTable = [256]byte{0x0, 0x1, 0x8e, 0xf4, 0x47, 0xa7, 0x7a, 0xba, 0xad, 0x9d, 0xdd, 0x98, 0x3d, 0xaa, 0x5d, 0x96, 0xd8, 0x72, 0xc0, 0x58, 0xe0, 0x3e, 0x4c, 0x66, 0x90, 0xde, 0x55, 0x80, 0xa0, 0x83, 0x4b, 0x2a, 0x6c, 0xed, 0x39, 0x51, 0x60, 0x56, 0x2c, 0x8a, 0x70, 0xd0, 0x1f, 0x4a, 0x26, 0x8b, 0x33, 0x6e, 0x48, 0x89, 0x6f, 0x2e, 0xa4, 0xc3, 0x40, 0x5e, 0x50, 0x22, 0xcf, 0xa9, 0xab, 0xc, 0x15, 0xe1, 0x36, 0x5f, 0xf8, 0xd5, 0x92, 0x4e, 0xa6, 0x4, 0x30, 0x88, 0x2b, 0x1e, 0x16, 0x67, 0x45, 0x93, 0x38, 0x23, 0x68, 0x8c, 0x81, 0x1a, 0x25, 0x61, 0x13, 0xc1, 0xcb, 0x63, 0x97, 0xe, 0x37, 0x41, 0x24, 0x57, 0xca, 0x5b, 0xb9, 0xc4, 0x17, 0x4d, 0x52, 0x8d, 0xef, 0xb3, 0x20, 0xec, 0x2f, 0x32, 0x28, 0xd1, 0x11, 0xd9, 0xe9, 0xfb, 0xda, 0x79, 0xdb, 0x77, 0x6, 0xbb, 0x84, 0xcd, 0xfe, 0xfc, 0x1b, 0x54, 0xa1, 0x1d, 0x7c, 0xcc, 0xe4, 0xb0, 0x49, 0x31, 0x27, 0x2d, 0x53, 0x69, 0x2, 0xf5, 0x18, 0xdf, 0x44, 0x4f, 0x9b, 0xbc, 0xf, 0x5c, 0xb, 0xdc, 0xbd, 0x94, 0xac, 0x9, 0xc7, 0xa2, 0x1c, 0x82, 0x9f, 0xc6, 0x34, 0xc2, 0x46, 0x5, 0xce, 0x3b, 0xd, 0x3c, 0x9c, 0x8, 0xbe, 0xb7, 0x87, 0xe5, 0xee, 0x6b, 0xeb, 0xf2, 0xbf, 0xaf, 0xc5, 0x64, 0x7, 0x7b, 0x95, 0x9a, 0xae, 0xb6, 0x12, 0x59, 0xa5, 0x35, 0x65, 0xb8, 0xa3, 0x9e, 0xd2, 0xf7, 0x62, 0x5a, 0x85, 0x7d, 0xa8, 0x3a, 0x29, 0x71, 0xc8, 0xf6, 0xf9, 0x43, 0xd7, 0xd6, 0x10, 0x73, 0x76, 0x78, 0x99, 0xa, 0x19, 0x91, 0x14, 0x3f, 0xe6, 0xf0, 0x86, 0xb1, 0xe2, 0xf1, 0xfa, 0x74, 0xf3, 0xb4, 0x6d, 0x21, 0xb2, 0x6a, 0xe3, 0xe7, 0xb5, 0xea, 0x3, 0x8f, 0xd3, 0xc9, 0x42, 0xd4, 0xe8, 0x75, 0x7f, 0xff, 0x7e, 0xfd}
 
-var mulTable = [256][256]uint8{[256]uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+var mulTable = [256][256]uint8{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
 	{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff},
 	{0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e, 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe, 0x1d, 0x1f, 0x19, 0x1b, 0x15, 0x17, 0x11, 0x13, 0xd, 0xf, 0x9, 0xb, 0x5, 0x7, 0x1, 0x3, 0x3d, 0x3f, 0x39, 0x3b, 0x35, 0x37, 0x31, 0x33, 0x2d, 0x2f, 0x29, 0x2b, 0x25, 0x27, 0x21, 0x23, 0x5d, 0x5f, 0x59, 0x5b, 0x55, 0x57, 0x51, 0x53, 0x4d, 0x4f, 0x49, 0x4b, 0x45, 0x47, 0x41, 0x43, 0x7d, 0x7f, 0x79, 0x7b, 0x75, 0x77, 0x71, 0x73, 0x6d, 0x6f, 0x69, 0x6b, 0x65, 0x67, 0x61, 0x63, 0x9d, 0x9f, 0x99, 0x9b, 0x95, 0x97, 0x91, 0x93, 0x8d, 0x8f, 0x89, 0x8b, 0x85, 0x87, 0x81, 0x83, 0xbd, 0xbf, 0xb9, 0xbb, 0xb5, 0xb7, 0xb1, 0xb3, 0xad, 0xaf, 0xa9, 0xab, 0xa5, 0xa7, 0xa1, 0xa3, 0xdd, 0xdf, 0xd9, 0xdb, 0xd5, 0xd7, 0xd1, 0xd3, 0xcd, 0xcf, 0xc9, 0xcb, 0xc5, 0xc7, 0xc1, 0xc3, 0xfd, 0xff, 0xf9, 0xfb, 0xf5, 0xf7, 0xf1, 0xf3, 0xed, 0xef, 0xe9, 0xeb, 0xe5, 0xe7, 0xe1, 0xe3},
 	{0x0, 0x3, 0x6, 0x5, 0xc, 0xf, 0xa, 0x9, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11, 0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21, 0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71, 0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41, 0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1, 0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1, 0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1, 0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81, 0x9d, 0x9e, 0x9b, 0x98, 0x91, 0x92, 0x97, 0x94, 0x85, 0x86, 0x83, 0x80, 0x89, 0x8a, 0x8f, 0x8c, 0xad, 0xae, 0xab, 0xa8, 0xa1, 0xa2, 0xa7, 0xa4, 0xb5, 0xb6, 0xb3, 0xb0, 0xb9, 0xba, 0xbf, 0xbc, 0xfd, 0xfe, 0xfb, 0xf8, 0xf1, 0xf2, 0xf7, 0xf4, 0xe5, 0xe6, 0xe3, 0xe0, 0xe9, 0xea, 0xef, 0xec, 0xcd, 0xce, 0xcb, 0xc8, 0xc1, 0xc2, 0xc7, 0xc4, 0xd5, 0xd6, 0xd3, 0xd0, 0xd9, 0xda, 0xdf, 0xdc, 0x5d, 0x5e, 0x5b, 0x58, 0x51, 0x52, 0x57, 0x54, 0x45, 0x46, 0x43, 0x40, 0x49, 0x4a, 0x4f, 0x4c, 0x6d, 0x6e, 0x6b, 0x68, 0x61, 0x62, 0x67, 0x64, 0x75, 0x76, 0x73, 0x70, 0x79, 0x7a, 0x7f, 0x7c, 0x3d, 0x3e, 0x3b, 0x38, 0x31, 0x32, 0x37, 0x34, 0x25, 0x26, 0x23, 0x20, 0x29, 0x2a, 0x2f, 0x2c, 0xd, 0xe, 0xb, 0x8, 0x1, 0x2, 0x7, 0x4, 0x15, 0x16, 0x13, 0x10, 0x19, 0x1a, 0x1f, 0x1c},
@@ -901,7 +905,7 @@ func galExp(a byte, n int) byte {
 	return expTable[logResult]
 }
 
-func genAvx2Matrix(matrixRows [][]byte, inputs, outputs int, dst []byte) []byte {
+func genAvx2Matrix(matrixRows [][]byte, inputs, inIdx, outputs int, dst []byte) []byte {
 	if !avx2CodeGen {
 		panic("codegen not enabled")
 	}
@@ -910,12 +914,12 @@ func genAvx2Matrix(matrixRows [][]byte, inputs, outputs int, dst []byte) []byte
 	// Duplicated in+out
 	wantBytes := total * 32 * 2
 	if cap(dst) < wantBytes {
-		dst = make([]byte, wantBytes)
+		dst = AllocAligned(1, wantBytes)[0]
 	} else {
 		dst = dst[:wantBytes]
 	}
 	for i, row := range matrixRows[:outputs] {
-		for j, idx := range row[:inputs] {
+		for j, idx := range row[inIdx : inIdx+inputs] {
 			dstIdx := (j*outputs + i) * 64
 			dstPart := dst[dstIdx:]
 			dstPart = dstPart[:64]
@@ -929,3 +933,42 @@ func genAvx2Matrix(matrixRows [][]byte, inputs, outputs int, dst []byte) []byte
 	}
 	return dst
 }
+
+var gf2p811dMulMatrices = [256]uint64{0, 0x102040810204080, 0x8001828488102040, 0x8103868c983060c0, 0x408041c2c4881020, 0x418245cad4a850a0, 0xc081c3464c983060, 0xc183c74e5cb870e0, 0x2040a061e2c48810, 0x2142a469f2e4c890, 0xa04122e56ad4a850, 0xa14326ed7af4e8d0, 0x60c0e1a3264c9830, 0x61c2e5ab366cd8b0, 0xe0c16327ae5cb870, 0xe1c3672fbe7cf8f0, 0x102050b071e2c488, 0x112254b861c28408, 0x9021d234f9f2e4c8, 0x9123d63ce9d2a448, 0x50a01172b56ad4a8, 0x51a2157aa54a9428, 0xd0a193f63d7af4e8, 0xd1a397fe2d5ab468, 0x3060f0d193264c98, 0x3162f4d983060c18, 0xb06172551b366cd8, 0xb163765d0b162c58, 0x70e0b11357ae5cb8, 0x71e2b51b478e1c38, 0xf0e13397dfbe7cf8, 0xf1e3379fcf9e3c78, 0x8810a8d83871e2c4, 0x8912acd02851a244, 0x8112a5cb061c284, 0x9132e54a0418204, 0xc890e91afcf9f2e4, 0xc992ed12ecd9b264, 0x48916b9e74e9d2a4, 0x49936f9664c99224, 0xa85008b9dab56ad4, 0xa9520cb1ca952a54, 0x28518a3d52a54a94, 0x29538e3542850a14, 0xe8d0497b1e3d7af4, 0xe9d24d730e1d3a74, 0x68d1cbff962d5ab4, 0x69d3cff7860d1a34, 0x9830f8684993264c, 0x9932fc6059b366cc, 0x18317aecc183060c, 0x19337ee4d1a3468c, 0xd8b0b9aa8d1b366c, 0xd9b2bda29d3b76ec, 0x58b13b2e050b162c, 0x59b33f26152b56ac, 0xb8705809ab57ae5c, 0xb9725c01bb77eedc, 0x3871da8d23478e1c, 0x3973de853367ce9c, 0xf8f019cb6fdfbe7c, 0xf9f21dc37ffffefc, 0x78f19b4fe7cf9e3c, 0x79f39f47f7efdebc, 0xc488d46c1c3871e2, 0xc58ad0640c183162, 0x448956e8942851a2, 0x458b52e084081122, 0x840895aed8b061c2, 0x850a91a6c8902142, 0x409172a50a04182, 0x50b132240800102, 0xe4c8740dfefcf9f2, 0xe5ca7005eedcb972, 0x64c9f68976ecd9b2, 0x65cbf28166cc9932, 0xa44835cf3a74e9d2, 0xa54a31c72a54a952, 0x2449b74bb264c992, 0x254bb343a2448912, 0xd4a884dc6ddab56a, 0xd5aa80d47dfaf5ea, 0x54a90658e5ca952a, 0x55ab0250f5ead5aa, 0x9428c51ea952a54a, 0x952ac116b972e5ca, 0x1429479a2142850a, 0x152b43923162c58a, 0xf4e824bd8f1e3d7a, 0xf5ea20b59f3e7dfa, 0x74e9a639070e1d3a, 0x75eba231172e5dba, 0xb468657f4b962d5a, 0xb56a61775bb66dda, 0x3469e7fbc3860d1a, 0x356be3f3d3a64d9a, 0x4c987cb424499326, 0x4d9a78bc3469d3a6, 0xcc99fe30ac59b366, 0xcd9bfa38bc79f3e6, 0xc183d76e0c18306, 0xd1a397ef0e1c386, 0x8c19bff268d1a346, 0x8d1bbbfa78f1e3c6, 0x6cd8dcd5c68d1b36, 0x6ddad8ddd6ad5bb6, 0xecd95e514e9d3b76, 0xeddb5a595ebd7bf6, 0x2c589d1702050b16, 0x2d5a991f12254b96, 0xac591f938a152b56, 0xad5b1b9b9a356bd6, 0x5cb82c0455ab57ae, 0x5dba280c458b172e, 0xdcb9ae80ddbb77ee, 0xddbbaa88cd9b376e, 0x1c386dc69123478e, 0x1d3a69ce8103070e, 0x9c39ef42193367ce, 0x9d3beb4a0913274e, 0x7cf88c65b76fdfbe, 0x7dfa886da74f9f3e, 0xfcf90ee13f7ffffe, 0xfdfb0ae92f5fbf7e, 0x3c78cda773e7cf9e, 0x3d7ac9af63c78f1e, 0xbc794f23fbf7efde, 0xbd7b4b2bebd7af5e, 0xe2c46a368e1c3871, 0xe3c66e3e9e3c78f1, 0x62c5e8b2060c1831, 0x63c7ecba162c58b1, 0xa2442bf44a942851, 0xa3462ffc5ab468d1, 0x2245a970c2840811, 0x2347ad78d2a44891, 0xc284ca576cd8b061, 0xc386ce5f7cf8f0e1, 0x428548d3e4c89021, 0x43874cdbf4e8d0a1, 0x82048b95a850a041, 0x83068f9db870e0c1, 0x205091120408001, 0x3070d193060c081, 0xf2e43a86fffefcf9, 0xf3e63e8eefdebc79, 0x72e5b80277eedcb9, 0x73e7bc0a67ce9c39, 0xb2647b443b76ecd9, 0xb3667f4c2b56ac59, 0x3265f9c0b366cc99, 0x3367fdc8a3468c19, 0xd2a49ae71d3a74e9, 0xd3a69eef0d1a3469, 0x52a51863952a54a9, 0x53a71c6b850a1429, 0x9224db25d9b264c9, 0x9326df2dc9922449, 0x122559a151a24489, 0x13275da941820409, 0x6ad4c2eeb66ddab5, 0x6bd6c6e6a64d9a35, 0xead5406a3e7dfaf5, 0xebd744622e5dba75, 0x2a54832c72e5ca95, 0x2b56872462c58a15, 0xaa5501a8faf5ead5, 0xab5705a0ead5aa55, 0x4a94628f54a952a5, 0x4b96668744891225, 0xca95e00bdcb972e5, 0xcb97e403cc993265, 0xa14234d90214285, 0xb16274580010205, 0x8a15a1c9183162c5, 0x8b17a5c108112245, 0x7af4925ec78f1e3d, 
0x7bf69656d7af5ebd, 0xfaf510da4f9f3e7d, 0xfbf714d25fbf7efd, 0x3a74d39c03070e1d, 0x3b76d79413274e9d, 0xba7551188b172e5d, 0xbb7755109b376edd, 0x5ab4323f254b962d, 0x5bb63637356bd6ad, 0xdab5b0bbad5bb66d, 0xdbb7b4b3bd7bf6ed, 0x1a3473fde1c3860d, 0x1b3677f5f1e3c68d, 0x9a35f17969d3a64d, 0x9b37f57179f3e6cd, 0x264cbe5a92244993, 0x274eba5282040913, 0xa64d3cde1a3469d3, 0xa74f38d60a142953, 0x66ccff9856ac59b3, 0x67cefb90468c1933, 0xe6cd7d1cdebc79f3, 0xe7cf7914ce9c3973, 0x60c1e3b70e0c183, 0x70e1a3360c08103, 0x860d9cbff8f0e1c3, 0x870f98b7e8d0a143, 0x468c5ff9b468d1a3, 0x478e5bf1a4489123, 0xc68ddd7d3c78f1e3, 0xc78fd9752c58b163, 0x366ceeeae3c68d1b, 0x376eeae2f3e6cd9b, 0xb66d6c6e6bd6ad5b, 0xb76f68667bf6eddb, 0x76ecaf28274e9d3b, 0x77eeab20376eddbb, 0xf6ed2dacaf5ebd7b, 0xf7ef29a4bf7efdfb, 0x162c4e8b0102050b, 0x172e4a831122458b, 0x962dcc0f8912254b, 0x972fc807993265cb, 0x56ac0f49c58a152b, 0x57ae0b41d5aa55ab, 0xd6ad8dcd4d9a356b, 0xd7af89c55dba75eb, 0xae5c1682aa55ab57, 0xaf5e128aba75ebd7, 0x2e5d940622458b17, 0x2f5f900e3265cb97, 0xeedc57406eddbb77, 0xefde53487efdfbf7, 0x6eddd5c4e6cd9b37, 0x6fdfd1ccf6eddbb7, 0x8e1cb6e348912347, 0x8f1eb2eb58b163c7, 0xe1d3467c0810307, 0xf1f306fd0a14387, 0xce9cf7218c193367, 0xcf9ef3299c3973e7, 0x4e9d75a504091327, 0x4f9f71ad142953a7, 0xbe7c4632dbb76fdf, 0xbf7e423acb972f5f, 0x3e7dc4b653a74f9f, 0x3f7fc0be43870f1f, 0xfefc07f01f3f7fff, 0xfffe03f80f1f3f7f, 0x7efd8574972f5fbf, 0x7fff817c870f1f3f, 0x9e3ce6533973e7cf, 0x9f3ee25b2953a74f, 0x1e3d64d7b163c78f, 0x1f3f60dfa143870f, 0xdebca791fdfbf7ef, 0xdfbea399eddbb76f, 0x5ebd251575ebd7af, 0x5fbf211d65cb972f}
+
+func genGFNIMatrix(matrixRows [][]byte, inputs, inIdx, outputs int, dst []uint64) []uint64 {
+	if !avx2CodeGen {
+		panic("codegen not enabled")
+	}
+	total := inputs * outputs
+
+	// One 64-bit multiplication matrix per input/output pair.
+	dst = dst[:total]
+	for i, row := range matrixRows[:outputs] {
+		for j, idx := range row[inIdx : inIdx+inputs] {
+			dst[j*outputs+i] = gf2p811dMulMatrices[idx]
+		}
+	}
+	return dst
+}
+
+// sliceXorGo XORs the bytes of in into out, 32 bytes at a time, with a byte-wise tail.
+func sliceXorGo(in, out []byte, _ *options) {
+	for len(out) >= 32 {
+		inS := in[:32]
+		v0 := binary.LittleEndian.Uint64(out[:8]) ^ binary.LittleEndian.Uint64(inS[:8])
+		v1 := binary.LittleEndian.Uint64(out[8:16]) ^ binary.LittleEndian.Uint64(inS[8:16])
+		v2 := binary.LittleEndian.Uint64(out[16:24]) ^ binary.LittleEndian.Uint64(inS[16:24])
+		v3 := binary.LittleEndian.Uint64(out[24:32]) ^ binary.LittleEndian.Uint64(inS[24:32])
+		binary.LittleEndian.PutUint64(out[:8], v0)
+		binary.LittleEndian.PutUint64(out[8:16], v1)
+		binary.LittleEndian.PutUint64(out[16:24], v2)
+		binary.LittleEndian.PutUint64(out[24:32], v3)
+		out = out[32:]
+		in = in[32:]
+	}
+	out = out[:len(in)]
+	for n, input := range in {
+		out[n] ^= input
+	}
+}
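
The new sliceXorGo above is the pure-Go fallback (the sliceXor change in galois_amd64.go further down falls back to it when SSE2 is unavailable): it processes 32 bytes per iteration as four little-endian 64-bit words and finishes any tail byte by byte. A minimal sketch of how it could be checked against a naive loop, assuming it sits in package reedsolomon where the unexported sliceXorGo and options are visible:

package reedsolomon

import (
	"bytes"
	"math/rand"
	"testing"
)

// Exercises lengths on both sides of the 32-byte fast-path boundary.
func TestSliceXorGoSketch(t *testing.T) {
	for _, n := range []int{0, 1, 31, 32, 33, 63, 1000} {
		in := make([]byte, n)
		out := make([]byte, n)
		rand.Read(in)
		rand.Read(out)

		want := append([]byte(nil), out...)
		for i := range want {
			want[i] ^= in[i]
		}

		sliceXorGo(in, out, &options{})
		if !bytes.Equal(out, want) {
			t.Fatalf("mismatch for length %d", n)
		}
	}
}
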
diff --git a/galoisAvx512_amd64.go b/galoisAvx512_amd64.go
deleted file mode 100644
index 720196f..0000000
--- a/galoisAvx512_amd64.go
+++ /dev/null
@@ -1,338 +0,0 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-// Copyright 2019, Minio, Inc.
-
-package reedsolomon
-
-import (
-	"sync"
-)
-
-//go:noescape
-func _galMulAVX512Parallel81(in, out [][]byte, matrix *[matrixSize81]byte, addTo bool)
-
-//go:noescape
-func _galMulAVX512Parallel82(in, out [][]byte, matrix *[matrixSize82]byte, addTo bool)
-
-//go:noescape
-func _galMulAVX512Parallel84(in, out [][]byte, matrix *[matrixSize84]byte, addTo bool)
-
-const (
-	dimIn        = 8                            // Number of input rows processed simultaneously
-	dimOut81     = 1                            // Number of output rows processed simultaneously for x1 routine
-	dimOut82     = 2                            // Number of output rows processed simultaneously for x2 routine
-	dimOut84     = 4                            // Number of output rows processed simultaneously for x4 routine
-	matrixSize81 = (16 + 16) * dimIn * dimOut81 // Dimension of slice of matrix coefficient passed into x1 routine
-	matrixSize82 = (16 + 16) * dimIn * dimOut82 // Dimension of slice of matrix coefficient passed into x2 routine
-	matrixSize84 = (16 + 16) * dimIn * dimOut84 // Dimension of slice of matrix coefficient passed into x4 routine
-)
-
-// Construct block of matrix coefficients for single output row in parallel
-func setupMatrix81(matrixRows [][]byte, inputOffset, outputOffset int, matrix *[matrixSize81]byte) {
-	offset := 0
-	for c := inputOffset; c < inputOffset+dimIn; c++ {
-		for iRow := outputOffset; iRow < outputOffset+dimOut81; iRow++ {
-			if c < len(matrixRows[iRow]) {
-				coeff := matrixRows[iRow][c]
-				copy(matrix[offset*32:], mulTableLow[coeff][:])
-				copy(matrix[offset*32+16:], mulTableHigh[coeff][:])
-			} else {
-				// coefficients not used for this input shard (so null out)
-				v := matrix[offset*32 : offset*32+32]
-				for i := range v {
-					v[i] = 0
-				}
-			}
-			offset += dimIn
-			if offset >= dimIn*dimOut81 {
-				offset -= dimIn*dimOut81 - 1
-			}
-		}
-	}
-}
-
-// Construct block of matrix coefficients for 2 output rows in parallel
-func setupMatrix82(matrixRows [][]byte, inputOffset, outputOffset int, matrix *[matrixSize82]byte) {
-	offset := 0
-	for c := inputOffset; c < inputOffset+dimIn; c++ {
-		for iRow := outputOffset; iRow < outputOffset+dimOut82; iRow++ {
-			if c < len(matrixRows[iRow]) {
-				coeff := matrixRows[iRow][c]
-				copy(matrix[offset*32:], mulTableLow[coeff][:])
-				copy(matrix[offset*32+16:], mulTableHigh[coeff][:])
-			} else {
-				// coefficients not used for this input shard (so null out)
-				v := matrix[offset*32 : offset*32+32]
-				for i := range v {
-					v[i] = 0
-				}
-			}
-			offset += dimIn
-			if offset >= dimIn*dimOut82 {
-				offset -= dimIn*dimOut82 - 1
-			}
-		}
-	}
-}
-
-// Construct block of matrix coefficients for 4 output rows in parallel
-func setupMatrix84(matrixRows [][]byte, inputOffset, outputOffset int, matrix *[matrixSize84]byte) {
-	offset := 0
-	for c := inputOffset; c < inputOffset+dimIn; c++ {
-		for iRow := outputOffset; iRow < outputOffset+dimOut84; iRow++ {
-			if c < len(matrixRows[iRow]) {
-				coeff := matrixRows[iRow][c]
-				copy(matrix[offset*32:], mulTableLow[coeff][:])
-				copy(matrix[offset*32+16:], mulTableHigh[coeff][:])
-			} else {
-				// coefficients not used for this input shard (so null out)
-				v := matrix[offset*32 : offset*32+32]
-				for i := range v {
-					v[i] = 0
-				}
-			}
-			offset += dimIn
-			if offset >= dimIn*dimOut84 {
-				offset -= dimIn*dimOut84 - 1
-			}
-		}
-	}
-}
-
-// Invoke AVX512 routine for single output row in parallel
-func galMulAVX512Parallel81(in, out [][]byte, matrixRows [][]byte, inputOffset, outputOffset, start, stop int, matrix81 *[matrixSize81]byte) {
-	done := stop - start
-	if done <= 0 {
-		return
-	}
-
-	inputEnd := inputOffset + dimIn
-	if inputEnd > len(in) {
-		inputEnd = len(in)
-	}
-	outputEnd := outputOffset + dimOut81
-	if outputEnd > len(out) {
-		outputEnd = len(out)
-	}
-
-	// We know the max size, alloc temp array.
-	var inTmp [dimIn][]byte
-	for i, v := range in[inputOffset:inputEnd] {
-		inTmp[i] = v[start:stop]
-	}
-	var outTmp [dimOut81][]byte
-	for i, v := range out[outputOffset:outputEnd] {
-		outTmp[i] = v[start:stop]
-	}
-
-	addTo := inputOffset != 0 // Except for the first input column, add to previous results
-	_galMulAVX512Parallel81(inTmp[:inputEnd-inputOffset], outTmp[:outputEnd-outputOffset], matrix81, addTo)
-
-	done = start + ((done >> 6) << 6)
-	if done < stop {
-		galMulAVX512LastInput(inputOffset, inputEnd, outputOffset, outputEnd, matrixRows, done, stop, out, in)
-	}
-}
-
-// Invoke AVX512 routine for 2 output rows in parallel
-func galMulAVX512Parallel82(in, out [][]byte, matrixRows [][]byte, inputOffset, outputOffset, start, stop int, matrix82 *[matrixSize82]byte) {
-	done := stop - start
-	if done <= 0 {
-		return
-	}
-
-	inputEnd := inputOffset + dimIn
-	if inputEnd > len(in) {
-		inputEnd = len(in)
-	}
-	outputEnd := outputOffset + dimOut82
-	if outputEnd > len(out) {
-		outputEnd = len(out)
-	}
-
-	// We know the max size, alloc temp array.
-	var inTmp [dimIn][]byte
-	for i, v := range in[inputOffset:inputEnd] {
-		inTmp[i] = v[start:stop]
-	}
-	var outTmp [dimOut82][]byte
-	for i, v := range out[outputOffset:outputEnd] {
-		outTmp[i] = v[start:stop]
-	}
-
-	addTo := inputOffset != 0 // Except for the first input column, add to previous results
-	_galMulAVX512Parallel82(inTmp[:inputEnd-inputOffset], outTmp[:outputEnd-outputOffset], matrix82, addTo)
-
-	done = start + ((done >> 6) << 6)
-	if done < stop {
-		galMulAVX512LastInput(inputOffset, inputEnd, outputOffset, outputEnd, matrixRows, done, stop, out, in)
-	}
-}
-
-// Invoke AVX512 routine for 4 output rows in parallel
-func galMulAVX512Parallel84(in, out [][]byte, matrixRows [][]byte, inputOffset, outputOffset, start, stop int, matrix84 *[matrixSize84]byte) {
-	done := stop - start
-	if done <= 0 {
-		return
-	}
-
-	inputEnd := inputOffset + dimIn
-	if inputEnd > len(in) {
-		inputEnd = len(in)
-	}
-	outputEnd := outputOffset + dimOut84
-	if outputEnd > len(out) {
-		outputEnd = len(out)
-	}
-
-	// We know the max size, alloc temp array.
-	var inTmp [dimIn][]byte
-	for i, v := range in[inputOffset:inputEnd] {
-		inTmp[i] = v[start:stop]
-	}
-	var outTmp [dimOut84][]byte
-	for i, v := range out[outputOffset:outputEnd] {
-		outTmp[i] = v[start:stop]
-	}
-
-	addTo := inputOffset != 0 // Except for the first input column, add to previous results
-	_galMulAVX512Parallel84(inTmp[:inputEnd-inputOffset], outTmp[:outputEnd-outputOffset], matrix84, addTo)
-
-	done = start + ((done >> 6) << 6)
-	if done < stop {
-		galMulAVX512LastInput(inputOffset, inputEnd, outputOffset, outputEnd, matrixRows, done, stop, out, in)
-	}
-}
-
-func galMulAVX512LastInput(inputOffset int, inputEnd int, outputOffset int, outputEnd int, matrixRows [][]byte, done int, stop int, out [][]byte, in [][]byte) {
-	for c := inputOffset; c < inputEnd; c++ {
-		for iRow := outputOffset; iRow < outputEnd; iRow++ {
-			if c < len(matrixRows[iRow]) {
-				mt := mulTable[matrixRows[iRow][c]][:256]
-				for i := done; i < stop; i++ {
-					if c == 0 { // only set value for first input column
-						out[iRow][i] = mt[in[c][i]]
-					} else { // and add for all others
-						out[iRow][i] ^= mt[in[c][i]]
-					}
-				}
-			}
-		}
-	}
-}
-
-// Perform the same as codeSomeShards, but taking advantage of
-// AVX512 parallelism for up to 4x faster execution as compared to AVX2
-func (r *reedSolomon) codeSomeShardsAvx512(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
-	// Process using no goroutines
-	start, end := 0, r.o.perRound
-	if end > byteCount {
-		end = byteCount
-	}
-	for start < byteCount {
-		matrix84 := [matrixSize84]byte{}
-		matrix82 := [matrixSize82]byte{}
-		matrix81 := [matrixSize81]byte{}
-
-		outputRow := 0
-		// First process (multiple) batches of 4 output rows in parallel
-		if outputRow+dimOut84 <= outputCount {
-			for ; outputRow+dimOut84 <= outputCount; outputRow += dimOut84 {
-				for inputRow := 0; inputRow < len(inputs); inputRow += dimIn {
-					setupMatrix84(matrixRows, inputRow, outputRow, &matrix84)
-					galMulAVX512Parallel84(inputs, outputs, matrixRows, inputRow, outputRow, start, end, &matrix84)
-				}
-			}
-		}
-		// Then process a (single) batch of 2 output rows in parallel
-		if outputRow+dimOut82 <= outputCount {
-			for inputRow := 0; inputRow < len(inputs); inputRow += dimIn {
-				setupMatrix82(matrixRows, inputRow, outputRow, &matrix82)
-				galMulAVX512Parallel82(inputs, outputs, matrixRows, inputRow, outputRow, start, end, &matrix82)
-			}
-			outputRow += dimOut82
-		}
-		// Lastly, we may have a single output row left (for uneven parity)
-		if outputRow < outputCount {
-			for inputRow := 0; inputRow < len(inputs); inputRow += dimIn {
-				setupMatrix81(matrixRows, inputRow, outputRow, &matrix81)
-				galMulAVX512Parallel81(inputs, outputs, matrixRows, inputRow, outputRow, start, end, &matrix81)
-			}
-		}
-
-		start = end
-		end += r.o.perRound
-		if end > byteCount {
-			end = byteCount
-		}
-	}
-}
-
-// Perform the same as codeSomeShards, but taking advantage of
-// AVX512 parallelism for up to 4x faster execution as compared to AVX2
-func (r *reedSolomon) codeSomeShardsAvx512P(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
-	var wg sync.WaitGroup
-	do := byteCount / r.o.maxGoroutines
-	if do < r.o.minSplitSize {
-		do = r.o.minSplitSize
-	}
-	// Make sizes divisible by 64
-	do = (do + 63) & (^63)
-	start := 0
-	for start < byteCount {
-		if start+do > byteCount {
-			do = byteCount - start
-		}
-		wg.Add(1)
-		go func(grStart, grStop int) {
-			start, stop := grStart, grStart+r.o.perRound
-			if stop > grStop {
-				stop = grStop
-			}
-			// Loop for each round.
-			matrix84 := [matrixSize84]byte{}
-			matrix82 := [matrixSize82]byte{}
-			matrix81 := [matrixSize81]byte{}
-			for start < grStop {
-				outputRow := 0
-				// First process (multiple) batches of 4 output rows in parallel
-				if outputRow+dimOut84 <= outputCount {
-					// 1K matrix buffer
-					for ; outputRow+dimOut84 <= outputCount; outputRow += dimOut84 {
-						for inputRow := 0; inputRow < len(inputs); inputRow += dimIn {
-							setupMatrix84(matrixRows, inputRow, outputRow, &matrix84)
-							galMulAVX512Parallel84(inputs, outputs, matrixRows, inputRow, outputRow, start, stop, &matrix84)
-						}
-					}
-				}
-				// Then process a (single) batch of 2 output rows in parallel
-				if outputRow+dimOut82 <= outputCount {
-					// 512B matrix buffer
-					for inputRow := 0; inputRow < len(inputs); inputRow += dimIn {
-						setupMatrix82(matrixRows, inputRow, outputRow, &matrix82)
-						galMulAVX512Parallel82(inputs, outputs, matrixRows, inputRow, outputRow, start, stop, &matrix82)
-					}
-					outputRow += dimOut82
-				}
-				// Lastly, we may have a single output row left (for uneven parity)
-				if outputRow < outputCount {
-					for inputRow := 0; inputRow < len(inputs); inputRow += dimIn {
-						setupMatrix81(matrixRows, inputRow, outputRow, &matrix81)
-						galMulAVX512Parallel81(inputs, outputs, matrixRows, inputRow, outputRow, start, stop, &matrix81)
-					}
-				}
-				start = stop
-				stop += r.o.perRound
-				if stop > grStop {
-					stop = grStop
-				}
-			}
-			wg.Done()
-		}(start, start+do)
-		start += do
-	}
-	wg.Wait()
-}
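
The file removed above carried the 2019-era AVX-512 dispatch (8 inputs against 1, 2 or 4 output rows per kernel call). One detail worth keeping in mind when reading the replacement code paths is how its parallel variant split work: each goroutine's slice was rounded up to a whole number of 64-byte blocks so the assembly kernels never saw a partial block. A small self-contained sketch of that splitting idiom (splitWork is an illustrative name, not part of the package):

package main

import "fmt"

// splitWork mirrors the chunking done by the removed codeSomeShardsAvx512P:
// divide byteCount across maxGoroutines, enforce a minimum chunk size, and
// round the chunk up to a multiple of 64 bytes.
func splitWork(byteCount, maxGoroutines, minSplitSize int) []int {
	do := byteCount / maxGoroutines
	if do < minSplitSize {
		do = minSplitSize
	}
	do = (do + 63) &^ 63 // round up to a multiple of 64
	var chunks []int
	for start := 0; start < byteCount; start += do {
		n := do
		if start+n > byteCount {
			n = byteCount - start
		}
		chunks = append(chunks, n)
	}
	return chunks
}

func main() {
	// All chunks but possibly the last are whole multiples of 64 bytes.
	fmt.Println(splitWork(1_000_000, 8, 64<<10))
}
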
diff --git a/galoisAvx512_amd64.s b/galoisAvx512_amd64.s
deleted file mode 100644
index 09f1d0d..0000000
--- a/galoisAvx512_amd64.s
+++ /dev/null
@@ -1,402 +0,0 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-// Copyright 2019, Minio, Inc.
-
-#define LOAD(OFFSET) \
-	MOVQ      OFFSET(SI), BX  \
-	VMOVDQU64 (BX)(R11*1), Z0 \
-	VPSRLQ    $4, Z0, Z1      \ // high input
-	VPANDQ    Z2, Z0, Z0      \ // low input
-	VPANDQ    Z2, Z1, Z1      // high input
-
-#define GALOIS_MUL(MUL_LO, MUL_HI, LO, HI, OUT) \
-	VPSHUFB    Z0, MUL_LO, LO     \ // mul low part
-	VPSHUFB    Z1, MUL_HI, HI     \ // mul high part
-	VPTERNLOGD $0x96, LO, HI, OUT
-
-#define GALOIS(C1, C2, IN, LO, HI, OUT) \
-	VSHUFI64X2 $C1, IN, IN, LO      \
-	VSHUFI64X2 $C2, IN, IN, HI      \
-	GALOIS_MUL(LO, HI, LO, HI, OUT)
-
-//
-// Process single output row from a total of 8 input rows
-//
-// func _galMulAVX512Parallel81(in, out [][]byte, matrix *[matrixSize81]byte, addTo bool)
-TEXT ·_galMulAVX512Parallel81(SB), 7, $0
-	MOVQ  in+0(FP), SI
-	MOVQ  8(SI), R9              // R9: len(in)
-	SHRQ  $6, R9                 // len(in) / 64
-	TESTQ R9, R9
-	JZ    done_avx512_parallel81
-
-	MOVQ      matrix+48(FP), SI
-	VMOVDQU64 0x000(SI), Z16
-	VMOVDQU64 0x040(SI), Z17
-	VMOVDQU64 0x080(SI), Z18
-	VMOVDQU64 0x0c0(SI), Z19
-
-	// Initialize multiplication constants
-	VSHUFI64X2 $0x55, Z16, Z16, Z20
-	VSHUFI64X2 $0xaa, Z16, Z16, Z24
-	VSHUFI64X2 $0xff, Z16, Z16, Z28
-	VSHUFI64X2 $0x00, Z16, Z16, Z16
-
-	VSHUFI64X2 $0x55, Z17, Z17, Z21
-	VSHUFI64X2 $0xaa, Z17, Z17, Z25
-	VSHUFI64X2 $0xff, Z17, Z17, Z29
-	VSHUFI64X2 $0x00, Z17, Z17, Z17
-
-	VSHUFI64X2 $0x55, Z18, Z18, Z22
-	VSHUFI64X2 $0xaa, Z18, Z18, Z26
-	VSHUFI64X2 $0xff, Z18, Z18, Z30
-	VSHUFI64X2 $0x00, Z18, Z18, Z18
-
-	VSHUFI64X2 $0x55, Z19, Z19, Z23
-	VSHUFI64X2 $0xaa, Z19, Z19, Z27
-	VSHUFI64X2 $0xff, Z19, Z19, Z31
-	VSHUFI64X2 $0x00, Z19, Z19, Z19
-
-	MOVQ         $15, BX
-	VPBROADCASTB BX, Z2
-
-	MOVB  addTo+56(FP), AX
-	IMULQ $-0x1, AX
-	KMOVQ AX, K1
-	MOVQ  in+0(FP), SI     // SI: &in
-	MOVQ  in_len+8(FP), AX // number of inputs
-	XORQ  R11, R11
-	MOVQ  out+24(FP), DX
-	MOVQ  (DX), DX         // DX: &out[0][0]
-
-loopback_avx512_parallel81:
-	VMOVDQU64.Z (DX), K1, Z4
-
-	LOAD(0x00)                         // &in[0][0]
-	GALOIS_MUL(Z16, Z20, Z14, Z15, Z4)
-
-	CMPQ AX, $1
-	JE   skip_avx512_parallel81
-
-	LOAD(0x18)                         // &in[1][0]
-	GALOIS_MUL(Z24, Z28, Z14, Z15, Z4)
-
-	CMPQ AX, $2
-	JE   skip_avx512_parallel81
-
-	LOAD(0x30)                         // &in[2][0]
-	GALOIS_MUL(Z17, Z21, Z14, Z15, Z4)
-
-	CMPQ AX, $3
-	JE   skip_avx512_parallel81
-
-	LOAD(0x48)                         // &in[3][0]
-	GALOIS_MUL(Z25, Z29, Z14, Z15, Z4)
-
-	CMPQ AX, $4
-	JE   skip_avx512_parallel81
-
-	LOAD(0x60)                         // &in[4][0]
-	GALOIS_MUL(Z18, Z22, Z14, Z15, Z4)
-
-	CMPQ AX, $5
-	JE   skip_avx512_parallel81
-
-	LOAD(0x78)                         // &in[5][0]
-	GALOIS_MUL(Z26, Z30, Z14, Z15, Z4)
-
-	CMPQ AX, $6
-	JE   skip_avx512_parallel81
-
-	LOAD(0x90)                         // &in[6][0]
-	GALOIS_MUL(Z19, Z23, Z14, Z15, Z4)
-
-	CMPQ AX, $7
-	JE   skip_avx512_parallel81
-
-	LOAD(0xa8)                         // &in[7][0]
-	GALOIS_MUL(Z27, Z31, Z14, Z15, Z4)
-
-skip_avx512_parallel81:
-	VMOVDQU64 Z4, (DX)
-
-	ADDQ $64, R11 // in4+=64
-
-	ADDQ $64, DX // out+=64
-
-	SUBQ $1, R9
-	JNZ  loopback_avx512_parallel81
-
-done_avx512_parallel81:
-	VZEROUPPER
-	RET
-
-//
-// Process 2 output rows in parallel from a total of 8 input rows
-//
-// func _galMulAVX512Parallel82(in, out [][]byte, matrix *[matrixSize82]byte, addTo bool)
-TEXT ·_galMulAVX512Parallel82(SB), 7, $0
-	MOVQ  in+0(FP), SI
-	MOVQ  8(SI), R9              // R9: len(in)
-	SHRQ  $6, R9                 // len(in) / 64
-	TESTQ R9, R9
-	JZ    done_avx512_parallel82
-
-	MOVQ      matrix+48(FP), SI
-	VMOVDQU64 0x000(SI), Z16
-	VMOVDQU64 0x040(SI), Z17
-	VMOVDQU64 0x080(SI), Z18
-	VMOVDQU64 0x0c0(SI), Z19
-	VMOVDQU64 0x100(SI), Z20
-	VMOVDQU64 0x140(SI), Z21
-	VMOVDQU64 0x180(SI), Z22
-	VMOVDQU64 0x1c0(SI), Z23
-
-	// Initialize multiplication constants
-	VSHUFI64X2 $0x55, Z16, Z16, Z24
-	VSHUFI64X2 $0xaa, Z16, Z16, Z25
-	VSHUFI64X2 $0xff, Z16, Z16, Z26
-	VSHUFI64X2 $0x00, Z16, Z16, Z16
-
-	VSHUFI64X2 $0x55, Z20, Z20, Z27
-	VSHUFI64X2 $0xaa, Z20, Z20, Z28
-	VSHUFI64X2 $0xff, Z20, Z20, Z29
-	VSHUFI64X2 $0x00, Z20, Z20, Z20
-
-	VSHUFI64X2 $0x55, Z17, Z17, Z30
-	VSHUFI64X2 $0xaa, Z17, Z17, Z31
-	VSHUFI64X2 $0xff, Z17, Z17, Z11
-	VSHUFI64X2 $0x00, Z17, Z17, Z17
-
-	VSHUFI64X2 $0x55, Z21, Z21, Z8
-	VSHUFI64X2 $0xaa, Z21, Z21, Z9
-	VSHUFI64X2 $0xff, Z21, Z21, Z10
-	VSHUFI64X2 $0x00, Z21, Z21, Z21
-
-	MOVQ         $15, BX
-	VPBROADCASTB BX, Z2
-
-	MOVB  addTo+56(FP), AX
-	IMULQ $-0x1, AX
-	KMOVQ AX, K1
-	MOVQ  in+0(FP), SI     // SI: &in
-	MOVQ  in_len+8(FP), AX // number of inputs
-	XORQ  R11, R11
-	MOVQ  out+24(FP), DX
-	MOVQ  24(DX), CX       // CX: &out[1][0]
-	MOVQ  (DX), DX         // DX: &out[0][0]
-
-loopback_avx512_parallel82:
-	VMOVDQU64.Z (DX), K1, Z4
-	VMOVDQU64.Z (CX), K1, Z5
-
-	LOAD(0x00)                         // &in[0][0]
-	GALOIS_MUL(Z16, Z24, Z14, Z15, Z4)
-	GALOIS_MUL(Z20, Z27, Z12, Z13, Z5)
-
-	CMPQ AX, $1
-	JE   skip_avx512_parallel82
-
-	LOAD(0x18)                         // &in[1][0]
-	GALOIS_MUL(Z25, Z26, Z14, Z15, Z4)
-	GALOIS_MUL(Z28, Z29, Z12, Z13, Z5)
-
-	CMPQ AX, $2
-	JE   skip_avx512_parallel82
-
-	LOAD(0x30)                         // &in[2][0]
-	GALOIS_MUL(Z17, Z30, Z14, Z15, Z4)
-	GALOIS_MUL(Z21, Z8, Z12, Z13, Z5)
-
-	CMPQ AX, $3
-	JE   skip_avx512_parallel82
-
-	LOAD(0x48)                         // &in[3][0]
-	GALOIS_MUL(Z31, Z11, Z14, Z15, Z4)
-	GALOIS_MUL(Z9, Z10, Z12, Z13, Z5)
-
-	CMPQ AX, $4
-	JE   skip_avx512_parallel82
-
-	LOAD(0x60)                            // &in[4][0]
-	GALOIS(0x00, 0x55, Z18, Z14, Z15, Z4)
-	GALOIS(0x00, 0x55, Z22, Z12, Z13, Z5)
-
-	CMPQ AX, $5
-	JE   skip_avx512_parallel82
-
-	LOAD(0x78)                            // &in[5][0]
-	GALOIS(0xaa, 0xff, Z18, Z14, Z15, Z4)
-	GALOIS(0xaa, 0xff, Z22, Z12, Z13, Z5)
-
-	CMPQ AX, $6
-	JE   skip_avx512_parallel82
-
-	LOAD(0x90)                            // &in[6][0]
-	GALOIS(0x00, 0x55, Z19, Z14, Z15, Z4)
-	GALOIS(0x00, 0x55, Z23, Z12, Z13, Z5)
-
-	CMPQ AX, $7
-	JE   skip_avx512_parallel82
-
-	LOAD(0xa8)                            // &in[7][0]
-	GALOIS(0xaa, 0xff, Z19, Z14, Z15, Z4)
-	GALOIS(0xaa, 0xff, Z23, Z12, Z13, Z5)
-
-skip_avx512_parallel82:
-	VMOVDQU64 Z4, (DX)
-	VMOVDQU64 Z5, (CX)
-
-	ADDQ $64, R11 // in4+=64
-
-	ADDQ $64, DX // out+=64
-	ADDQ $64, CX // out2+=64
-
-	SUBQ $1, R9
-	JNZ  loopback_avx512_parallel82
-
-done_avx512_parallel82:
-	VZEROUPPER
-	RET
-
-//
-// Process 4 output rows in parallel from a total of 8 input rows
-//
-// func _galMulAVX512Parallel84(in, out [][]byte, matrix *[matrixSize84]byte, addTo bool)
-TEXT ·_galMulAVX512Parallel84(SB), 7, $0
-	MOVQ  in+0(FP), SI
-	MOVQ  8(SI), R9              // R9: len(in)
-	SHRQ  $6, R9                 // len(in) / 64
-	TESTQ R9, R9
-	JZ    done_avx512_parallel84
-
-	MOVQ      matrix+48(FP), SI
-	VMOVDQU64 0x000(SI), Z16
-	VMOVDQU64 0x040(SI), Z17
-	VMOVDQU64 0x080(SI), Z18
-	VMOVDQU64 0x0c0(SI), Z19
-	VMOVDQU64 0x100(SI), Z20
-	VMOVDQU64 0x140(SI), Z21
-	VMOVDQU64 0x180(SI), Z22
-	VMOVDQU64 0x1c0(SI), Z23
-	VMOVDQU64 0x200(SI), Z24
-	VMOVDQU64 0x240(SI), Z25
-	VMOVDQU64 0x280(SI), Z26
-	VMOVDQU64 0x2c0(SI), Z27
-	VMOVDQU64 0x300(SI), Z28
-	VMOVDQU64 0x340(SI), Z29
-	VMOVDQU64 0x380(SI), Z30
-	VMOVDQU64 0x3c0(SI), Z31
-
-	MOVQ         $15, BX
-	VPBROADCASTB BX, Z2
-
-	MOVB  addTo+56(FP), AX
-	IMULQ $-0x1, AX
-	KMOVQ AX, K1
-	MOVQ  in+0(FP), SI     // SI: &in
-	MOVQ  in_len+8(FP), AX // number of inputs
-	XORQ  R11, R11
-	MOVQ  out+24(FP), DX
-	MOVQ  24(DX), CX       // CX: &out[1][0]
-	MOVQ  48(DX), R10      // R10: &out[2][0]
-	MOVQ  72(DX), R12      // R12: &out[3][0]
-	MOVQ  (DX), DX         // DX: &out[0][0]
-
-loopback_avx512_parallel84:
-	VMOVDQU64.Z (DX), K1, Z4
-	VMOVDQU64.Z (CX), K1, Z5
-	VMOVDQU64.Z (R10), K1, Z6
-	VMOVDQU64.Z (R12), K1, Z7
-
-	LOAD(0x00)                            // &in[0][0]
-	GALOIS(0x00, 0x55, Z16, Z14, Z15, Z4)
-	GALOIS(0x00, 0x55, Z20, Z12, Z13, Z5)
-	GALOIS(0x00, 0x55, Z24, Z10, Z11, Z6)
-	GALOIS(0x00, 0x55, Z28,  Z8,  Z9, Z7)
-
-	CMPQ AX, $1
-	JE   skip_avx512_parallel84
-
-	LOAD(0x18)                            // &in[1][0]
-	GALOIS(0xaa, 0xff, Z16, Z14, Z15, Z4)
-	GALOIS(0xaa, 0xff, Z20, Z12, Z13, Z5)
-	GALOIS(0xaa, 0xff, Z24, Z10, Z11, Z6)
-	GALOIS(0xaa, 0xff, Z28,  Z8,  Z9, Z7)
-
-	CMPQ AX, $2
-	JE   skip_avx512_parallel84
-
-	LOAD(0x30)                            // &in[2][0]
-	GALOIS(0x00, 0x55, Z17, Z14, Z15, Z4)
-	GALOIS(0x00, 0x55, Z21, Z12, Z13, Z5)
-	GALOIS(0x00, 0x55, Z25, Z10, Z11, Z6)
-	GALOIS(0x00, 0x55, Z29,  Z8,  Z9, Z7)
-
-	CMPQ AX, $3
-	JE   skip_avx512_parallel84
-
-	LOAD(0x48)                            // &in[3][0]
-	GALOIS(0xaa, 0xff, Z17, Z14, Z15, Z4)
-	GALOIS(0xaa, 0xff, Z21, Z12, Z13, Z5)
-	GALOIS(0xaa, 0xff, Z25, Z10, Z11, Z6)
-	GALOIS(0xaa, 0xff, Z29,  Z8,  Z9, Z7)
-
-	CMPQ AX, $4
-	JE   skip_avx512_parallel84
-
-	LOAD(0x60)                            // &in[4][0]
-	GALOIS(0x00, 0x55, Z18, Z14, Z15, Z4)
-	GALOIS(0x00, 0x55, Z22, Z12, Z13, Z5)
-	GALOIS(0x00, 0x55, Z26, Z10, Z11, Z6)
-	GALOIS(0x00, 0x55, Z30,  Z8,  Z9, Z7)
-
-	CMPQ AX, $5
-	JE   skip_avx512_parallel84
-
-	LOAD(0x78)                            // &in[5][0]
-	GALOIS(0xaa, 0xff, Z18, Z14, Z15, Z4)
-	GALOIS(0xaa, 0xff, Z22, Z12, Z13, Z5)
-	GALOIS(0xaa, 0xff, Z26, Z10, Z11, Z6)
-	GALOIS(0xaa, 0xff, Z30,  Z8,  Z9, Z7)
-
-	CMPQ AX, $6
-	JE   skip_avx512_parallel84
-
-	LOAD(0x90)                            // &in[6][0]
-	GALOIS(0x00, 0x55, Z19, Z14, Z15, Z4)
-	GALOIS(0x00, 0x55, Z23, Z12, Z13, Z5)
-	GALOIS(0x00, 0x55, Z27, Z10, Z11, Z6)
-	GALOIS(0x00, 0x55, Z31,  Z8,  Z9, Z7)
-
-	CMPQ AX, $7
-	JE   skip_avx512_parallel84
-
-	LOAD(0xa8)                            // &in[7][0]
-	GALOIS(0xaa, 0xff, Z19, Z14, Z15, Z4)
-	GALOIS(0xaa, 0xff, Z23, Z12, Z13, Z5)
-	GALOIS(0xaa, 0xff, Z27, Z10, Z11, Z6)
-	GALOIS(0xaa, 0xff, Z31,  Z8,  Z9, Z7)
-
-skip_avx512_parallel84:
-	VMOVDQU64 Z4, (DX)
-	VMOVDQU64 Z5, (CX)
-	VMOVDQU64 Z6, (R10)
-	VMOVDQU64 Z7, (R12)
-
-	ADDQ $64, R11 // in4+=64
-
-	ADDQ $64, DX  // out+=64
-	ADDQ $64, CX  // out2+=64
-	ADDQ $64, R10 // out3+=64
-	ADDQ $64, R12 // out4+=64
-
-	SUBQ $1, R9
-	JNZ  loopback_avx512_parallel84
-
-done_avx512_parallel84:
-	VZEROUPPER
-	RET
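
The deleted assembly above is built around one idea: a GF(2^8) multiplication by a fixed coefficient can be done with two 16-entry table lookups, one for the low nibble and one for the high nibble of the input byte, whose results are XORed together. That maps directly onto VPSHUFB, and it is what the LOAD/GALOIS_MUL macros implement and what the mulTableLow/mulTableHigh tables in the deleted Go file feed. A small pure-Go illustration of the same split, assuming the 0x11d field polynomial used here (gfMul is a throwaway reference multiply, not a package function):

package main

import "fmt"

// gfMul is a reference multiply over GF(2^8) with the 0x11d polynomial.
func gfMul(a, b byte) byte {
	var p byte
	for b > 0 {
		if b&1 != 0 {
			p ^= a
		}
		carry := a & 0x80
		a <<= 1
		if carry != 0 {
			a ^= 0x1d
		}
		b >>= 1
	}
	return p
}

func main() {
	const c = 0x2d // an arbitrary matrix coefficient
	var low, high [16]byte
	for i := 0; i < 16; i++ {
		low[i] = gfMul(c, byte(i))     // c * low nibble
		high[i] = gfMul(c, byte(i)<<4) // c * high nibble
	}
	in := byte(0xd7)
	viaTables := low[in&0x0f] ^ high[in>>4]
	fmt.Printf("tables: %#02x  direct: %#02x\n", viaTables, gfMul(c, in))
}
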
diff --git a/galoisAvx512_amd64_test.go b/galoisAvx512_amd64_test.go
deleted file mode 100644
index 685302f..0000000
--- a/galoisAvx512_amd64_test.go
+++ /dev/null
@@ -1,416 +0,0 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-// Copyright 2019, Minio, Inc.
-
-package reedsolomon
-
-import (
-	"bytes"
-	"math/rand"
-	"testing"
-	"time"
-)
-
-func testGaloisAvx512Parallelx1(t *testing.T, inputSize int) {
-
-	if !defaultOptions.useAVX512 {
-		t.Skip("AVX512 not detected")
-	}
-
-	rand.Seed(time.Now().UnixNano())
-
-	var size = 1024 * 1024
-	if testing.Short() {
-		size = 4096
-	}
-
-	in, out := make([][]byte, inputSize), make([][]byte, dimOut81)
-
-	for i := range in {
-		in[i] = make([]byte, size)
-		rand.Read(in[i])
-	}
-
-	for i := range out {
-		out[i] = make([]byte, size)
-		rand.Read(out[i])
-	}
-
-	opts := defaultOptions
-	opts.useSSSE3 = true
-
-	matrix := [(16 + 16) * dimIn * dimOut81]byte{}
-	coeffs := make([]byte, dimIn*len(out))
-
-	for i := 0; i < dimIn*len(out); i++ {
-		coeffs[i] = byte(rand.Int31n(256))
-		copy(matrix[i*32:], mulTableLow[coeffs[i]][:])
-		copy(matrix[i*32+16:], mulTableHigh[coeffs[i]][:])
-	}
-
-	// Do first run with clearing out any existing results
-	_galMulAVX512Parallel81(in, out, &matrix, false)
-
-	expect := make([][]byte, len(out))
-	for i := range expect {
-		expect[i] = make([]byte, size)
-		rand.Read(expect[i])
-	}
-
-	for i := range in {
-		if i == 0 {
-			galMulSlice(coeffs[i], in[i], expect[0], &options{})
-		} else {
-			galMulSliceXor(coeffs[i], in[i], expect[0], &options{})
-		}
-	}
-
-	for i := range out {
-		if 0 != bytes.Compare(out[i], expect[i]) {
-			t.Errorf("got [%d]%#v...,\n                  expected [%d]%#v...", i, out[i][:8], i, expect[i][:8])
-		}
-	}
-
-	inToAdd := make([][]byte, len(in))
-
-	for i := range inToAdd {
-		inToAdd[i] = make([]byte, size)
-		rand.Read(inToAdd[i])
-	}
-
-	for i := 0; i < dimIn*len(out); i++ {
-		coeffs[i] = byte(rand.Int31n(256))
-		copy(matrix[i*32:], mulTableLow[coeffs[i]][:])
-		copy(matrix[i*32+16:], mulTableHigh[coeffs[i]][:])
-	}
-
-	// Do second run by adding to original run
-	_galMulAVX512Parallel81(inToAdd, out, &matrix, true)
-
-	for i := range in {
-		galMulSliceXor(coeffs[i], inToAdd[i], expect[0], &options{})
-	}
-
-	for i := range out {
-		if 0 != bytes.Compare(out[i], expect[i]) {
-			t.Errorf("got [%d]%#v...,\n                  expected [%d]%#v...", i, out[i][:8], i, expect[i][:8])
-		}
-	}
-}
-
-func TestGaloisAvx512Parallel11(t *testing.T) { testGaloisAvx512Parallelx1(t, 1) }
-func TestGaloisAvx512Parallel21(t *testing.T) { testGaloisAvx512Parallelx1(t, 2) }
-func TestGaloisAvx512Parallel31(t *testing.T) { testGaloisAvx512Parallelx1(t, 3) }
-func TestGaloisAvx512Parallel41(t *testing.T) { testGaloisAvx512Parallelx1(t, 4) }
-func TestGaloisAvx512Parallel51(t *testing.T) { testGaloisAvx512Parallelx1(t, 5) }
-func TestGaloisAvx512Parallel61(t *testing.T) { testGaloisAvx512Parallelx1(t, 6) }
-func TestGaloisAvx512Parallel71(t *testing.T) { testGaloisAvx512Parallelx1(t, 7) }
-func TestGaloisAvx512Parallel81(t *testing.T) { testGaloisAvx512Parallelx1(t, 8) }
-
-func testGaloisAvx512Parallelx2(t *testing.T, inputSize int) {
-
-	if !defaultOptions.useAVX512 {
-		t.Skip("AVX512 not detected")
-	}
-
-	rand.Seed(time.Now().UnixNano())
-
-	var size = 1024 * 1024
-	if testing.Short() {
-		size = 4096
-	}
-
-	in, out := make([][]byte, inputSize), make([][]byte, dimOut82)
-
-	for i := range in {
-		in[i] = make([]byte, size)
-		rand.Read(in[i])
-	}
-
-	for i := range out {
-		out[i] = make([]byte, size)
-		rand.Read(out[i])
-	}
-
-	opts := defaultOptions
-	opts.useSSSE3 = true
-
-	matrix := [(16 + 16) * dimIn * dimOut82]byte{}
-	coeffs := make([]byte, dimIn*len(out))
-
-	for i := 0; i < dimIn*len(out); i++ {
-		coeffs[i] = byte(rand.Int31n(256))
-		copy(matrix[i*32:], mulTableLow[coeffs[i]][:])
-		copy(matrix[i*32+16:], mulTableHigh[coeffs[i]][:])
-	}
-
-	// Do first run with clearing out any existing results
-	_galMulAVX512Parallel82(in, out, &matrix, false)
-
-	expect := make([][]byte, len(out))
-	for i := range expect {
-		expect[i] = make([]byte, size)
-		rand.Read(expect[i])
-	}
-
-	for i := range in {
-		if i == 0 {
-			galMulSlice(coeffs[i], in[i], expect[0], &options{})
-			galMulSlice(coeffs[dimIn+i], in[i], expect[1], &options{})
-		} else {
-			galMulSliceXor(coeffs[i], in[i], expect[0], &options{})
-			galMulSliceXor(coeffs[dimIn+i], in[i], expect[1], &options{})
-		}
-	}
-
-	for i := range out {
-		if 0 != bytes.Compare(out[i], expect[i]) {
-			t.Errorf("got [%d]%#v...,\n                  expected [%d]%#v...", i, out[i][:8], i, expect[i][:8])
-		}
-	}
-
-	inToAdd := make([][]byte, len(in))
-
-	for i := range inToAdd {
-		inToAdd[i] = make([]byte, size)
-		rand.Read(inToAdd[i])
-	}
-
-	for i := 0; i < dimIn*len(out); i++ {
-		coeffs[i] = byte(rand.Int31n(256))
-		copy(matrix[i*32:], mulTableLow[coeffs[i]][:])
-		copy(matrix[i*32+16:], mulTableHigh[coeffs[i]][:])
-	}
-
-	// Do second run by adding to original run
-	_galMulAVX512Parallel82(inToAdd, out, &matrix, true)
-
-	for i := range in {
-		galMulSliceXor(coeffs[i], inToAdd[i], expect[0], &options{})
-		galMulSliceXor(coeffs[dimIn+i], inToAdd[i], expect[1], &options{})
-	}
-
-	for i := range out {
-		if 0 != bytes.Compare(out[i], expect[i]) {
-			t.Errorf("got [%d]%#v...,\n                  expected [%d]%#v...", i, out[i][:8], i, expect[i][:8])
-		}
-	}
-}
-
-func TestGaloisAvx512Parallel12(t *testing.T) { testGaloisAvx512Parallelx2(t, 1) }
-func TestGaloisAvx512Parallel22(t *testing.T) { testGaloisAvx512Parallelx2(t, 2) }
-func TestGaloisAvx512Parallel32(t *testing.T) { testGaloisAvx512Parallelx2(t, 3) }
-func TestGaloisAvx512Parallel42(t *testing.T) { testGaloisAvx512Parallelx2(t, 4) }
-func TestGaloisAvx512Parallel52(t *testing.T) { testGaloisAvx512Parallelx2(t, 5) }
-func TestGaloisAvx512Parallel62(t *testing.T) { testGaloisAvx512Parallelx2(t, 6) }
-func TestGaloisAvx512Parallel72(t *testing.T) { testGaloisAvx512Parallelx2(t, 7) }
-func TestGaloisAvx512Parallel82(t *testing.T) { testGaloisAvx512Parallelx2(t, 8) }
-
-func testGaloisAvx512Parallelx4(t *testing.T, inputSize int) {
-
-	if !defaultOptions.useAVX512 {
-		t.Skip("AVX512 not detected")
-	}
-
-	rand.Seed(time.Now().UnixNano())
-
-	var size = 1 << 20
-	if testing.Short() {
-		size = 4096
-	}
-
-	in, out := make([][]byte, inputSize), make([][]byte, dimOut84)
-
-	for i := range in {
-		in[i] = make([]byte, size)
-		rand.Read(in[i])
-	}
-
-	for i := range out {
-		out[i] = make([]byte, size)
-		rand.Read(out[i])
-	}
-
-	opts := defaultOptions
-	opts.useSSSE3 = true
-
-	matrix := [(16 + 16) * dimIn * dimOut84]byte{}
-	coeffs := make([]byte, dimIn*len(out))
-
-	for i := 0; i < dimIn*len(out); i++ {
-		coeffs[i] = byte(rand.Int31n(256))
-		copy(matrix[i*32:], mulTableLow[coeffs[i]][:])
-		copy(matrix[i*32+16:], mulTableHigh[coeffs[i]][:])
-	}
-
-	// Do first run with clearing out any existing results
-	_galMulAVX512Parallel84(in, out, &matrix, false)
-
-	expect := make([][]byte, 4)
-	for i := range expect {
-		expect[i] = make([]byte, size)
-		rand.Read(expect[i])
-	}
-
-	for i := range in {
-		if i == 0 {
-			galMulSlice(coeffs[i], in[i], expect[0], &options{})
-			galMulSlice(coeffs[dimIn+i], in[i], expect[1], &options{})
-			galMulSlice(coeffs[dimIn*2+i], in[i], expect[2], &options{})
-			galMulSlice(coeffs[dimIn*3+i], in[i], expect[3], &options{})
-		} else {
-			galMulSliceXor(coeffs[i], in[i], expect[0], &options{})
-			galMulSliceXor(coeffs[dimIn+i], in[i], expect[1], &options{})
-			galMulSliceXor(coeffs[dimIn*2+i], in[i], expect[2], &options{})
-			galMulSliceXor(coeffs[dimIn*3+i], in[i], expect[3], &options{})
-		}
-	}
-
-	for i := range out {
-		if 0 != bytes.Compare(out[i], expect[i]) {
-			t.Errorf("got [%d]%#v...,\n                  expected [%d]%#v...", i, out[i][:8], i, expect[i][:8])
-		}
-	}
-
-	inToAdd := make([][]byte, len(in))
-
-	for i := range inToAdd {
-		inToAdd[i] = make([]byte, size)
-		rand.Read(inToAdd[i])
-	}
-
-	for i := 0; i < dimIn*len(out); i++ {
-		coeffs[i] = byte(rand.Int31n(256))
-		copy(matrix[i*32:], mulTableLow[coeffs[i]][:])
-		copy(matrix[i*32+16:], mulTableHigh[coeffs[i]][:])
-	}
-
-	// Do second run by adding to original run
-	_galMulAVX512Parallel84(inToAdd, out, &matrix, true)
-
-	for i := range in {
-		galMulSliceXor(coeffs[i], inToAdd[i], expect[0], &options{})
-		galMulSliceXor(coeffs[dimIn+i], inToAdd[i], expect[1], &options{})
-		galMulSliceXor(coeffs[dimIn*2+i], inToAdd[i], expect[2], &options{})
-		galMulSliceXor(coeffs[dimIn*3+i], inToAdd[i], expect[3], &options{})
-	}
-
-	for i := range out {
-		if 0 != bytes.Compare(out[i], expect[i]) {
-			t.Errorf("got [%d]%#v...,\n                  expected [%d]%#v...", i, out[i][:8], i, expect[i][:8])
-		}
-	}
-}
-
-func TestGaloisAvx512Parallel14(t *testing.T) { testGaloisAvx512Parallelx4(t, 1) }
-func TestGaloisAvx512Parallel24(t *testing.T) { testGaloisAvx512Parallelx4(t, 2) }
-func TestGaloisAvx512Parallel34(t *testing.T) { testGaloisAvx512Parallelx4(t, 3) }
-func TestGaloisAvx512Parallel44(t *testing.T) { testGaloisAvx512Parallelx4(t, 4) }
-func TestGaloisAvx512Parallel54(t *testing.T) { testGaloisAvx512Parallelx4(t, 5) }
-func TestGaloisAvx512Parallel64(t *testing.T) { testGaloisAvx512Parallelx4(t, 6) }
-func TestGaloisAvx512Parallel74(t *testing.T) { testGaloisAvx512Parallelx4(t, 7) }
-func TestGaloisAvx512Parallel84(t *testing.T) { testGaloisAvx512Parallelx4(t, 8) }
-
-func testCodeSomeShardsAvx512WithLength(t *testing.T, ds, ps, l int, parallel bool) {
-
-	if !defaultOptions.useAVX512 {
-		t.Skip("AVX512 not detected")
-	}
-
-	var data = make([]byte, l)
-	fillRandom(data)
-	enc, _ := New(ds, ps)
-	r := enc.(*reedSolomon) // need to access private methods
-	shards, _ := enc.Split(data)
-
-	// Fill shards to encode with garbage
-	for i := r.DataShards; i < r.DataShards+r.ParityShards; i++ {
-		rand.Read(shards[i])
-	}
-
-	if parallel {
-		r.codeSomeShardsAvx512P(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
-	} else {
-		r.codeSomeShardsAvx512(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
-	}
-
-	correct, _ := r.Verify(shards)
-	if !correct {
-		t.Errorf("Verification of encoded shards failed")
-	}
-}
-
-func testCodeSomeShardsAvx512(t *testing.T, ds, ps int) {
-
-	if !defaultOptions.useAVX512 {
-		t.Skip("AVX512 not detected")
-	}
-	step := 1
-	if testing.Short() {
-		// A prime for variation
-		step += 29
-	}
-	for l := 1; l <= 8192; l += step {
-		testCodeSomeShardsAvx512WithLength(t, ds, ps, l, false)
-		testCodeSomeShardsAvx512WithLength(t, ds, ps, l, true)
-	}
-}
-
-func TestCodeSomeShardsAvx512_8x2(t *testing.T)  { testCodeSomeShardsAvx512(t, 8, 2) }
-func TestCodeSomeShardsAvx512_1x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 1, 4) }
-func TestCodeSomeShardsAvx512_2x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 2, 4) }
-func TestCodeSomeShardsAvx512_3x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 3, 4) }
-func TestCodeSomeShardsAvx512_4x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 4, 4) }
-func TestCodeSomeShardsAvx512_5x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 5, 4) }
-func TestCodeSomeShardsAvx512_6x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 6, 4) }
-func TestCodeSomeShardsAvx512_7x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 7, 4) }
-func TestCodeSomeShardsAvx512_8x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 8, 4) }
-func TestCodeSomeShardsAvx512_9x4(t *testing.T)  { testCodeSomeShardsAvx512(t, 9, 4) }
-func TestCodeSomeShardsAvx512_10x4(t *testing.T) { testCodeSomeShardsAvx512(t, 10, 4) }
-func TestCodeSomeShardsAvx512_12x4(t *testing.T) { testCodeSomeShardsAvx512(t, 12, 4) }
-func TestCodeSomeShardsAvx512_16x4(t *testing.T) { testCodeSomeShardsAvx512(t, 16, 4) }
-func TestCodeSomeShardsAvx512_3x6(t *testing.T)  { testCodeSomeShardsAvx512(t, 3, 6) }
-func TestCodeSomeShardsAvx512_8x6(t *testing.T)  { testCodeSomeShardsAvx512(t, 8, 6) }
-func TestCodeSomeShardsAvx512_8x7(t *testing.T)  { testCodeSomeShardsAvx512(t, 8, 7) }
-func TestCodeSomeShardsAvx512_3x8(t *testing.T)  { testCodeSomeShardsAvx512(t, 3, 8) }
-func TestCodeSomeShardsAvx512_8x8(t *testing.T)  { testCodeSomeShardsAvx512(t, 8, 8) }
-func TestCodeSomeShardsAvx512_5x10(t *testing.T) { testCodeSomeShardsAvx512(t, 5, 10) }
-func TestCodeSomeShardsAvx512_8x10(t *testing.T) { testCodeSomeShardsAvx512(t, 8, 10) }
-func TestCodeSomeShardsAvx512_9x10(t *testing.T) { testCodeSomeShardsAvx512(t, 9, 10) }
-
-func TestCodeSomeShardsAvx512_Manyx4(t *testing.T) {
-
-	if !defaultOptions.useAVX512 {
-		return
-	}
-
-	step := 1
-	if testing.Short() {
-		step += 7
-	}
-	for inputs := 1; inputs <= 200; inputs += step {
-		testCodeSomeShardsAvx512WithLength(t, inputs, 4, 1024+33, false)
-		testCodeSomeShardsAvx512WithLength(t, inputs, 4, 1024+33, true)
-	}
-}
-
-func TestCodeSomeShardsAvx512_ManyxMany(t *testing.T) {
-
-	if !defaultOptions.useAVX512 {
-		return
-	}
-
-	step := 1
-	if testing.Short() {
-		step += 5
-	}
-	for outputs := 1; outputs <= 32; outputs += step {
-		for inputs := 1; inputs <= 32; inputs += step {
-			testCodeSomeShardsAvx512WithLength(t, inputs, outputs, 1024+33, false)
-			testCodeSomeShardsAvx512WithLength(t, inputs, outputs, 1024+33, true)
-		}
-	}
-}
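
The removed tests above all follow the same shape: run a SIMD kernel, then replay the same multiplies with the scalar helpers and compare the output. A condensed sketch of that reference check, assuming it sits in package reedsolomon next to the unexported galMulSlice, galMulSliceXor and options (checkAgainstScalar is an illustrative helper, not part of the package):

package reedsolomon

import (
	"bytes"
	"testing"
)

// checkAgainstScalar recomputes every output row with the scalar helpers
// and compares it to what an optimized kernel produced in out.
// coeffs is row-major: coeffs[o*len(in)+i] multiplies in[i] into out[o].
func checkAgainstScalar(t *testing.T, in, out [][]byte, coeffs []byte) {
	t.Helper()
	for o := range out {
		expect := make([]byte, len(out[o]))
		for i := range in {
			c := coeffs[o*len(in)+i]
			if i == 0 {
				galMulSlice(c, in[i], expect, &options{})
			} else {
				galMulSliceXor(c, in[i], expect, &options{})
			}
		}
		if !bytes.Equal(out[o], expect) {
			t.Errorf("output row %d does not match the scalar reference", o)
		}
	}
}
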
diff --git a/galois_amd64.go b/galois_amd64.go
index f757f9d..9f84276 100644
--- a/galois_amd64.go
+++ b/galois_amd64.go
@@ -1,6 +1,5 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
+//go:build !noasm && !appengine && !gccgo
+// +build !noasm,!appengine,!gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 
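
The build-tag change in the hunk above is mechanical: the three legacy //+build lines become a single //go:build expression (Go 1.17+), with one legacy // +build line kept so older toolchains apply the same constraint. The semantics are easy to get wrong in the old form, which is the point of the new one:

// Old form as used by this file: separate lines are ANDed together,
// and within a single line a space means OR while a comma means AND.
//+build !noasm
//+build !appengine
//+build !gccgo

// New form, plus the equivalent single legacy line for pre-1.17 toolchains:
//go:build !noasm && !appengine && !gccgo
// +build !noasm,!appengine,!gccgo
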
@@ -30,6 +29,9 @@ func galMulAVX2_64(low, high, in, out []byte)
 //go:noescape
 func sSE2XorSlice_64(in, out []byte)
 
+//go:noescape
+func avx2XorSlice_64(in, out []byte)
+
 // This is what the assembler routines do in blocks of 16 bytes:
 /*
 func galMulSSSE3(low, high, in, out []byte) {
@@ -108,6 +110,9 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 		in = in[done:]
 		out = out[done:]
 	}
+	if len(in) == 0 {
+		return
+	}
 	out = out[:len(in)]
 	mt := mulTable[c][:256]
 	for i := range in {
@@ -115,14 +120,21 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 	}
 }
 
-// slice galois add
+// simple slice xor
 func sliceXor(in, out []byte, o *options) {
 	if o.useSSE2 {
 		if len(in) >= bigSwitchover {
-			sSE2XorSlice_64(in, out)
-			done := (len(in) >> 6) << 6
-			in = in[done:]
-			out = out[done:]
+			if o.useAVX2 {
+				avx2XorSlice_64(in, out)
+				done := (len(in) >> 6) << 6
+				in = in[done:]
+				out = out[done:]
+			} else {
+				sSE2XorSlice_64(in, out)
+				done := (len(in) >> 6) << 6
+				in = in[done:]
+				out = out[done:]
+			}
 		}
 		if len(in) >= 16 {
 			sSE2XorSlice(in, out)
@@ -130,9 +142,450 @@ func sliceXor(in, out []byte, o *options) {
 			in = in[done:]
 			out = out[done:]
 		}
+	} else {
+		sliceXorGo(in, out, o)
+		return
 	}
 	out = out[:len(in)]
 	for i := range in {
 		out[i] ^= in[i]
 	}
 }
+
+// 4-way butterfly
+func ifftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	if len(work[0]) == 0 {
+		return
+	}
+
+	t01 := &multiply256LUT[log_m01]
+	t23 := &multiply256LUT[log_m23]
+	t02 := &multiply256LUT[log_m02]
+	if o.useAVX512 {
+		if log_m01 == modulus {
+			if log_m23 == modulus {
+				if log_m02 == modulus {
+					ifftDIT4_avx512_7(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx512_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus {
+					ifftDIT4_avx512_5(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx512_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m23 == modulus {
+				if log_m02 == modulus {
+					ifftDIT4_avx512_6(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx512_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus {
+					ifftDIT4_avx512_4(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx512_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	} else if o.useAVX2 {
+		if log_m01 == modulus {
+			if log_m23 == modulus {
+				if log_m02 == modulus {
+					ifftDIT4_avx2_7(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx2_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus {
+					ifftDIT4_avx2_5(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx2_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m23 == modulus {
+				if log_m02 == modulus {
+					ifftDIT4_avx2_6(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx2_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus {
+					ifftDIT4_avx2_4(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT4_avx2_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	}
+	ifftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
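
The branch ladder in ifftDIT4 above picks one of eight generated kernels (suffix _0 through _7) according to which of the three multipliers equal the modulus and may therefore skip their multiply. The suffix is simply a 3-bit mask; a hypothetical helper, not part of the package, that computes the same index for ifftDIT4:

// Matches the nesting in ifftDIT4 above: log_m01 contributes bit 0,
// log_m23 bit 1 and log_m02 bit 2. (fftDIT4 further down uses a
// different bit order, so this helper is only illustrative.)
func ifftKernelIndex(m01IsModulus, m23IsModulus, m02IsModulus bool) int {
	idx := 0
	if m01IsModulus {
		idx |= 1
	}
	if m23IsModulus {
		idx |= 2
	}
	if m02IsModulus {
		idx |= 4
	}
	return idx
}
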
+
+// 4-way butterfly
+func ifftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	if len(work[0]) == 0 {
+		return
+	}
+
+	if false && o.useGFNI {
+		// Note that these currently require that length is multiple of 64.
+		t01 := gf2p811dMulMatrices[log_m01]
+		t23 := gf2p811dMulMatrices[log_m23]
+		t02 := gf2p811dMulMatrices[log_m02]
+		if log_m01 == modulus8 {
+			if log_m23 == modulus8 {
+				if log_m02 == modulus8 {
+					ifftDIT48_gfni_7(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_gfni_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus8 {
+					ifftDIT48_gfni_5(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_gfni_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m23 == modulus8 {
+				if log_m02 == modulus8 {
+					ifftDIT48_gfni_6(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_gfni_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus8 {
+					ifftDIT48_gfni_4(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_gfni_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	}
+	if o.useAVX2 {
+		// Note that these currently require that length is multiple of 64.
+		t01 := &multiply256LUT8[log_m01]
+		t23 := &multiply256LUT8[log_m23]
+		t02 := &multiply256LUT8[log_m02]
+		if log_m01 == modulus8 {
+			if log_m23 == modulus8 {
+				if log_m02 == modulus8 {
+					ifftDIT48_avx2_7(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_avx2_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus8 {
+					ifftDIT48_avx2_5(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_avx2_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m23 == modulus8 {
+				if log_m02 == modulus8 {
+					ifftDIT48_avx2_6(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_avx2_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m02 == modulus8 {
+					ifftDIT48_avx2_4(work, dist*24, t01, t23, t02)
+				} else {
+					ifftDIT48_avx2_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	}
+	ifftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+func fftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	if len(work[0]) == 0 {
+		return
+	}
+
+	t01 := &multiply256LUT[log_m01]
+	t23 := &multiply256LUT[log_m23]
+	t02 := &multiply256LUT[log_m02]
+	if o.useAVX512 {
+		if log_m02 == modulus {
+			if log_m01 == modulus {
+				if log_m23 == modulus {
+					fftDIT4_avx512_7(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx512_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus {
+					fftDIT4_avx512_5(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx512_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m01 == modulus {
+				if log_m23 == modulus {
+					fftDIT4_avx512_6(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx512_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus {
+					fftDIT4_avx512_4(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx512_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	} else if o.useAVX2 {
+		if log_m02 == modulus {
+			if log_m01 == modulus {
+				if log_m23 == modulus {
+					fftDIT4_avx2_7(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx2_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus {
+					fftDIT4_avx2_5(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx2_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m01 == modulus {
+				if log_m23 == modulus {
+					fftDIT4_avx2_6(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx2_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus {
+					fftDIT4_avx2_4(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT4_avx2_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	}
+	fftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func fftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	if len(work[0]) == 0 {
+		return
+	}
+
+	if false && o.useGFNI {
+		t01 := gf2p811dMulMatrices[log_m01]
+		t23 := gf2p811dMulMatrices[log_m23]
+		t02 := gf2p811dMulMatrices[log_m02]
+		// Note that these currently require that length is multiple of 64.
+		if log_m02 == modulus8 {
+			if log_m01 == modulus8 {
+				if log_m23 == modulus8 {
+					fftDIT48_gfni_7(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_gfni_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus8 {
+					fftDIT48_gfni_5(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_gfni_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m01 == modulus8 {
+				if log_m23 == modulus8 {
+					fftDIT48_gfni_6(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_gfni_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus8 {
+					fftDIT48_gfni_4(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_gfni_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	}
+	if o.useAVX2 {
+		t01 := &multiply256LUT8[log_m01]
+		t23 := &multiply256LUT8[log_m23]
+		t02 := &multiply256LUT8[log_m02]
+		// Note that these currently require that length is multiple of 64.
+		if log_m02 == modulus8 {
+			if log_m01 == modulus8 {
+				if log_m23 == modulus8 {
+					fftDIT48_avx2_7(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_avx2_3(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus8 {
+					fftDIT48_avx2_5(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_avx2_1(work, dist*24, t01, t23, t02)
+				}
+			}
+		} else {
+			if log_m01 == modulus8 {
+				if log_m23 == modulus8 {
+					fftDIT48_avx2_6(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_avx2_2(work, dist*24, t01, t23, t02)
+				}
+			} else {
+				if log_m23 == modulus8 {
+					fftDIT48_avx2_4(work, dist*24, t01, t23, t02)
+				} else {
+					fftDIT48_avx2_0(work, dist*24, t01, t23, t02)
+				}
+			}
+		}
+		return
+	}
+	fftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 2-way butterfly forward
+func fftDIT2(x, y []byte, log_m ffe, o *options) {
+	if len(x) == 0 {
+		return
+	}
+	if o.useAVX2 {
+		tmp := &multiply256LUT[log_m]
+		fftDIT2_avx2(x, y, tmp)
+	} else if o.useSSSE3 {
+		tmp := &multiply256LUT[log_m]
+		fftDIT2_ssse3(x, y, tmp)
+	} else {
+		// Reference version:
+		refMulAdd(x, y, log_m)
+		sliceXor(x, y, o)
+	}
+}
+
+// 2-way butterfly forward
+func fftDIT28(x, y []byte, log_m ffe8, o *options) {
+	if len(x) == 0 {
+		return
+	}
+
+	if o.useAVX2 {
+		fftDIT28_avx2(x, y, &multiply256LUT8[log_m])
+		if len(x)&63 == 0 {
+			return
+		}
+		done := (len(y) >> 6) << 6
+		y = y[done:]
+		x = x[done:]
+	}
+	mulAdd8(x, y, log_m, o)
+	sliceXor(x, y, o)
+}
+
+// 2-way butterfly inverse
+func ifftDIT28(x, y []byte, log_m ffe8, o *options) {
+	if len(x) == 0 {
+		return
+	}
+
+	if o.useAVX2 {
+		ifftDIT28_avx2(x, y, &multiply256LUT8[log_m])
+		if len(x)&63 == 0 {
+			return
+		}
+		done := (len(y) >> 6) << 6
+		y = y[done:]
+		x = x[done:]
+	}
+	sliceXor(x, y, o)
+	mulAdd8(x, y, log_m, o)
+}
+
+func mulAdd8(x, y []byte, log_m ffe8, o *options) {
+	if o.useAVX2 {
+		t := &multiply256LUT8[log_m]
+		galMulAVX2Xor_64(t[:16], t[16:32], y, x)
+		done := (len(y) >> 6) << 6
+		y = y[done:]
+		x = x[done:]
+	} else if o.useSSSE3 {
+		t := &multiply256LUT8[log_m]
+		galMulSSSE3Xor(t[:16], t[16:32], y, x)
+		done := (len(y) >> 4) << 4
+		y = y[done:]
+		x = x[done:]
+	}
+	refMulAdd8(x, y, log_m)
+}
+
+// 2-way butterfly
+func ifftDIT2(x, y []byte, log_m ffe, o *options) {
+	if len(x) == 0 {
+		return
+	}
+	if o.useAVX2 {
+		tmp := &multiply256LUT[log_m]
+		ifftDIT2_avx2(x, y, tmp)
+	} else if o.useSSSE3 {
+		tmp := &multiply256LUT[log_m]
+		ifftDIT2_ssse3(x, y, tmp)
+	} else {
+		// Reference version:
+		sliceXor(x, y, o)
+		refMulAdd(x, y, log_m)
+	}
+}
+
+func mulgf16(x, y []byte, log_m ffe, o *options) {
+	if len(x) == 0 {
+		return
+	}
+	if o.useAVX2 {
+		tmp := &multiply256LUT[log_m]
+		mulgf16_avx2(x, y, tmp)
+	} else if o.useSSSE3 {
+		tmp := &multiply256LUT[log_m]
+		mulgf16_ssse3(x, y, tmp)
+	} else {
+		refMul(x, y, log_m)
+	}
+}
+
+func mulgf8(out, in []byte, log_m ffe8, o *options) {
+	if o.useAVX2 {
+		t := &multiply256LUT8[log_m]
+		galMulAVX2_64(t[:16], t[16:32], in, out)
+		done := (len(in) >> 6) << 6
+		in = in[done:]
+		out = out[done:]
+	} else if o.useSSSE3 {
+		t := &multiply256LUT8[log_m]
+		galMulSSSE3(t[:16], t[16:32], in, out)
+		done := (len(in) >> 4) << 4
+		in = in[done:]
+		out = out[done:]
+	}
+	out = out[:len(in)]
+	mt := mul8LUTs[log_m].Value[:]
+	for i := range in {
+		out[i] = byte(mt[in[i]])
+	}
+}
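
The nested comparisons in the ifftDIT4/ifftDIT48/fftDIT4/fftDIT48 wrappers above pick one of eight generated kernels depending on which of log_m01, log_m23 and log_m02 equals the field modulus (a log equal to the modulus marks a multiply that can be skipped; fftDIT4/fftDIT48 test the same three conditions in a different nesting order). Reading the branch structure of ifftDIT48, the kernel suffix is just a 3-bit mask. A minimal illustrative sketch of that selection, using placeholder names and a placeholder modulus rather than the package's own types:

package main

import "fmt"

// Illustrative only: mirrors the branch structure of ifftDIT48 above.
// Each log value equal to the modulus sets one bit, and the resulting
// 0..7 index matches the _0.._7 suffix of the generated kernel.
func ifftKernelIndex(logM01, logM23, logM02, modulus int) int {
	idx := 0
	if logM01 == modulus {
		idx |= 1
	}
	if logM23 == modulus {
		idx |= 2
	}
	if logM02 == modulus {
		idx |= 4
	}
	return idx
}

func main() {
	const modulus = 255 // placeholder; stands in for modulus8 in the code above
	// log_m01 and log_m02 equal to the modulus, log_m23 not:
	fmt.Println(ifftKernelIndex(modulus, 10, modulus, modulus)) // 5 -> the *_5 kernel variant
}
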
diff --git a/galois_amd64.s b/galois_amd64.s
index f1dc8d5..3e97c7c 100644
--- a/galois_amd64.s
+++ b/galois_amd64.s
@@ -239,17 +239,15 @@ done_xor_sse2:
 
 // func galMulAVX2Xor_64(low, high, in, out []byte)
 TEXT ·galMulAVX2Xor_64(SB), 7, $0
-	MOVQ  low+0(FP), SI     // SI: &low
-	MOVQ  high+24(FP), DX   // DX: &high
-	MOVQ  $15, BX           // BX: low mask
-	MOVQ  BX, X5
-	MOVOU (SI), X6          // X6: low
-	MOVOU (DX), X7          // X7: high
-	MOVQ  in_len+56(FP), R9 // R9: len(in)
+	MOVQ low+0(FP), SI     // SI: &low
+	MOVQ high+24(FP), DX   // DX: &high
+	MOVQ $15, BX           // BX: low mask
+	MOVQ BX, X5
+	MOVQ in_len+56(FP), R9 // R9: len(in)
 
-	VINSERTI128  $1, X6, Y6, Y6 // low
-	VINSERTI128  $1, X7, Y7, Y7 // high
-	VPBROADCASTB X5, Y8         // Y8: lomask (unpacked)
+	VBROADCASTI128 (SI), Y6 // low table
+	VBROADCASTI128 (DX), Y7 // high table
+	VPBROADCASTB   X5, Y8   // Y8: lomask (unpacked)
 
 	SHRQ  $6, R9           // len(in) / 64
 	MOVQ  out+72(FP), DX   // DX: &out
@@ -290,17 +288,14 @@ done_xor_avx2_64:
 
 // func galMulAVX2_64(low, high, in, out []byte)
 TEXT ·galMulAVX2_64(SB), 7, $0
-	MOVQ  low+0(FP), SI     // SI: &low
-	MOVQ  high+24(FP), DX   // DX: &high
-	MOVQ  $15, BX           // BX: low mask
-	MOVQ  BX, X5
-	MOVOU (SI), X6          // X6: low
-	MOVOU (DX), X7          // X7: high
-	MOVQ  in_len+56(FP), R9 // R9: len(in)
-
-	VINSERTI128  $1, X6, Y6, Y6 // low
-	VINSERTI128  $1, X7, Y7, Y7 // high
-	VPBROADCASTB X5, Y8         // Y8: lomask (unpacked)
+	MOVQ           low+0(FP), SI     // SI: &low
+	MOVQ           high+24(FP), DX   // DX: &high
+	MOVQ           $15, BX           // BX: low mask
+	MOVQ           BX, X5
+	MOVQ           in_len+56(FP), R9 // R9: len(in)
+	VBROADCASTI128 (SI), Y6          // low table
+	VBROADCASTI128 (DX), Y7          // high table
+	VPBROADCASTB   X5, Y8            // Y8: lomask (unpacked)
 
 	SHRQ  $6, R9         // len(in) / 64
 	MOVQ  out+72(FP), DX // DX: &out
@@ -368,3 +363,32 @@ loopback_xor_sse2_64:
 
 done_xor_sse2_64:
 	RET
+
+// func avx2XorSlice_64(in, out []byte)
+TEXT ·avx2XorSlice_64(SB), 7, $0
+	MOVQ in+0(FP), SI     // SI: &in
+	MOVQ in_len+8(FP), R9 // R9: len(in)
+	MOVQ out+24(FP), DX   // DX: &out
+	SHRQ $6, R9           // len(in) / 64
+	CMPQ R9, $0
+	JEQ  done_xor_avx2_64
+
+loopback_xor_avx2_64:
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y2
+	VMOVDQU (DX), Y1
+	VMOVDQU 32(DX), Y3
+	VPXOR   Y0, Y1, Y1
+	VPXOR   Y2, Y3, Y3
+	VMOVDQU Y1, (DX)
+	VMOVDQU Y3, 32(DX)
+
+	ADDQ $64, SI              // in+=64
+	ADDQ $64, DX              // out+=64
+	SUBQ $1, R9
+	JNZ  loopback_xor_avx2_64
+	VZEROUPPER
+
+done_xor_avx2_64:
+
+	RET
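
The new avx2XorSlice_64 above XORs in into out in whole 64-byte blocks, two 32-byte YMM loads and stores per iteration, and leaves any tail shorter than 64 bytes alone; the Go helpers handle that tail with the done := (len(y) >> 6) << 6 pattern seen earlier. A plain-Go sketch of the same contract (illustrative only, not the package's code):

package main

import "fmt"

// xorSlice64 XORs in into out in whole 64-byte blocks only, matching the
// contract of avx2XorSlice_64 above; any tail shorter than 64 bytes is
// left for the caller to handle.
func xorSlice64(in, out []byte) {
	n := (len(in) >> 6) << 6 // largest multiple of 64 that fits
	for i := 0; i < n; i++ {
		out[i] ^= in[i]
	}
}

func main() {
	in := make([]byte, 100)
	out := make([]byte, 100)
	for i := range in {
		in[i] = byte(i)
	}
	xorSlice64(in, out)
	fmt.Println(out[63], out[64]) // 63 0: bytes 64..99 were not touched
}
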
diff --git a/galois_arm64.go b/galois_arm64.go
index 23a1dd2..9ab2794 100644
--- a/galois_arm64.go
+++ b/galois_arm64.go
@@ -1,6 +1,5 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
+//go:build !noasm && !appengine && !gccgo
+// +build !noasm,!appengine,!gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 // Copyright 2017, Minio, Inc.
@@ -52,7 +51,7 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 	}
 }
 
-// slice galois add
+// simple slice xor
 func sliceXor(in, out []byte, o *options) {
 
 	galXorNEON(in, out)
@@ -65,3 +64,83 @@ func sliceXor(in, out []byte, o *options) {
 		}
 	}
 }
+
+// 4-way butterfly
+func ifftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	ifftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func ifftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	ifftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func fftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	fftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func fftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	fftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 2-way butterfly forward
+func fftDIT2(x, y []byte, log_m ffe, o *options) {
+	// Reference version:
+	refMulAdd(x, y, log_m)
+	// 64 byte aligned, always full.
+	galXorNEON(x, y)
+}
+
+// 2-way butterfly forward
+func fftDIT28(x, y []byte, log_m ffe8, o *options) {
+	// Reference version:
+	mulAdd8(x, y, log_m, o)
+	sliceXor(x, y, o)
+}
+
+// 2-way butterfly
+func ifftDIT2(x, y []byte, log_m ffe, o *options) {
+	// 64 byte aligned, always full.
+	galXorNEON(x, y)
+	// Reference version:
+	refMulAdd(x, y, log_m)
+}
+
+// 2-way butterfly inverse
+func ifftDIT28(x, y []byte, log_m ffe8, o *options) {
+	// Reference version:
+	sliceXor(x, y, o)
+	mulAdd8(x, y, log_m, o)
+}
+
+func mulgf16(x, y []byte, log_m ffe, o *options) {
+	refMul(x, y, log_m)
+}
+
+func mulAdd8(out, in []byte, log_m ffe8, o *options) {
+	t := &multiply256LUT8[log_m]
+	galMulXorNEON(t[:16], t[16:32], in, out)
+	done := (len(in) >> 5) << 5
+	in = in[done:]
+	if len(in) > 0 {
+		out = out[done:]
+		refMulAdd8(in, out, log_m)
+	}
+}
+
+func mulgf8(out, in []byte, log_m ffe8, o *options) {
+	var done int
+	t := &multiply256LUT8[log_m]
+	galMulNEON(t[:16], t[16:32], in, out)
+	done = (len(in) >> 5) << 5
+
+	remain := len(in) - done
+	if remain > 0 {
+		mt := mul8LUTs[log_m].Value[:]
+		for i := done; i < len(in); i++ {
+			out[i] ^= byte(mt[in[i]])
+		}
+	}
+}
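
In the arm64 glue above, mulAdd8 and mulgf8 let the NEON kernels cover whole 32-byte blocks and finish the remainder with a per-byte lookup in mul8LUTs[log_m].Value, a 256-entry table of products with the element that log_m encodes. The sketch below shows the same table-driven idea; the gfMul helper uses the common 0x11d reduction polynomial purely for illustration and is not necessarily the field construction this package uses:

package main

import "fmt"

// gfMul multiplies two GF(2^8) elements using the 0x11d reduction
// polynomial. It only serves to build an illustrative lookup table here;
// the package precomputes its own mul8LUTs for its field.
func gfMul(a, b byte) byte {
	var p byte
	for b > 0 {
		if b&1 != 0 {
			p ^= a
		}
		carry := a & 0x80
		a <<= 1
		if carry != 0 {
			a ^= 0x1d // low byte of the 0x11d polynomial
		}
		b >>= 1
	}
	return p
}

func main() {
	const c = 0x57 // arbitrary constant multiplier
	var table [256]byte
	for i := range table {
		table[i] = gfMul(byte(i), c) // table[v] == v * c in GF(2^8)
	}
	in := []byte{1, 2, 3, 250}
	out := make([]byte, len(in))
	for i := range in { // same shape as the scalar tail loops above
		out[i] = table[in[i]]
	}
	fmt.Println(out)
}
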
diff --git a/galois_gen_amd64.go b/galois_gen_amd64.go
index dbd77aa..5f53c3b 100644
--- a/galois_gen_amd64.go
+++ b/galois_gen_amd64.go
@@ -1,658 +1,2753 @@
 // Code generated by command: go run gen.go -out ../galois_gen_amd64.s -stubs ../galois_gen_amd64.go -pkg=reedsolomon. DO NOT EDIT.
 
-// +build !appengine
-// +build !noasm
-// +build !nogen
-// +build gc
+//go:build !appengine && !noasm && !nogen && gc
 
 package reedsolomon
 
+func _dummy_()
+
 // mulAvxTwo_1x1 takes 1 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_1x1_64 takes 1 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x1_64 takes 1 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x1_64Xor takes 1 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_1x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x1Xor takes 1 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x1_64Xor takes 1 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x2 takes 1 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_1x2_64 takes 1 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x2_64 takes 1 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x2_64Xor takes 1 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_1x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x2Xor takes 1 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x2_64Xor takes 1 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x3 takes 1 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_1x3_64 takes 1 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x3_64 takes 1 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x3_64Xor takes 1 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_1x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x3Xor takes 1 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x3_64Xor takes 1 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x4 takes 1 inputs and produces 4 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x4_64 takes 1 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x4_64Xor takes 1 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_1x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x4Xor takes 1 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x5 takes 1 inputs and produces 5 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x5_64 takes 1 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x5_64Xor takes 1 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_1x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x5Xor takes 1 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x6 takes 1 inputs and produces 6 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x6_64 takes 1 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x6_64Xor takes 1 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_1x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x6Xor takes 1 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x7 takes 1 inputs and produces 7 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x7_64 takes 1 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x7_64Xor takes 1 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_1x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x7Xor takes 1 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x8 takes 1 inputs and produces 8 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x8_64 takes 1 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x8_64Xor takes 1 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_1x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x8Xor takes 1 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x9 takes 1 inputs and produces 9 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x9_64 takes 1 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x9_64Xor takes 1 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_1x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x9Xor takes 1 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x10 takes 1 inputs and produces 10 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_1x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_1x10_64 takes 1 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_1x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_1x10_64Xor takes 1 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_1x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x10Xor takes 1 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_1x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x1 takes 2 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_2x1_64 takes 2 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x1_64 takes 2 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x1_64Xor takes 2 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_2x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x1Xor takes 2 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x1_64Xor takes 2 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x2 takes 2 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_2x2_64 takes 2 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x2_64 takes 2 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x2_64Xor takes 2 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_2x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x2Xor takes 2 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x2_64Xor takes 2 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x3 takes 2 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_2x3_64 takes 2 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x3_64 takes 2 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x3_64Xor takes 2 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_2x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x3Xor takes 2 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x3_64Xor takes 2 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x4 takes 2 inputs and produces 4 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x4_64 takes 2 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x4_64Xor takes 2 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_2x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x4Xor takes 2 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x5 takes 2 inputs and produces 5 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x5_64 takes 2 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x5_64Xor takes 2 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_2x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x5Xor takes 2 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x6 takes 2 inputs and produces 6 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x6_64 takes 2 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x6_64Xor takes 2 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_2x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x6Xor takes 2 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x7 takes 2 inputs and produces 7 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x7_64 takes 2 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x7_64Xor takes 2 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_2x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x7Xor takes 2 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x8 takes 2 inputs and produces 8 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x8_64 takes 2 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x8_64Xor takes 2 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_2x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x8Xor takes 2 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x9 takes 2 inputs and produces 9 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x9_64 takes 2 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x9_64Xor takes 2 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_2x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x9Xor takes 2 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x10 takes 2 inputs and produces 10 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_2x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_2x10_64 takes 2 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_2x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_2x10_64Xor takes 2 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_2x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x10Xor takes 2 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_2x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x1 takes 3 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_3x1_64 takes 3 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x1_64 takes 3 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x1_64Xor takes 3 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_3x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x1Xor takes 3 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x1_64Xor takes 3 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x2 takes 3 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_3x2_64 takes 3 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x2_64 takes 3 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x2_64Xor takes 3 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_3x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x2Xor takes 3 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x2_64Xor takes 3 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x3 takes 3 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_3x3_64 takes 3 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x3_64 takes 3 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x3_64Xor takes 3 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_3x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x3Xor takes 3 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x3_64Xor takes 3 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x4 takes 3 inputs and produces 4 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x4_64 takes 3 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x4_64Xor takes 3 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_3x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x4Xor takes 3 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x5 takes 3 inputs and produces 5 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x5_64 takes 3 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x5_64Xor takes 3 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_3x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x5Xor takes 3 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x6 takes 3 inputs and produces 6 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x6_64 takes 3 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x6_64Xor takes 3 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_3x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x6Xor takes 3 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x7 takes 3 inputs and produces 7 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x7_64 takes 3 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x7_64Xor takes 3 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_3x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x7Xor takes 3 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x8 takes 3 inputs and produces 8 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x8_64 takes 3 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x8_64Xor takes 3 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_3x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x8Xor takes 3 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x9 takes 3 inputs and produces 9 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x9_64 takes 3 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x9_64Xor takes 3 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_3x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x9Xor takes 3 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x10 takes 3 inputs and produces 10 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_3x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_3x10_64 takes 3 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_3x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_3x10_64Xor takes 3 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_3x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x10Xor takes 3 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_3x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x1 takes 4 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_4x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_4x1_64 takes 4 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_4x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_4x1_64 takes 4 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_4x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_4x1_64Xor takes 4 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_4x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_4x1Xor takes 4 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_4x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_4x1_64Xor takes 4 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_4x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x2 takes 4 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_4x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_4x2_64 takes 4 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_4x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulGFNI_4x2_64 takes 4 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_4x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_4x2_64Xor takes 4 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_4x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_4x2Xor takes 4 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_4x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_4x2_64Xor takes 4 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_4x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x3 takes 4 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_4x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
 // mulAvxTwo_4x3_64 takes 4 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
 func mulAvxTwo_4x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_4x4 takes 4 inputs and produces 4 outputs.
+// mulGFNI_4x3_64 takes 4 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_4x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_4x5 takes 4 inputs and produces 5 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x3_64Xor takes 4 inputs and produces 3 outputs.
+//
 //go:noescape
-func mulAvxTwo_4x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_4x6 takes 4 inputs and produces 6 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x3Xor takes 4 inputs and produces 3 outputs.
+//
 //go:noescape
-func mulAvxTwo_4x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_4x7 takes 4 inputs and produces 7 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x3_64Xor takes 4 inputs and produces 3 outputs.
+//
 //go:noescape
-func mulAvxTwo_4x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_4x8 takes 4 inputs and produces 8 outputs.
+// mulAvxTwo_4x4 takes 4 inputs and produces 4 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_4x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_4x9 takes 4 inputs and produces 9 outputs.
+// mulGFNI_4x4_64 takes 4 inputs and produces 4 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_4x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_4x10 takes 4 inputs and produces 10 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x4_64Xor takes 4 inputs and produces 4 outputs.
+//
 //go:noescape
-func mulAvxTwo_4x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x1 takes 5 inputs and produces 1 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x4Xor takes 4 inputs and produces 4 outputs.
+//
 //go:noescape
-func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x1_64 takes 5 inputs and produces 1 outputs.
+// mulAvxTwo_4x5 takes 4 inputs and produces 5 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_5x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x2 takes 5 inputs and produces 2 outputs.
+// mulGFNI_4x5_64 takes 4 inputs and produces 5 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x2_64 takes 5 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x5_64Xor takes 4 inputs and produces 5 outputs.
+//
 //go:noescape
-func mulAvxTwo_5x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x3 takes 5 inputs and produces 3 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x5Xor takes 4 inputs and produces 5 outputs.
+//
 //go:noescape
-func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x3_64 takes 5 inputs and produces 3 outputs.
+// mulAvxTwo_4x6 takes 4 inputs and produces 6 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_5x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x4 takes 5 inputs and produces 4 outputs.
+// mulGFNI_4x6_64 takes 4 inputs and produces 6 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x5 takes 5 inputs and produces 5 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x6_64Xor takes 4 inputs and produces 6 outputs.
+//
 //go:noescape
-func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x6 takes 5 inputs and produces 6 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x6Xor takes 4 inputs and produces 6 outputs.
+//
 //go:noescape
-func mulAvxTwo_5x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x7 takes 5 inputs and produces 7 outputs.
+// mulAvxTwo_4x7 takes 4 inputs and produces 7 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x8 takes 5 inputs and produces 8 outputs.
+// mulGFNI_4x7_64 takes 4 inputs and produces 7 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x9 takes 5 inputs and produces 9 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x7_64Xor takes 4 inputs and produces 7 outputs.
+//
 //go:noescape
-func mulAvxTwo_5x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_5x10 takes 5 inputs and produces 10 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x7Xor takes 4 inputs and produces 7 outputs.
+//
 //go:noescape
-func mulAvxTwo_5x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x1 takes 6 inputs and produces 1 outputs.
+// mulAvxTwo_4x8 takes 4 inputs and produces 8 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x1_64 takes 6 inputs and produces 1 outputs.
+// mulGFNI_4x8_64 takes 4 inputs and produces 8 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_6x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x2 takes 6 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x8_64Xor takes 4 inputs and produces 8 outputs.
+//
 //go:noescape
-func mulAvxTwo_6x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x2_64 takes 6 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x8Xor takes 4 inputs and produces 8 outputs.
+//
 //go:noescape
-func mulAvxTwo_6x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x3 takes 6 inputs and produces 3 outputs.
+// mulAvxTwo_4x9 takes 4 inputs and produces 9 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_6x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x3_64 takes 6 inputs and produces 3 outputs.
+// mulGFNI_4x9_64 takes 4 inputs and produces 9 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_6x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x4 takes 6 inputs and produces 4 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x9_64Xor takes 4 inputs and produces 9 outputs.
+//
 //go:noescape
-func mulAvxTwo_6x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x5 takes 6 inputs and produces 5 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x9Xor takes 4 inputs and produces 9 outputs.
+//
 //go:noescape
-func mulAvxTwo_6x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x6 takes 6 inputs and produces 6 outputs.
+// mulAvxTwo_4x10 takes 4 inputs and produces 10 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_6x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x7 takes 6 inputs and produces 7 outputs.
+// mulGFNI_4x10_64 takes 4 inputs and produces 10 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_6x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x8 takes 6 inputs and produces 8 outputs.
-// The output is initialized to 0.
+// mulGFNI_4x10_64Xor takes 4 inputs and produces 10 outputs.
+//
 //go:noescape
-func mulAvxTwo_6x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_4x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x9 takes 6 inputs and produces 9 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_4x10Xor takes 4 inputs and produces 10 outputs.
+//
 //go:noescape
-func mulAvxTwo_6x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_4x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_6x10 takes 6 inputs and produces 10 outputs.
+// mulAvxTwo_5x1 takes 5 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_6x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x1 takes 7 inputs and produces 1 outputs.
+// mulAvxTwo_5x1_64 takes 5 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_7x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x1_64 takes 7 inputs and produces 1 outputs.
+// mulGFNI_5x1_64 takes 5 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_7x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x2 takes 7 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x1_64Xor takes 5 inputs and produces 1 outputs.
+//
 //go:noescape
-func mulAvxTwo_7x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x2_64 takes 7 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x1Xor takes 5 inputs and produces 1 outputs.
+//
 //go:noescape
-func mulAvxTwo_7x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x3 takes 7 inputs and produces 3 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x1_64Xor takes 5 inputs and produces 1 outputs.
+//
 //go:noescape
-func mulAvxTwo_7x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x3_64 takes 7 inputs and produces 3 outputs.
+// mulAvxTwo_5x2 takes 5 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_7x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x4 takes 7 inputs and produces 4 outputs.
+// mulAvxTwo_5x2_64 takes 5 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_7x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x5 takes 7 inputs and produces 5 outputs.
+// mulGFNI_5x2_64 takes 5 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_7x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x6 takes 7 inputs and produces 6 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x2_64Xor takes 5 inputs and produces 2 outputs.
+//
 //go:noescape
-func mulAvxTwo_7x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x7 takes 7 inputs and produces 7 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x2Xor takes 5 inputs and produces 2 outputs.
+//
 //go:noescape
-func mulAvxTwo_7x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x8 takes 7 inputs and produces 8 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x2_64Xor takes 5 inputs and produces 2 outputs.
+//
 //go:noescape
-func mulAvxTwo_7x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x9 takes 7 inputs and produces 9 outputs.
+// mulAvxTwo_5x3 takes 5 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_7x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_7x10 takes 7 inputs and produces 10 outputs.
+// mulAvxTwo_5x3_64 takes 5 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_7x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x1 takes 8 inputs and produces 1 outputs.
+// mulGFNI_5x3_64 takes 5 inputs and produces 3 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_8x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x1_64 takes 8 inputs and produces 1 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x3_64Xor takes 5 inputs and produces 3 outputs.
+//
 //go:noescape
-func mulAvxTwo_8x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x2 takes 8 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x3Xor takes 5 inputs and produces 3 outputs.
+//
 //go:noescape
-func mulAvxTwo_8x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x2_64 takes 8 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x3_64Xor takes 5 inputs and produces 3 outputs.
+//
 //go:noescape
-func mulAvxTwo_8x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x3 takes 8 inputs and produces 3 outputs.
+// mulAvxTwo_5x4 takes 5 inputs and produces 4 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_8x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x3_64 takes 8 inputs and produces 3 outputs.
+// mulGFNI_5x4_64 takes 5 inputs and produces 4 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_8x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x4 takes 8 inputs and produces 4 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x4_64Xor takes 5 inputs and produces 4 outputs.
+//
 //go:noescape
-func mulAvxTwo_8x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x5 takes 8 inputs and produces 5 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x4Xor takes 5 inputs and produces 4 outputs.
+//
 //go:noescape
-func mulAvxTwo_8x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x6 takes 8 inputs and produces 6 outputs.
+// mulAvxTwo_5x5 takes 5 inputs and produces 5 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_8x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x7 takes 8 inputs and produces 7 outputs.
+// mulGFNI_5x5_64 takes 5 inputs and produces 5 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_8x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x8 takes 8 inputs and produces 8 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x5_64Xor takes 5 inputs and produces 5 outputs.
+//
 //go:noescape
-func mulAvxTwo_8x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x9 takes 8 inputs and produces 9 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x5Xor takes 5 inputs and produces 5 outputs.
+//
 //go:noescape
-func mulAvxTwo_8x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_8x10 takes 8 inputs and produces 10 outputs.
+// mulAvxTwo_5x6 takes 5 inputs and produces 6 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_8x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x1 takes 9 inputs and produces 1 outputs.
+// mulGFNI_5x6_64 takes 5 inputs and produces 6 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_9x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x1_64 takes 9 inputs and produces 1 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x6_64Xor takes 5 inputs and produces 6 outputs.
+//
 //go:noescape
-func mulAvxTwo_9x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x2 takes 9 inputs and produces 2 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x6Xor takes 5 inputs and produces 6 outputs.
+//
 //go:noescape
-func mulAvxTwo_9x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x2_64 takes 9 inputs and produces 2 outputs.
+// mulAvxTwo_5x7 takes 5 inputs and produces 7 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_9x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x3 takes 9 inputs and produces 3 outputs.
+// mulGFNI_5x7_64 takes 5 inputs and produces 7 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_9x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x3_64 takes 9 inputs and produces 3 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x7_64Xor takes 5 inputs and produces 7 outputs.
+//
 //go:noescape
-func mulAvxTwo_9x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x4 takes 9 inputs and produces 4 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x7Xor takes 5 inputs and produces 7 outputs.
+//
 //go:noescape
-func mulAvxTwo_9x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x5 takes 9 inputs and produces 5 outputs.
+// mulAvxTwo_5x8 takes 5 inputs and produces 8 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_9x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x6 takes 9 inputs and produces 6 outputs.
+// mulGFNI_5x8_64 takes 5 inputs and produces 8 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_9x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x7 takes 9 inputs and produces 7 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x8_64Xor takes 5 inputs and produces 8 outputs.
+//
 //go:noescape
-func mulAvxTwo_9x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x8 takes 9 inputs and produces 8 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x8Xor takes 5 inputs and produces 8 outputs.
+//
 //go:noescape
-func mulAvxTwo_9x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x9 takes 9 inputs and produces 9 outputs.
+// mulAvxTwo_5x9 takes 5 inputs and produces 9 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_9x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_9x10 takes 9 inputs and produces 10 outputs.
+// mulGFNI_5x9_64 takes 5 inputs and produces 9 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_9x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x1 takes 10 inputs and produces 1 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x9_64Xor takes 5 inputs and produces 9 outputs.
+//
 //go:noescape
-func mulAvxTwo_10x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x1_64 takes 10 inputs and produces 1 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x9Xor takes 5 inputs and produces 9 outputs.
+//
 //go:noescape
-func mulAvxTwo_10x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x2 takes 10 inputs and produces 2 outputs.
+// mulAvxTwo_5x10 takes 5 inputs and produces 10 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x2_64 takes 10 inputs and produces 2 outputs.
+// mulGFNI_5x10_64 takes 5 inputs and produces 10 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x3 takes 10 inputs and produces 3 outputs.
-// The output is initialized to 0.
+// mulGFNI_5x10_64Xor takes 5 inputs and produces 10 outputs.
+//
 //go:noescape
-func mulAvxTwo_10x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_5x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x3_64 takes 10 inputs and produces 3 outputs.
-// The output is initialized to 0.
+// mulAvxTwo_5x10Xor takes 5 inputs and produces 10 outputs.
+//
 //go:noescape
-func mulAvxTwo_10x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_5x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x4 takes 10 inputs and produces 4 outputs.
+// mulAvxTwo_6x1 takes 6 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x5 takes 10 inputs and produces 5 outputs.
+// mulAvxTwo_6x1_64 takes 6 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_6x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x6 takes 10 inputs and produces 6 outputs.
+// mulGFNI_6x1_64 takes 6 inputs and produces 1 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_6x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x7 takes 10 inputs and produces 7 outputs.
-// The output is initialized to 0.
+// mulGFNI_6x1_64Xor takes 6 inputs and produces 1 outputs.
+//
 //go:noescape
-func mulAvxTwo_10x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_6x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x8 takes 10 inputs and produces 8 outputs.
+// mulAvxTwo_6x1Xor takes 6 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x1_64Xor takes 6 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x2 takes 6 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_6x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x9 takes 10 inputs and produces 9 outputs.
+// mulAvxTwo_6x2_64 takes 6 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulAvxTwo_6x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
-// mulAvxTwo_10x10 takes 10 inputs and produces 10 outputs.
+// mulGFNI_6x2_64 takes 6 inputs and produces 2 outputs.
 // The output is initialized to 0.
+//
 //go:noescape
-func mulAvxTwo_10x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+func mulGFNI_6x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x2_64Xor takes 6 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_6x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x2Xor takes 6 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x2_64Xor takes 6 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x3 takes 6 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x3_64 takes 6 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x3_64 takes 6 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x3_64Xor takes 6 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_6x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x3Xor takes 6 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x3_64Xor takes 6 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x4 takes 6 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x4_64 takes 6 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x4_64Xor takes 6 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_6x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x4Xor takes 6 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x5 takes 6 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x5_64 takes 6 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x5_64Xor takes 6 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_6x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x5Xor takes 6 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x6 takes 6 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x6_64 takes 6 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x6_64Xor takes 6 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_6x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x6Xor takes 6 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x7 takes 6 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x7_64 takes 6 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x7_64Xor takes 6 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_6x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x7Xor takes 6 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x8 takes 6 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x8_64 takes 6 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x8_64Xor takes 6 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_6x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x8Xor takes 6 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x9 takes 6 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x9_64 takes 6 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x9_64Xor takes 6 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_6x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x9Xor takes 6 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x10 takes 6 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_6x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x10_64 takes 6 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_6x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_6x10_64Xor takes 6 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_6x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x10Xor takes 6 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_6x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x1 takes 7 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x1_64 takes 7 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x1_64 takes 7 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x1_64Xor takes 7 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_7x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x1Xor takes 7 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x1_64Xor takes 7 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x2 takes 7 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x2_64 takes 7 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x2_64 takes 7 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x2_64Xor takes 7 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_7x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x2Xor takes 7 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x2_64Xor takes 7 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x3 takes 7 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x3_64 takes 7 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x3_64 takes 7 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x3_64Xor takes 7 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_7x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x3Xor takes 7 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x3_64Xor takes 7 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x4 takes 7 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x4_64 takes 7 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x4_64Xor takes 7 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_7x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x4Xor takes 7 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x5 takes 7 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x5_64 takes 7 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x5_64Xor takes 7 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_7x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x5Xor takes 7 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x6 takes 7 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x6_64 takes 7 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x6_64Xor takes 7 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_7x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x6Xor takes 7 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x7 takes 7 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x7_64 takes 7 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x7_64Xor takes 7 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_7x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x7Xor takes 7 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x8 takes 7 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x8_64 takes 7 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x8_64Xor takes 7 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_7x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x8Xor takes 7 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x9 takes 7 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x9_64 takes 7 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x9_64Xor takes 7 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_7x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x9Xor takes 7 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x10 takes 7 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_7x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x10_64 takes 7 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_7x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_7x10_64Xor takes 7 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_7x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x10Xor takes 7 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_7x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x1 takes 8 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x1_64 takes 8 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x1_64 takes 8 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x1_64Xor takes 8 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_8x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x1Xor takes 8 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x1_64Xor takes 8 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x2 takes 8 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x2_64 takes 8 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x2_64 takes 8 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x2_64Xor takes 8 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_8x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x2Xor takes 8 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x2_64Xor takes 8 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x3 takes 8 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x3_64 takes 8 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x3_64 takes 8 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x3_64Xor takes 8 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_8x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x3Xor takes 8 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x3_64Xor takes 8 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x4 takes 8 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x4_64 takes 8 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x4_64Xor takes 8 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_8x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x4Xor takes 8 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x5 takes 8 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x5_64 takes 8 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x5_64Xor takes 8 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_8x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x5Xor takes 8 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x6 takes 8 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x6_64 takes 8 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x6_64Xor takes 8 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_8x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x6Xor takes 8 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x7 takes 8 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x7_64 takes 8 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x7_64Xor takes 8 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_8x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x7Xor takes 8 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x8 takes 8 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x8_64 takes 8 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x8_64Xor takes 8 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_8x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x8Xor takes 8 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x9 takes 8 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x9_64 takes 8 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x9_64Xor takes 8 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_8x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x9Xor takes 8 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x10 takes 8 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_8x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x10_64 takes 8 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_8x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_8x10_64Xor takes 8 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_8x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x10Xor takes 8 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_8x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x1 takes 9 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x1_64 takes 9 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x1_64 takes 9 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x1_64Xor takes 9 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_9x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x1Xor takes 9 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x1_64Xor takes 9 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x2 takes 9 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x2_64 takes 9 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x2_64 takes 9 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x2_64Xor takes 9 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_9x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x2Xor takes 9 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x2_64Xor takes 9 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x3 takes 9 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x3_64 takes 9 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x3_64 takes 9 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x3_64Xor takes 9 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_9x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x3Xor takes 9 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x3_64Xor takes 9 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x4 takes 9 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x4_64 takes 9 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x4_64Xor takes 9 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_9x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x4Xor takes 9 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x5 takes 9 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x5_64 takes 9 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x5_64Xor takes 9 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_9x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x5Xor takes 9 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x6 takes 9 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x6_64 takes 9 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x6_64Xor takes 9 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_9x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x6Xor takes 9 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x7 takes 9 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x7_64 takes 9 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x7_64Xor takes 9 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_9x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x7Xor takes 9 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x8 takes 9 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x8_64 takes 9 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x8_64Xor takes 9 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_9x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x8Xor takes 9 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x9 takes 9 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x9_64 takes 9 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x9_64Xor takes 9 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_9x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x9Xor takes 9 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x10 takes 9 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_9x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x10_64 takes 9 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_9x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_9x10_64Xor takes 9 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_9x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x10Xor takes 9 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_9x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x1 takes 10 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x1_64 takes 10 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x1_64 takes 10 inputs and produces 1 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x1_64Xor takes 10 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulGFNI_10x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x1Xor takes 10 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x1_64Xor takes 10 inputs and produces 1 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x2 takes 10 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x2_64 takes 10 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x2_64 takes 10 inputs and produces 2 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x2_64Xor takes 10 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulGFNI_10x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x2Xor takes 10 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x2_64Xor takes 10 inputs and produces 2 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x3 takes 10 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x3_64 takes 10 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x3_64 takes 10 inputs and produces 3 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x3_64Xor takes 10 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulGFNI_10x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x3Xor takes 10 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x3_64Xor takes 10 inputs and produces 3 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x4 takes 10 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x4_64 takes 10 inputs and produces 4 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x4_64Xor takes 10 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulGFNI_10x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x4Xor takes 10 inputs and produces 4 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x5 takes 10 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x5_64 takes 10 inputs and produces 5 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x5_64Xor takes 10 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulGFNI_10x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x5Xor takes 10 inputs and produces 5 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x6 takes 10 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x6_64 takes 10 inputs and produces 6 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x6_64Xor takes 10 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulGFNI_10x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x6Xor takes 10 inputs and produces 6 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x7 takes 10 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x7_64 takes 10 inputs and produces 7 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x7_64Xor takes 10 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulGFNI_10x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x7Xor takes 10 inputs and produces 7 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x8 takes 10 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x8_64 takes 10 inputs and produces 8 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x8_64Xor takes 10 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulGFNI_10x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x8Xor takes 10 inputs and produces 8 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x9 takes 10 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x9_64 takes 10 inputs and produces 9 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x9_64Xor takes 10 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulGFNI_10x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x9Xor takes 10 inputs and produces 9 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x10 takes 10 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulAvxTwo_10x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x10_64 takes 10 inputs and produces 10 outputs.
+// The output is initialized to 0.
+//
+//go:noescape
+func mulGFNI_10x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulGFNI_10x10_64Xor takes 10 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulGFNI_10x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x10Xor takes 10 inputs and produces 10 outputs.
+//
+//go:noescape
+func mulAvxTwo_10x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+//go:noescape
+func ifftDIT2_avx2(x []byte, y []byte, table *[128]uint8)
+
+//go:noescape
+func fftDIT2_avx2(x []byte, y []byte, table *[128]uint8)
+
+//go:noescape
+func mulgf16_avx2(x []byte, y []byte, table *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx512_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx512_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT4_avx2_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func fftDIT4_avx2_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+
+//go:noescape
+func ifftDIT2_ssse3(x []byte, y []byte, table *[128]uint8)
+
+//go:noescape
+func fftDIT2_ssse3(x []byte, y []byte, table *[128]uint8)
+
+//go:noescape
+func mulgf16_ssse3(x []byte, y []byte, table *[128]uint8)
+
+//go:noescape
+func ifftDIT28_avx2(x []byte, y []byte, table *[32]uint8)
+
+//go:noescape
+func fftDIT28_avx2(x []byte, y []byte, table *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_0(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_0(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_1(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_1(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_2(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_2(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_3(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_3(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_4(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_4(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_5(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_5(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_6(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_6(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_avx2_7(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func fftDIT48_avx2_7(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+
+//go:noescape
+func ifftDIT48_gfni_0(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_0(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func ifftDIT48_gfni_1(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_1(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func ifftDIT48_gfni_2(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_2(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func ifftDIT48_gfni_3(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_3(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func ifftDIT48_gfni_4(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_4(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func ifftDIT48_gfni_5(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_5(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func ifftDIT48_gfni_6(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_6(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func ifftDIT48_gfni_7(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+
+//go:noescape
+func fftDIT48_gfni_7(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
diff --git a/galois_gen_amd64.s b/galois_gen_amd64.s
index ab699ac..3a2acac 100644
--- a/galois_gen_amd64.s
+++ b/galois_gen_amd64.s
@@ -1,12 +1,23 @@
 // Code generated by command: go run gen.go -out ../galois_gen_amd64.s -stubs ../galois_gen_amd64.go -pkg=reedsolomon. DO NOT EDIT.
 
-// +build !appengine
-// +build !noasm
-// +build !nogen
-// +build gc
+//go:build !appengine && !noasm && !nogen && gc
 
 #include "textflag.h"
 
+// func _dummy_()
+TEXT ·_dummy_(SB), $0
+#ifdef GOAMD64_v4
+#define XOR3WAY(ignore, a, b, dst) \
+	VPTERNLOGD $0x96, a, b, dst
+
+#else
+#define XOR3WAY(ignore, a, b, dst) \
+	VPXOR a, dst, dst \
+	VPXOR b, dst, dst
+
+#endif
+	RET
+
 // func mulAvxTwo_1x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_1x1(SB), NOSPLIT, $0-88
@@ -36,19 +47,15 @@ TEXT ·mulAvxTwo_1x1(SB), NOSPLIT, $0-88
 	VPBROADCASTB X3, Y3
 
 mulAvxTwo_1x1_loop:
-	// Clear 1 outputs
-	VPXOR Y2, Y2, Y2
-
 	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (CX), Y4
+	VMOVDQU (CX), Y2
 	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y4, Y5
+	VPSRLQ  $0x04, Y2, Y4
+	VPAND   Y3, Y2, Y2
 	VPAND   Y3, Y4, Y4
-	VPAND   Y3, Y5, Y5
-	VPSHUFB Y4, Y0, Y4
-	VPSHUFB Y5, Y1, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSHUFB Y2, Y0, Y2
+	VPSHUFB Y4, Y1, Y4
+	VPXOR   Y2, Y4, Y2
 
 	// Store 1 outputs
 	VMOVDQU Y2, (DX)
@@ -65,68 +72,269 @@ mulAvxTwo_1x1_end:
 // func mulAvxTwo_1x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_1x1_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 6 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_1x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), AX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  start+72(FP), BX
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x1_64_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), DX
+	MOVQ    start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
 
 	// Add start offset to input
-	ADDQ         BX, AX
-	MOVQ         $0x0000000f, SI
-	MOVQ         SI, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), SI
-	SHRQ         $0x06, SI
+	ADDQ         BX, CX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X4
+	VPBROADCASTB X4, Y4
 
 mulAvxTwo_1x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y2, Y6
+	VPSRLQ  $0x04, Y3, Y5
+	VPAND   Y4, Y2, Y2
+	VPAND   Y4, Y3, Y3
+	VPAND   Y4, Y6, Y6
+	VPAND   Y4, Y5, Y5
+	VPSHUFB Y2, Y0, Y2
+	VPSHUFB Y3, Y0, Y3
+	VPSHUFB Y6, Y1, Y6
+	VPSHUFB Y5, Y1, Y5
+	VPXOR   Y2, Y6, Y2
+	VPXOR   Y3, Y5, Y3
 
 	// Store 1 outputs
-	MOVQ    (DX), DI
-	VMOVDQU Y0, (DI)(BX*1)
-	VMOVDQU Y1, 32(DI)(BX*1)
+	VMOVDQU Y2, (DX)
+	VMOVDQU Y3, 32(DX)
+	ADDQ    $0x40, DX
 
 	// Prepare for next loop
-	ADDQ $0x40, BX
-	DECQ SI
+	DECQ AX
 	JNZ  mulAvxTwo_1x1_64_loop
 	VZEROUPPER
 
 mulAvxTwo_1x1_64_end:
 	RET
 
+// func mulGFNI_1x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 4 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), DX
+	MOVQ            start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
+
+	// Add start offset to input
+	ADDQ BX, CX
+
+mulGFNI_1x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (CX), Z1
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z1, Z1
+
+	// Store 1 outputs
+	VMOVDQU64 Z1, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x1_64_loop
+	VZEROUPPER
+
+mulGFNI_1x1_64_end:
+	RET
+
+// func mulGFNI_1x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 4 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), DX
+	MOVQ            start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
+
+	// Add start offset to input
+	ADDQ BX, CX
+
+mulGFNI_1x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (DX), Z1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (CX), Z2
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z2, Z2
+	VXORPD         Z1, Z2, Z1
+
+	// Store 1 outputs
+	VMOVDQU64 Z1, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 6 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), DX
+	MOVQ    start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
+
+	// Add start offset to input
+	ADDQ         BX, CX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_1x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (CX), Y4
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y3, Y4, Y4
+	VPAND   Y3, Y5, Y5
+	VMOVDQU (DX), Y2
+	VPSHUFB Y4, Y0, Y4
+	VPSHUFB Y5, Y1, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 1 outputs
+	VMOVDQU Y2, (DX)
+	ADDQ    $0x20, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x1Xor_end:
+	RET
+
+// func mulAvxTwo_1x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x1_64Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), DX
+	MOVQ    start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
+
+	// Add start offset to input
+	ADDQ         BX, CX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_1x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (DX), Y2
+	VMOVDQU 32(DX), Y3
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y7
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y5, Y6
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y5, Y5
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y6, Y6
+	VPAND   Y4, Y8, Y8
+	VPSHUFB Y5, Y0, Y5
+	VPSHUFB Y7, Y0, Y7
+	VPSHUFB Y6, Y1, Y6
+	VPSHUFB Y8, Y1, Y8
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 1 outputs
+	VMOVDQU Y2, (DX)
+	VMOVDQU Y3, 32(DX)
+	ADDQ    $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x1_64Xor_end:
+	RET
+
 // func mulAvxTwo_1x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_1x2(SB), NOSPLIT, $0-88
@@ -160,24 +368,257 @@ TEXT ·mulAvxTwo_1x2(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_1x2_loop:
-	// Clear 2 outputs
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (CX), Y8
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y9, Y9
+	VPSHUFB Y8, Y0, Y5
+	VPSHUFB Y9, Y1, Y7
+	VPXOR   Y5, Y7, Y4
+	VPSHUFB Y8, Y2, Y5
+	VPSHUFB Y9, Y3, Y7
+	VPXOR   Y5, Y7, Y5
+
+	// Store 2 outputs
+	VMOVDQU Y4, (BX)
+	ADDQ    $0x20, BX
+	VMOVDQU Y5, (DX)
+	ADDQ    $0x20, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x2_loop
+	VZEROUPPER
+
+mulAvxTwo_1x2_end:
+	RET
+
+// func mulAvxTwo_1x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x2_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x2_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), BX
+	MOVQ  start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_1x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (DX), Y7
+	VMOVDQU 32(DX), Y9
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y8, Y8
+	VPAND   Y4, Y10, Y10
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y9, Y2, Y3
+	VPSHUFB Y7, Y2, Y2
+	VPSHUFB Y10, Y6, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y2, Y6, Y0
+	VPXOR   Y3, Y5, Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y9, Y2, Y3
+	VPSHUFB Y7, Y2, Y2
+	VPSHUFB Y10, Y6, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y2, Y6, Y2
+	VPXOR   Y3, Y5, Y3
+
+	// Store 2 outputs
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (BX)
+	VMOVDQU Y3, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_1x2_64_end:
+	RET
+
+// func mulGFNI_1x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 6 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), DX
+	MOVQ            start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+	ADDQ SI, DX
+
+	// Add start offset to input
+	ADDQ SI, CX
+
+mulGFNI_1x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (CX), Z3
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z2
+	VGF2P8AFFINEQB $0x00, Z1, Z3, Z3
+
+	// Store 2 outputs
+	VMOVDQU64 Z2, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z3, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x2_64_loop
+	VZEROUPPER
+
+mulGFNI_1x2_64_end:
+	RET
+
+// func mulGFNI_1x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 6 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), DX
+	MOVQ            start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+	ADDQ SI, DX
+
+	// Add start offset to input
+	ADDQ SI, CX
+
+mulGFNI_1x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (BX), Z2
+	VMOVDQU64 (DX), Z3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (CX), Z4
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z5
+	VXORPD         Z2, Z5, Z2
+	VGF2P8AFFINEQB $0x00, Z1, Z4, Z5
+	VXORPD         Z3, Z5, Z3
+
+	// Store 2 outputs
+	VMOVDQU64 Z2, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z3, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x2Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 11 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x2Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), BX
+	MOVQ    24(DX), DX
+	MOVQ    start+72(FP), SI
 
+	// Add start offset to output
+	ADDQ SI, BX
+	ADDQ SI, DX
+
+	// Add start offset to input
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_1x2Xor_loop:
 	// Load and process 32 bytes from input 0 to 2 outputs
 	VMOVDQU (CX), Y9
 	ADDQ    $0x20, CX
 	VPSRLQ  $0x04, Y9, Y10
 	VPAND   Y6, Y9, Y9
 	VPAND   Y6, Y10, Y10
+	VMOVDQU (BX), Y4
 	VPSHUFB Y9, Y0, Y7
 	VPSHUFB Y10, Y1, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (DX), Y5
 	VPSHUFB Y9, Y2, Y7
 	VPSHUFB Y10, Y3, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Store 2 outputs
 	VMOVDQU Y4, (BX)
@@ -187,48 +628,52 @@ mulAvxTwo_1x2_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x2_loop
+	JNZ  mulAvxTwo_1x2Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x2_end:
+mulAvxTwo_1x2Xor_end:
 	RET
 
-// func mulAvxTwo_1x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x2_64(SB), $0-88
+// func mulAvxTwo_1x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x2_64Xor(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 11 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_1x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), AX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  start+72(FP), BX
+	JZ    mulAvxTwo_1x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), BX
+	MOVQ  start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
 
 	// Add start offset to input
-	ADDQ         BX, AX
-	MOVQ         $0x0000000f, SI
-	MOVQ         SI, X4
+	ADDQ         DI, DX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), SI
-	SHRQ         $0x06, SI
 
-mulAvxTwo_1x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+mulAvxTwo_1x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (BX), Y2
+	VMOVDQU 32(BX), Y3
 
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -241,36 +686,31 @@ mulAvxTwo_1x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Store 2 outputs
-	MOVQ    (DX), DI
-	VMOVDQU Y0, (DI)(BX*1)
-	VMOVDQU Y1, 32(DI)(BX*1)
-	MOVQ    24(DX), DI
-	VMOVDQU Y2, (DI)(BX*1)
-	VMOVDQU Y3, 32(DI)(BX*1)
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (BX)
+	VMOVDQU Y3, 32(BX)
+	ADDQ    $0x40, BX
 
 	// Prepare for next loop
-	ADDQ $0x40, BX
-	DECQ SI
-	JNZ  mulAvxTwo_1x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x2_64_end:
+mulAvxTwo_1x2_64Xor_end:
 	RET
 
 // func mulAvxTwo_1x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -310,29 +750,21 @@ TEXT ·mulAvxTwo_1x3(SB), NOSPLIT, $0-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_1x3_loop:
-	// Clear 3 outputs
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-
 	// Load and process 32 bytes from input 0 to 3 outputs
-	VMOVDQU (CX), Y12
+	VMOVDQU (CX), Y11
 	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y12, Y13
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y9, Y11, Y11
 	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VPSHUFB Y12, Y0, Y10
-	VPSHUFB Y13, Y1, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VPSHUFB Y12, Y2, Y10
-	VPSHUFB Y13, Y3, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VPSHUFB Y12, Y4, Y10
-	VPSHUFB Y13, Y5, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y11, Y0, Y8
+	VPSHUFB Y12, Y1, Y10
+	VPXOR   Y8, Y10, Y6
+	VPSHUFB Y11, Y2, Y8
+	VPSHUFB Y12, Y3, Y10
+	VPXOR   Y8, Y10, Y7
+	VPSHUFB Y11, Y4, Y8
+	VPSHUFB Y12, Y5, Y10
+	VPXOR   Y8, Y10, Y8
 
 	// Store 3 outputs
 	VMOVDQU Y6, (BX)
@@ -354,185 +786,664 @@ mulAvxTwo_1x3_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_1x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 14 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_1x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), AX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  start+72(FP), BX
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), BX
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, BX
 
 	// Add start offset to input
-	ADDQ         BX, AX
-	MOVQ         $0x0000000f, SI
-	MOVQ         SI, X6
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), SI
-	SHRQ         $0x06, SI
 
 mulAvxTwo_1x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y9, Y9
 	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y10, Y10
 	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU (CX), Y7
+	VMOVDQU (CX), Y4
 	VMOVDQU 32(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 64(CX), Y7
+	VPSHUFB Y11, Y4, Y5
+	VPSHUFB Y9, Y4, Y4
+	VPSHUFB Y12, Y8, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y4, Y8, Y0
+	VPXOR   Y5, Y7, Y1
+	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 128(CX), Y7
+	VPSHUFB Y11, Y4, Y5
+	VPSHUFB Y9, Y4, Y4
+	VPSHUFB Y12, Y8, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y4, Y8, Y2
+	VPXOR   Y5, Y7, Y3
+	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPSHUFB Y11, Y4, Y5
+	VPSHUFB Y9, Y4, Y4
+	VPSHUFB Y12, Y8, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y4, Y8, Y4
+	VPXOR   Y5, Y7, Y5
 
 	// Store 3 outputs
-	MOVQ    (DX), DI
-	VMOVDQU Y0, (DI)(BX*1)
-	VMOVDQU Y1, 32(DI)(BX*1)
-	MOVQ    24(DX), DI
-	VMOVDQU Y2, (DI)(BX*1)
-	VMOVDQU Y3, 32(DI)(BX*1)
-	MOVQ    48(DX), DI
-	VMOVDQU Y4, (DI)(BX*1)
-	VMOVDQU Y5, 32(DI)(BX*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, BX
-	DECQ SI
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
 	JNZ  mulAvxTwo_1x3_64_loop
 	VZEROUPPER
 
 mulAvxTwo_1x3_64_end:
 	RET
 
-// func mulAvxTwo_1x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x4(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_1x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x3_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 17 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_1x4_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), DX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  (BX), SI
-	MOVQ  24(BX), DI
-	MOVQ  48(BX), R8
-	MOVQ  72(BX), BX
-	MOVQ  start+72(FP), R9
+	// Full registers estimated 8 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DX
+	MOVQ            start+72(FP), DI
 
 	// Add start offset to output
-	ADDQ R9, SI
-	ADDQ R9, DI
-	ADDQ R9, R8
-	ADDQ R9, BX
+	ADDQ DI, BX
+	ADDQ DI, SI
+	ADDQ DI, DX
 
 	// Add start offset to input
-	ADDQ         R9, DX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X4
-	VPBROADCASTB X4, Y4
+	ADDQ DI, CX
 
-mulAvxTwo_1x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (DX), Y7
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+mulGFNI_1x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (CX), Z5
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z5, Z3
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z4
+	VGF2P8AFFINEQB $0x00, Z2, Z5, Z5
 
-	// Store 4 outputs
-	VMOVDQU Y0, (SI)
-	ADDQ    $0x20, SI
-	VMOVDQU Y1, (DI)
-	ADDQ    $0x20, DI
-	VMOVDQU Y2, (R8)
-	ADDQ    $0x20, R8
-	VMOVDQU Y3, (BX)
-	ADDQ    $0x20, BX
+	// Store 3 outputs
+	VMOVDQU64 Z3, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z4, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z5, (DX)
+	ADDQ      $0x40, DX
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x4_loop
+	JNZ  mulGFNI_1x3_64_loop
+	VZEROUPPER
+
+mulGFNI_1x3_64_end:
+	RET
+
+// func mulGFNI_1x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 8 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DX
+	MOVQ            start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, BX
+	ADDQ DI, SI
+	ADDQ DI, DX
+
+	// Add start offset to input
+	ADDQ DI, CX
+
+mulGFNI_1x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (BX), Z3
+	VMOVDQU64 (SI), Z4
+	VMOVDQU64 (DX), Z5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (CX), Z6
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z6, Z7
+	VXORPD         Z3, Z7, Z3
+	VGF2P8AFFINEQB $0x00, Z1, Z6, Z7
+	VXORPD         Z4, Z7, Z4
+	VGF2P8AFFINEQB $0x00, Z2, Z6, Z7
+	VXORPD         Z5, Z7, Z5
+
+	// Store 3 outputs
+	VMOVDQU64 Z3, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z4, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z5, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x3Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x3Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), BX
+	MOVQ    24(DX), SI
+	MOVQ    48(DX), DX
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, BX
+	ADDQ DI, SI
+	ADDQ DI, DX
+
+	// Add start offset to input
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_1x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (CX), Y12
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (BX), Y6
+	VPSHUFB Y12, Y0, Y10
+	VPSHUFB Y13, Y1, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU (SI), Y7
+	VPSHUFB Y12, Y2, Y10
+	VPSHUFB Y13, Y3, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU (DX), Y8
+	VPSHUFB Y12, Y4, Y10
+	VPSHUFB Y13, Y5, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 3 outputs
+	VMOVDQU Y6, (BX)
+	ADDQ    $0x20, BX
+	VMOVDQU Y7, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y8, (DX)
+	ADDQ    $0x20, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x3Xor_end:
+	RET
+
+// func mulAvxTwo_1x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), BX
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, BX
+
+	// Add start offset to input
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_1x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+	VMOVDQU (BX), Y4
+	VMOVDQU 32(BX), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), BX
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, BX
+
+	// Add start offset to input
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_1x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y4, Y6, Y6
+	VPAND   Y4, Y7, Y7
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y3, Y5, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y3, Y5, Y1
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y3, Y5, Y2
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y3, Y5, Y3
+
+	// Store 4 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x4_loop
 	VZEROUPPER
 
 mulAvxTwo_1x4_end:
 	RET
 
+// func mulGFNI_1x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x4_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, BX
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, DX
+
+	// Add start offset to input
+	ADDQ R8, CX
+
+mulGFNI_1x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (CX), Z7
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z7, Z4
+	VGF2P8AFFINEQB $0x00, Z1, Z7, Z5
+	VGF2P8AFFINEQB $0x00, Z2, Z7, Z6
+	VGF2P8AFFINEQB $0x00, Z3, Z7, Z7
+
+	// Store 4 outputs
+	VMOVDQU64 Z4, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z5, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z6, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z7, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x4_64_loop
+	VZEROUPPER
+
+mulGFNI_1x4_64_end:
+	RET
+
+// func mulGFNI_1x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x4_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, BX
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, DX
+
+	// Add start offset to input
+	ADDQ R8, CX
+
+mulGFNI_1x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (BX), Z4
+	VMOVDQU64 (SI), Z5
+	VMOVDQU64 (DI), Z6
+	VMOVDQU64 (DX), Z7
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (CX), Z8
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z8, Z9
+	VXORPD         Z4, Z9, Z4
+	VGF2P8AFFINEQB $0x00, Z1, Z8, Z9
+	VXORPD         Z5, Z9, Z5
+	VGF2P8AFFINEQB $0x00, Z2, Z8, Z9
+	VXORPD         Z6, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z3, Z8, Z9
+	VXORPD         Z7, Z9, Z7
+
+	// Store 4 outputs
+	VMOVDQU64 Z4, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z5, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z6, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z7, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), BX
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, BX
+
+	// Add start offset to input
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_1x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (SI), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (DI), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R8), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (BX), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x4Xor_end:
+	RET
+
 // func mulAvxTwo_1x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_1x5(SB), NOSPLIT, $0-88
@@ -568,49 +1479,275 @@ TEXT ·mulAvxTwo_1x5(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_1x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y5, Y7, Y7
+	VPAND   Y5, Y8, Y8
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y2
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y3
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y4
+
+	// Store 5 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x5_loop
+	VZEROUPPER
+
+mulAvxTwo_1x5_end:
+	RET
+
+// func mulGFNI_1x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x5_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, DX
 
+	// Add start offset to input
+	ADDQ R9, CX
+
+mulGFNI_1x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (CX), Z9
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z9, Z5
+	VGF2P8AFFINEQB $0x00, Z1, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z2, Z9, Z7
+	VGF2P8AFFINEQB $0x00, Z3, Z9, Z8
+	VGF2P8AFFINEQB $0x00, Z4, Z9, Z9
+
+	// Store 5 outputs
+	VMOVDQU64 Z5, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z6, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z7, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z8, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z9, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x5_64_loop
+	VZEROUPPER
+
+mulGFNI_1x5_64_end:
+	RET
+
+// func mulGFNI_1x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x5_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, DX
+
+	// Add start offset to input
+	ADDQ R9, CX
+
+mulGFNI_1x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (BX), Z5
+	VMOVDQU64 (SI), Z6
+	VMOVDQU64 (DI), Z7
+	VMOVDQU64 (R8), Z8
+	VMOVDQU64 (DX), Z9
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (CX), Z10
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z10, Z11
+	VXORPD         Z5, Z11, Z5
+	VGF2P8AFFINEQB $0x00, Z1, Z10, Z11
+	VXORPD         Z6, Z11, Z6
+	VGF2P8AFFINEQB $0x00, Z2, Z10, Z11
+	VXORPD         Z7, Z11, Z7
+	VGF2P8AFFINEQB $0x00, Z3, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z4, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Store 5 outputs
+	VMOVDQU64 Z5, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z6, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z7, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z8, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z9, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), BX
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, BX
+
+	// Add start offset to input
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_1x5Xor_loop:
 	// Load and process 32 bytes from input 0 to 5 outputs
 	VMOVDQU (DX), Y8
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y8, Y9
 	VPAND   Y5, Y8, Y8
 	VPAND   Y5, Y9, Y9
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y6
 	VMOVDQU 32(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (BX), Y4
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Store 5 outputs
 	VMOVDQU Y0, (SI)
@@ -626,10 +1763,10 @@ mulAvxTwo_1x5_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x5_loop
+	JNZ  mulAvxTwo_1x5Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x5_end:
+mulAvxTwo_1x5Xor_end:
 	RET
 
 // func mulAvxTwo_1x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -669,56 +1806,42 @@ TEXT ·mulAvxTwo_1x6(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_1x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (DX), Y9
+	VMOVDQU (DX), Y8
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y6, Y8, Y8
 	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y5, Y5
 	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
+	VPXOR   Y5, Y7, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y5, Y5
 	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
+	VPXOR   Y5, Y7, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y5, Y5
 	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
+	VPXOR   Y5, Y7, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y5, Y5
 	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
+	VPXOR   Y5, Y7, Y3
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y5, Y5
 	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
+	VPXOR   Y5, Y7, Y4
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y5, Y5
 	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPXOR   Y5, Y7, Y5
 
 	// Store 6 outputs
 	VMOVDQU Y0, (SI)
@@ -742,36 +1865,298 @@ mulAvxTwo_1x6_loop:
 mulAvxTwo_1x6_end:
 	RET
 
-// func mulAvxTwo_1x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x7(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_1x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x6_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 26 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_1x7_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), DX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  (BX), SI
-	MOVQ  24(BX), DI
-	MOVQ  48(BX), R8
-	MOVQ  72(BX), R9
-	MOVQ  96(BX), R10
-	MOVQ  120(BX), R11
-	MOVQ  144(BX), BX
-	MOVQ  start+72(FP), R12
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R12, SI
-	ADDQ R12, DI
-	ADDQ R12, R8
-	ADDQ R12, R9
-	ADDQ R12, R10
-	ADDQ R12, R11
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DX
+
+	// Add start offset to input
+	ADDQ R10, CX
+
+mulGFNI_1x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (CX), Z11
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z11, Z6
+	VGF2P8AFFINEQB $0x00, Z1, Z11, Z7
+	VGF2P8AFFINEQB $0x00, Z2, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z3, Z11, Z9
+	VGF2P8AFFINEQB $0x00, Z4, Z11, Z10
+	VGF2P8AFFINEQB $0x00, Z5, Z11, Z11
+
+	// Store 6 outputs
+	VMOVDQU64 Z6, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z7, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z8, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z9, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z10, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z11, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x6_64_loop
+	VZEROUPPER
+
+mulGFNI_1x6_64_end:
+	RET
+
+// func mulGFNI_1x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x6_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DX
+
+	// Add start offset to input
+	ADDQ R10, CX
+
+mulGFNI_1x6_64Xor_loop:
+	// Load 6 outputs
+	VMOVDQU64 (BX), Z6
+	VMOVDQU64 (SI), Z7
+	VMOVDQU64 (DI), Z8
+	VMOVDQU64 (R8), Z9
+	VMOVDQU64 (R9), Z10
+	VMOVDQU64 (DX), Z11
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (CX), Z12
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z12, Z13
+	VXORPD         Z6, Z13, Z6
+	VGF2P8AFFINEQB $0x00, Z1, Z12, Z13
+	VXORPD         Z7, Z13, Z7
+	VGF2P8AFFINEQB $0x00, Z2, Z12, Z13
+	VXORPD         Z8, Z13, Z8
+	VGF2P8AFFINEQB $0x00, Z3, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z4, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z5, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Store 6 outputs
+	VMOVDQU64 Z6, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z7, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z8, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z9, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z10, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z11, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x6_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), BX
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, BX
+
+	// Add start offset to input
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_1x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (SI), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU (DI), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU (R8), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU (R9), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU (R10), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (BX), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x6Xor_end:
+	RET
+
+// func mulAvxTwo_1x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x7(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), BX
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
 	ADDQ R12, BX
 
 	// Add start offset to input
@@ -781,63 +2166,333 @@ TEXT ·mulAvxTwo_1x7(SB), NOSPLIT, $0-88
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_1x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y7, Y9, Y9
+	VPAND   Y7, Y10, Y10
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y4
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y5
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y6
+
+	// Store 7 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x7_loop
+	VZEROUPPER
+
+mulAvxTwo_1x7_end:
+	RET
+
+// func mulGFNI_1x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x7_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 16 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DX
+
+	// Add start offset to input
+	ADDQ R11, CX
+
+mulGFNI_1x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (CX), Z13
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z13, Z7
+	VGF2P8AFFINEQB $0x00, Z1, Z13, Z8
+	VGF2P8AFFINEQB $0x00, Z2, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z3, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z4, Z13, Z11
+	VGF2P8AFFINEQB $0x00, Z5, Z13, Z12
+	VGF2P8AFFINEQB $0x00, Z6, Z13, Z13
+
+	// Store 7 outputs
+	VMOVDQU64 Z7, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z8, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z10, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z11, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z12, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z13, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x7_64_loop
+	VZEROUPPER
+
+mulGFNI_1x7_64_end:
+	RET
+
+// func mulGFNI_1x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x7_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 16 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DX
+
+	// Add start offset to input
+	ADDQ R11, CX
+
+mulGFNI_1x7_64Xor_loop:
+	// Load 7 outputs
+	VMOVDQU64 (BX), Z7
+	VMOVDQU64 (SI), Z8
+	VMOVDQU64 (DI), Z9
+	VMOVDQU64 (R8), Z10
+	VMOVDQU64 (R9), Z11
+	VMOVDQU64 (R10), Z12
+	VMOVDQU64 (DX), Z13
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (CX), Z14
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z14, Z15
+	VXORPD         Z7, Z15, Z7
+	VGF2P8AFFINEQB $0x00, Z1, Z14, Z15
+	VXORPD         Z8, Z15, Z8
+	VGF2P8AFFINEQB $0x00, Z2, Z14, Z15
+	VXORPD         Z9, Z15, Z9
+	VGF2P8AFFINEQB $0x00, Z3, Z14, Z15
+	VXORPD         Z10, Z15, Z10
+	VGF2P8AFFINEQB $0x00, Z4, Z14, Z15
+	VXORPD         Z11, Z15, Z11
+	VGF2P8AFFINEQB $0x00, Z5, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z6, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Store 7 outputs
+	VMOVDQU64 Z7, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z8, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z10, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z11, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z12, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z13, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x7_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), BX
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, BX
+
+	// Add start offset to input
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X7
+	VPBROADCASTB X7, Y7
 
+mulAvxTwo_1x7Xor_loop:
 	// Load and process 32 bytes from input 0 to 7 outputs
 	VMOVDQU (DX), Y10
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y10, Y11
 	VPAND   Y7, Y10, Y10
 	VPAND   Y7, Y11, Y11
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y8
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU (R10), Y4
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU (R11), Y5
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU (BX), Y6
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Store 7 outputs
 	VMOVDQU Y0, (SI)
@@ -857,10 +2512,10 @@ mulAvxTwo_1x7_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x7_loop
+	JNZ  mulAvxTwo_1x7Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x7_end:
+mulAvxTwo_1x7Xor_end:
 	RET
 
 // func mulAvxTwo_1x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -904,70 +2559,362 @@ TEXT ·mulAvxTwo_1x8(SB), NOSPLIT, $0-88
 	VPBROADCASTB X8, Y8
 
 mulAvxTwo_1x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y8, Y10, Y10
+	VPAND   Y8, Y11, Y11
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y5
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y6
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y7
+
+	// Store 8 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y7, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x8_loop
+	VZEROUPPER
+
+mulAvxTwo_1x8_end:
+	RET
+
+// func mulGFNI_1x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x8_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, DX
+
+	// Add start offset to input
+	ADDQ R12, CX
+
+mulGFNI_1x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (CX), Z15
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z15, Z8
+	VGF2P8AFFINEQB $0x00, Z1, Z15, Z9
+	VGF2P8AFFINEQB $0x00, Z2, Z15, Z10
+	VGF2P8AFFINEQB $0x00, Z3, Z15, Z11
+	VGF2P8AFFINEQB $0x00, Z4, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z5, Z15, Z13
+	VGF2P8AFFINEQB $0x00, Z6, Z15, Z14
+	VGF2P8AFFINEQB $0x00, Z7, Z15, Z15
+
+	// Store 8 outputs
+	VMOVDQU64 Z8, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z9, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z10, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z11, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z12, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z13, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z14, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z15, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x8_64_loop
+	VZEROUPPER
+
+mulGFNI_1x8_64_end:
+	RET
+
+// func mulGFNI_1x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x8_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, DX
+
+	// Add start offset to input
+	ADDQ R12, CX
+
+mulGFNI_1x8_64Xor_loop:
+	// Load 8 outputs
+	VMOVDQU64 (BX), Z8
+	VMOVDQU64 (SI), Z9
+	VMOVDQU64 (DI), Z10
+	VMOVDQU64 (R8), Z11
+	VMOVDQU64 (R9), Z12
+	VMOVDQU64 (R10), Z13
+	VMOVDQU64 (R11), Z14
+	VMOVDQU64 (DX), Z15
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (CX), Z16
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z16, Z17
+	VXORPD         Z8, Z17, Z8
+	VGF2P8AFFINEQB $0x00, Z1, Z16, Z17
+	VXORPD         Z9, Z17, Z9
+	VGF2P8AFFINEQB $0x00, Z2, Z16, Z17
+	VXORPD         Z10, Z17, Z10
+	VGF2P8AFFINEQB $0x00, Z3, Z16, Z17
+	VXORPD         Z11, Z17, Z11
+	VGF2P8AFFINEQB $0x00, Z4, Z16, Z17
+	VXORPD         Z12, Z17, Z12
+	VGF2P8AFFINEQB $0x00, Z5, Z16, Z17
+	VXORPD         Z13, Z17, Z13
+	VGF2P8AFFINEQB $0x00, Z6, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z7, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Store 8 outputs
+	VMOVDQU64 Z8, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z9, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z10, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z11, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z12, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z13, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z14, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z15, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 29 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), R12
+	MOVQ  168(BX), BX
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, BX
+
+	// Add start offset to input
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X8
+	VPBROADCASTB X8, Y8
 
+mulAvxTwo_1x8Xor_loop:
 	// Load and process 32 bytes from input 0 to 8 outputs
 	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y8, Y11, Y11
 	VPAND   Y8, Y12, Y12
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y9
 	VMOVDQU 32(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y9
 	VMOVDQU 96(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y9
 	VMOVDQU 160(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y9
 	VMOVDQU 224(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU (R10), Y4
 	VMOVDQU 256(CX), Y9
 	VMOVDQU 288(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU (R11), Y5
 	VMOVDQU 320(CX), Y9
 	VMOVDQU 352(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU (R12), Y6
 	VMOVDQU 384(CX), Y9
 	VMOVDQU 416(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU (BX), Y7
 	VMOVDQU 448(CX), Y9
 	VMOVDQU 480(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Store 8 outputs
 	VMOVDQU Y0, (SI)
@@ -989,10 +2936,10 @@ mulAvxTwo_1x8_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x8_loop
+	JNZ  mulAvxTwo_1x8Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x8_end:
+mulAvxTwo_1x8Xor_end:
 	RET
 
 // func mulAvxTwo_1x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -1038,77 +2985,57 @@ TEXT ·mulAvxTwo_1x9(SB), NOSPLIT, $0-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_1x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-
 	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (DX), Y12
+	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y9, Y11, Y11
 	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
+	VPXOR   Y8, Y10, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
+	VPXOR   Y8, Y10, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
+	VPXOR   Y8, Y10, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
+	VPXOR   Y8, Y10, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
+	VPXOR   Y8, Y10, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
+	VPXOR   Y8, Y10, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
+	VPXOR   Y8, Y10, Y6
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
+	VPXOR   Y8, Y10, Y7
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y8, Y10, Y8
 
 	// Store 9 outputs
 	VMOVDQU Y0, (SI)
@@ -1138,38 +3065,372 @@ mulAvxTwo_1x9_loop:
 mulAvxTwo_1x9_end:
 	RET
 
-// func mulAvxTwo_1x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x10(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_1x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x9_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 35 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_1x10_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), DX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  (BX), SI
-	MOVQ  24(BX), DI
-	MOVQ  48(BX), R8
-	MOVQ  72(BX), R9
-	MOVQ  96(BX), R10
-	MOVQ  120(BX), R11
-	MOVQ  144(BX), R12
-	MOVQ  168(BX), R13
-	MOVQ  192(BX), R14
-	MOVQ  216(BX), BX
-	MOVQ  start+72(FP), R15
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            start+72(FP), R13
 
 	// Add start offset to output
-	ADDQ R15, SI
-	ADDQ R15, DI
-	ADDQ R15, R8
-	ADDQ R15, R9
-	ADDQ R15, R10
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DX
+
+	// Add start offset to input
+	ADDQ R13, CX
+
+mulGFNI_1x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (CX), Z17
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z17, Z9
+	VGF2P8AFFINEQB $0x00, Z1, Z17, Z10
+	VGF2P8AFFINEQB $0x00, Z2, Z17, Z11
+	VGF2P8AFFINEQB $0x00, Z3, Z17, Z12
+	VGF2P8AFFINEQB $0x00, Z4, Z17, Z13
+	VGF2P8AFFINEQB $0x00, Z5, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z6, Z17, Z15
+	VGF2P8AFFINEQB $0x00, Z7, Z17, Z16
+	VGF2P8AFFINEQB $0x00, Z8, Z17, Z17
+
+	// Store 9 outputs
+	VMOVDQU64 Z9, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z10, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z11, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z12, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z14, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z15, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z16, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z17, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x9_64_loop
+	VZEROUPPER
+
+mulGFNI_1x9_64_end:
+	RET
+
+// func mulGFNI_1x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x9_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DX
+
+	// Add start offset to input
+	ADDQ R13, CX
+
+mulGFNI_1x9_64Xor_loop:
+	// Load 9 outputs
+	VMOVDQU64 (BX), Z9
+	VMOVDQU64 (SI), Z10
+	VMOVDQU64 (DI), Z11
+	VMOVDQU64 (R8), Z12
+	VMOVDQU64 (R9), Z13
+	VMOVDQU64 (R10), Z14
+	VMOVDQU64 (R11), Z15
+	VMOVDQU64 (R12), Z16
+	VMOVDQU64 (DX), Z17
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (CX), Z18
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z18, Z19
+	VXORPD         Z9, Z19, Z9
+	VGF2P8AFFINEQB $0x00, Z1, Z18, Z19
+	VXORPD         Z10, Z19, Z10
+	VGF2P8AFFINEQB $0x00, Z2, Z18, Z19
+	VXORPD         Z11, Z19, Z11
+	VGF2P8AFFINEQB $0x00, Z3, Z18, Z19
+	VXORPD         Z12, Z19, Z12
+	VGF2P8AFFINEQB $0x00, Z4, Z18, Z19
+	VXORPD         Z13, Z19, Z13
+	VGF2P8AFFINEQB $0x00, Z5, Z18, Z19
+	VXORPD         Z14, Z19, Z14
+	VGF2P8AFFINEQB $0x00, Z6, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z7, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z8, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Store 9 outputs
+	VMOVDQU64 Z9, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z10, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z11, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z12, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z14, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z15, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z16, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z17, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x9Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), R12
+	MOVQ  168(BX), R13
+	MOVQ  192(BX), BX
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, BX
+
+	// Add start offset to input
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_1x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (SI), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU (DI), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU (R8), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU (R9), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU (R10), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU (R11), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU (R12), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU (R13), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU (BX), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y7, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y8, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x9Xor_end:
+	RET
+
+// func mulAvxTwo_1x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), R12
+	MOVQ  168(BX), R13
+	MOVQ  192(BX), R14
+	MOVQ  216(BX), BX
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
 	ADDQ R15, R11
 	ADDQ R15, R12
 	ADDQ R15, R13
@@ -1183,84 +3444,420 @@ TEXT ·mulAvxTwo_1x10(SB), NOSPLIT, $0-88
 	VPBROADCASTB X10, Y10
 
 mulAvxTwo_1x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y10, Y12, Y12
+	VPAND   Y10, Y13, Y13
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y7
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y8
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y9
+
+	// Store 10 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y7, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y8, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y9, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x10_loop
+	VZEROUPPER
+
+mulAvxTwo_1x10_end:
+	RET
+
+// func mulGFNI_1x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x10_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            start+72(FP), R14
 
+	// Add start offset to output
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, DX
+
+	// Add start offset to input
+	ADDQ R14, CX
+
+mulGFNI_1x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (CX), Z19
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z19, Z10
+	VGF2P8AFFINEQB $0x00, Z1, Z19, Z11
+	VGF2P8AFFINEQB $0x00, Z2, Z19, Z12
+	VGF2P8AFFINEQB $0x00, Z3, Z19, Z13
+	VGF2P8AFFINEQB $0x00, Z4, Z19, Z14
+	VGF2P8AFFINEQB $0x00, Z5, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z6, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z7, Z19, Z17
+	VGF2P8AFFINEQB $0x00, Z8, Z19, Z18
+	VGF2P8AFFINEQB $0x00, Z9, Z19, Z19
+
+	// Store 10 outputs
+	VMOVDQU64 Z10, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z11, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z12, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z13, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z14, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z15, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z16, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z17, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z18, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z19, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x10_64_loop
+	VZEROUPPER
+
+mulGFNI_1x10_64_end:
+	RET
+
+// func mulGFNI_1x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_1x10_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_1x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), CX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            out_base+48(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, DX
+
+	// Add start offset to input
+	ADDQ R14, CX
+
+mulGFNI_1x10_64Xor_loop:
+	// Load 10 outputs
+	VMOVDQU64 (BX), Z10
+	VMOVDQU64 (SI), Z11
+	VMOVDQU64 (DI), Z12
+	VMOVDQU64 (R8), Z13
+	VMOVDQU64 (R9), Z14
+	VMOVDQU64 (R10), Z15
+	VMOVDQU64 (R11), Z16
+	VMOVDQU64 (R12), Z17
+	VMOVDQU64 (R13), Z18
+	VMOVDQU64 (DX), Z19
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (CX), Z20
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z0, Z20, Z21
+	VXORPD         Z10, Z21, Z10
+	VGF2P8AFFINEQB $0x00, Z1, Z20, Z21
+	VXORPD         Z11, Z21, Z11
+	VGF2P8AFFINEQB $0x00, Z2, Z20, Z21
+	VXORPD         Z12, Z21, Z12
+	VGF2P8AFFINEQB $0x00, Z3, Z20, Z21
+	VXORPD         Z13, Z21, Z13
+	VGF2P8AFFINEQB $0x00, Z4, Z20, Z21
+	VXORPD         Z14, Z21, Z14
+	VGF2P8AFFINEQB $0x00, Z5, Z20, Z21
+	VXORPD         Z15, Z21, Z15
+	VGF2P8AFFINEQB $0x00, Z6, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z7, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z8, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z9, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Store 10 outputs
+	VMOVDQU64 Z10, (BX)
+	ADDQ      $0x40, BX
+	VMOVDQU64 Z11, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z12, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z13, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z14, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z15, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z16, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z17, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z18, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z19, (DX)
+	ADDQ      $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_1x10_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_1x10_64Xor_end:
+	RET
+
+// func mulAvxTwo_1x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_1x10Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), R12
+	MOVQ  168(BX), R13
+	MOVQ  192(BX), R14
+	MOVQ  216(BX), BX
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, BX
+
+	// Add start offset to input
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_1x10Xor_loop:
 	// Load and process 32 bytes from input 0 to 10 outputs
 	VMOVDQU (DX), Y13
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y10, Y13, Y13
 	VPAND   Y10, Y14, Y14
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y11
 	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y11
 	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y11
 	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y11
 	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU (R10), Y4
 	VMOVDQU 256(CX), Y11
 	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU (R11), Y5
 	VMOVDQU 320(CX), Y11
 	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU (R12), Y6
 	VMOVDQU 384(CX), Y11
 	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU (R13), Y7
 	VMOVDQU 448(CX), Y11
 	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU (R14), Y8
 	VMOVDQU 512(CX), Y11
 	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU (BX), Y9
 	VMOVDQU 576(CX), Y11
 	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Store 10 outputs
 	VMOVDQU Y0, (SI)
@@ -1286,14 +3883,14 @@ mulAvxTwo_1x10_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x10_loop
+	JNZ  mulAvxTwo_1x10Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x10_end:
+mulAvxTwo_1x10Xor_end:
 	RET
 
 // func mulAvxTwo_2x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x1(SB), NOSPLIT, $0-88
 	// Loading all tables to registers
 	// Destination kept in GP registers
@@ -1325,9 +3922,6 @@ TEXT ·mulAvxTwo_2x1(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_2x1_loop:
-	// Clear 1 outputs
-	VPXOR Y4, Y4, Y4
-
 	// Load and process 32 bytes from input 0 to 1 outputs
 	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
@@ -1336,8 +3930,7 @@ mulAvxTwo_2x1_loop:
 	VPAND   Y5, Y7, Y7
 	VPSHUFB Y6, Y0, Y6
 	VPSHUFB Y7, Y1, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y6, Y7, Y4
 
 	// Load and process 32 bytes from input 1 to 1 outputs
 	VMOVDQU (CX), Y6
@@ -1347,8 +3940,7 @@ mulAvxTwo_2x1_loop:
 	VPAND   Y5, Y7, Y7
 	VPSHUFB Y6, Y2, Y6
 	VPSHUFB Y7, Y3, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Store 1 outputs
 	VMOVDQU Y4, (BX)
@@ -1363,150 +3955,396 @@ mulAvxTwo_2x1_end:
 	RET
 
 // func mulAvxTwo_2x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x1_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 8 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_2x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), AX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  start+72(FP), SI
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x1_64_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), BX
+	MOVQ    start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
 
 	// Add start offset to input
 	ADDQ         SI, DX
-	ADDQ         SI, AX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), DI
-	SHRQ         $0x06, DI
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X6
+	VPBROADCASTB X6, Y6
 
 mulAvxTwo_2x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
+	VMOVDQU (DX), Y7
+	VMOVDQU 32(DX), Y9
 	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y0, Y7
+	VPSHUFB Y9, Y0, Y9
+	VPSHUFB Y8, Y1, Y8
+	VPSHUFB Y10, Y1, Y10
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y9
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y2, Y7
+	VPSHUFB Y9, Y2, Y9
+	VPSHUFB Y8, Y3, Y8
+	VPSHUFB Y10, Y3, Y10
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Store 1 outputs
-	MOVQ    (BX), R8
-	VMOVDQU Y0, (R8)(SI*1)
-	VMOVDQU Y1, 32(R8)(SI*1)
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
 
 	// Prepare for next loop
-	ADDQ $0x40, SI
-	DECQ DI
+	DECQ AX
 	JNZ  mulAvxTwo_2x1_64_loop
 	VZEROUPPER
 
 mulAvxTwo_2x1_64_end:
 	RET
 
-// func mulAvxTwo_2x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_2x2(SB), NOSPLIT, $0-88
+// func mulGFNI_2x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x1_64(SB), $0-88
 	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 15 YMM used
+	// Full registers estimated 5 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), BX
+	MOVQ            start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+
+	// Add start offset to input
+	ADDQ SI, DX
+	ADDQ SI, CX
+
+mulGFNI_2x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z3
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z2
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (CX), Z3
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z1, Z3, Z3
+	VXORPD         Z2, Z3, Z2
+
+	// Store 1 outputs
+	VMOVDQU64 Z2, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x1_64_loop
+	VZEROUPPER
+
+mulGFNI_2x1_64_end:
+	RET
+
+// func mulGFNI_2x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 5 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), BX
+	MOVQ            start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+
+	// Add start offset to input
+	ADDQ SI, DX
+	ADDQ SI, CX
+
+mulGFNI_2x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (BX), Z2
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z3
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z3
+	VXORPD         Z2, Z3, Z2
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (CX), Z3
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z1, Z3, Z3
+	VXORPD         Z2, Z3, Z2
+
+	// Store 1 outputs
+	VMOVDQU64 Z2, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 8 YMM used
 	MOVQ    n+80(FP), AX
 	MOVQ    matrix_base+0(FP), CX
 	SHRQ    $0x05, AX
 	TESTQ   AX, AX
-	JZ      mulAvxTwo_2x2_end
+	JZ      mulAvxTwo_2x1Xor_end
 	VMOVDQU (CX), Y0
 	VMOVDQU 32(CX), Y1
 	VMOVDQU 64(CX), Y2
 	VMOVDQU 96(CX), Y3
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
 	MOVQ    in_base+24(FP), CX
 	MOVQ    (CX), DX
 	MOVQ    24(CX), CX
 	MOVQ    out_base+48(FP), BX
-	MOVQ    (BX), SI
-	MOVQ    24(BX), BX
-	MOVQ    start+72(FP), DI
+	MOVQ    (BX), BX
+	MOVQ    start+72(FP), SI
 
 	// Add start offset to output
-	ADDQ DI, SI
-	ADDQ DI, BX
+	ADDQ SI, BX
 
 	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, CX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X10
-	VPBROADCASTB X10, Y10
-
-mulAvxTwo_2x2_loop:
-	// Clear 2 outputs
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	ADDQ         SI, DX
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X5
+	VPBROADCASTB X5, Y5
 
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y13
+mulAvxTwo_2x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y5, Y6, Y6
+	VPAND   Y5, Y7, Y7
+	VMOVDQU (BX), Y4
+	VPSHUFB Y6, Y0, Y6
+	VPSHUFB Y7, Y1, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (CX), Y6
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y5, Y6, Y6
+	VPAND   Y5, Y7, Y7
+	VPSHUFB Y6, Y2, Y6
+	VPSHUFB Y7, Y3, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 1 outputs
+	VMOVDQU Y4, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x1Xor_end:
+	RET
+
+// func mulAvxTwo_2x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x1_64Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), BX
+	MOVQ    start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+
+	// Add start offset to input
+	ADDQ         SI, DX
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_2x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (BX), Y4
+	VMOVDQU 32(BX), Y5
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y7
+	VMOVDQU 32(DX), Y9
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y0, Y7
+	VPSHUFB Y9, Y0, Y9
+	VPSHUFB Y8, Y1, Y8
+	VPSHUFB Y10, Y1, Y10
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y9
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y2, Y7
+	VPSHUFB Y9, Y2, Y9
+	VPSHUFB Y8, Y3, Y8
+	VPSHUFB Y10, Y3, Y10
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 1 outputs
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x2(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 15 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x2_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), SI
+	MOVQ    24(BX), BX
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_2x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
 	VPSHUFB Y13, Y0, Y11
 	VPSHUFB Y14, Y1, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	VPXOR   Y11, Y12, Y8
 	VPSHUFB Y13, Y2, Y11
 	VPSHUFB Y14, Y3, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPXOR   Y11, Y12, Y9
 
 	// Load and process 32 bytes from input 1 to 2 outputs
 	VMOVDQU (CX), Y13
@@ -1516,12 +4354,10 @@ mulAvxTwo_2x2_loop:
 	VPAND   Y10, Y14, Y14
 	VPSHUFB Y13, Y4, Y11
 	VPSHUFB Y14, Y5, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VPSHUFB Y13, Y6, Y11
 	VPSHUFB Y14, Y7, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Store 2 outputs
 	VMOVDQU Y8, (SI)
@@ -1538,40 +4374,65 @@ mulAvxTwo_2x2_end:
 	RET
 
 // func mulAvxTwo_2x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 15 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 25 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_2x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), AX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  start+72(FP), SI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), SI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+	ADDQ R8, SI
 
 	// Add start offset to input
-	ADDQ         SI, DX
-	ADDQ         SI, AX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X4
+	ADDQ         R8, BX
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), DI
-	SHRQ         $0x06, DI
 
 mulAvxTwo_2x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
 	VMOVDQU (DX), Y9
 	VMOVDQU 32(DX), Y11
 	ADDQ    $0x40, DX
@@ -1581,31 +4442,317 @@ mulAvxTwo_2x2_64_loop:
 	VPAND   Y4, Y11, Y11
 	VPAND   Y4, Y10, Y10
 	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (SI)
+	VMOVDQU Y3, 32(SI)
+	ADDQ    $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_2x2_64_end:
+	RET
+
+// func mulGFNI_2x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 8 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), BX
+	MOVQ            start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
+
+	// Add start offset to input
+	ADDQ DI, DX
+	ADDQ DI, CX
+
+mulGFNI_2x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z6
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z6, Z4
+	VGF2P8AFFINEQB $0x00, Z1, Z6, Z5
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (CX), Z6
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z2, Z6, Z7
+	VXORPD         Z4, Z7, Z4
+	VGF2P8AFFINEQB $0x00, Z3, Z6, Z7
+	VXORPD         Z5, Z7, Z5
+
+	// Store 2 outputs
+	VMOVDQU64 Z4, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z5, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x2_64_loop
+	VZEROUPPER
+
+mulGFNI_2x2_64_end:
+	RET
+
+// func mulGFNI_2x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 8 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), BX
+	MOVQ            start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
+
+	// Add start offset to input
+	ADDQ DI, DX
+	ADDQ DI, CX
+
+mulGFNI_2x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (SI), Z4
+	VMOVDQU64 (BX), Z5
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z6
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z6, Z7
+	VXORPD         Z4, Z7, Z4
+	VGF2P8AFFINEQB $0x00, Z1, Z6, Z7
+	VXORPD         Z5, Z7, Z5
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (CX), Z6
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z2, Z6, Z7
+	VXORPD         Z4, Z7, Z4
+	VGF2P8AFFINEQB $0x00, Z3, Z6, Z7
+	VXORPD         Z5, Z7, Z5
+
+	// Store 2 outputs
+	VMOVDQU64 Z4, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z5, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x2Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 15 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x2Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), SI
+	MOVQ    24(BX), BX
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_2x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (SI), Y8
+	VPSHUFB Y13, Y0, Y11
+	VPSHUFB Y14, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU (BX), Y9
+	VPSHUFB Y13, Y2, Y11
+	VPSHUFB Y14, Y3, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (CX), Y13
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VPSHUFB Y13, Y4, Y11
+	VPSHUFB Y14, Y5, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VPSHUFB Y13, Y6, Y11
+	VPSHUFB Y14, Y7, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 2 outputs
+	VMOVDQU Y8, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y9, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x2Xor_end:
+	RET
+
+// func mulAvxTwo_2x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 25 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), SI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+	ADDQ R8, SI
+
+	// Add start offset to input
+	ADDQ         R8, BX
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_2x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (DI), Y0
+	VMOVDQU 32(DI), Y1
+	VMOVDQU (SI), Y2
+	VMOVDQU 32(SI), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
 	VMOVDQU (CX), Y5
 	VMOVDQU 32(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -1618,40 +4765,35 @@ mulAvxTwo_2x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Store 2 outputs
-	MOVQ    (BX), R8
-	VMOVDQU Y0, (R8)(SI*1)
-	VMOVDQU Y1, 32(R8)(SI*1)
-	MOVQ    24(BX), R8
-	VMOVDQU Y2, (R8)(SI*1)
-	VMOVDQU Y3, 32(R8)(SI*1)
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (SI)
+	VMOVDQU Y3, 32(SI)
+	ADDQ    $0x40, SI
 
 	// Prepare for next loop
-	ADDQ $0x40, SI
-	DECQ DI
-	JNZ  mulAvxTwo_2x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_2x2_64_end:
+mulAvxTwo_2x2_64Xor_end:
 	RET
 
 // func mulAvxTwo_2x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x3(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -1683,11 +4825,6 @@ TEXT Β·mulAvxTwo_2x3(SB), NOSPLIT, $0-88
 	VPBROADCASTB X3, Y3
 
 mulAvxTwo_2x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-
 	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
@@ -1698,20 +4835,17 @@ mulAvxTwo_2x3_loop:
 	VMOVDQU 32(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	VPXOR   Y4, Y5, Y0
 	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	VPXOR   Y4, Y5, Y1
 	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y4, Y5, Y2
 
 	// Load and process 32 bytes from input 1 to 3 outputs
 	VMOVDQU (DX), Y6
@@ -1723,20 +4857,17 @@ mulAvxTwo_2x3_loop:
 	VMOVDQU 224(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 256(CX), Y4
 	VMOVDQU 288(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 320(CX), Y4
 	VMOVDQU 352(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Store 3 outputs
 	VMOVDQU Y0, (DI)
@@ -1755,45 +4886,43 @@ mulAvxTwo_2x3_end:
 	RET
 
 // func mulAvxTwo_2x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 20 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_2x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), AX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  start+72(FP), SI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), SI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
 
 	// Add start offset to input
-	ADDQ         SI, DX
-	ADDQ         SI, AX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X6
+	ADDQ         R9, BX
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), DI
-	SHRQ         $0x06, DI
 
 mulAvxTwo_2x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -1806,35 +4935,29 @@ mulAvxTwo_2x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -1847,122 +4970,480 @@ mulAvxTwo_2x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Store 3 outputs
-	MOVQ    (BX), R8
-	VMOVDQU Y0, (R8)(SI*1)
-	VMOVDQU Y1, 32(R8)(SI*1)
-	MOVQ    24(BX), R8
-	VMOVDQU Y2, (R8)(SI*1)
-	VMOVDQU Y3, 32(R8)(SI*1)
-	MOVQ    48(BX), R8
-	VMOVDQU Y4, (R8)(SI*1)
-	VMOVDQU Y5, 32(R8)(SI*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, SI
-	DECQ DI
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y4, (SI)
+	VMOVDQU Y5, 32(SI)
+	ADDQ    $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
 	JNZ  mulAvxTwo_2x3_64_loop
 	VZEROUPPER
 
 mulAvxTwo_2x3_64_end:
 	RET
 
-// func mulAvxTwo_2x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_2x4(SB), NOSPLIT, $0-88
+// func mulGFNI_2x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x3_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 11 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), BX
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, BX
+
+	// Add start offset to input
+	ADDQ R8, DX
+	ADDQ R8, CX
+
+mulGFNI_2x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z9
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z1, Z9, Z7
+	VGF2P8AFFINEQB $0x00, Z2, Z9, Z8
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (CX), Z9
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z3, Z9, Z10
+	VXORPD         Z6, Z10, Z6
+	VGF2P8AFFINEQB $0x00, Z4, Z9, Z10
+	VXORPD         Z7, Z10, Z7
+	VGF2P8AFFINEQB $0x00, Z5, Z9, Z10
+	VXORPD         Z8, Z10, Z8
+
+	// Store 3 outputs
+	VMOVDQU64 Z6, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z7, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z8, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x3_64_loop
+	VZEROUPPER
+
+mulGFNI_2x3_64_end:
+	RET
+
+// func mulGFNI_2x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 11 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), BX
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, BX
+
+	// Add start offset to input
+	ADDQ R8, DX
+	ADDQ R8, CX
+
+mulGFNI_2x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (SI), Z6
+	VMOVDQU64 (DI), Z7
+	VMOVDQU64 (BX), Z8
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z9
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z9, Z10
+	VXORPD         Z6, Z10, Z6
+	VGF2P8AFFINEQB $0x00, Z1, Z9, Z10
+	VXORPD         Z7, Z10, Z7
+	VGF2P8AFFINEQB $0x00, Z2, Z9, Z10
+	VXORPD         Z8, Z10, Z8
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (CX), Z9
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z3, Z9, Z10
+	VXORPD         Z6, Z10, Z6
+	VGF2P8AFFINEQB $0x00, Z4, Z9, Z10
+	VXORPD         Z7, Z10, Z7
+	VGF2P8AFFINEQB $0x00, Z5, Z9, Z10
+	VXORPD         Z8, Z10, Z8
+
+	// Store 3 outputs
+	VMOVDQU64 Z6, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z7, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z8, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x3Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 25 YMM used
+	// Full registers estimated 20 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_2x4_end
+	JZ    mulAvxTwo_2x3Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), DX
 	MOVQ  out_base+48(FP), SI
 	MOVQ  (SI), DI
 	MOVQ  24(SI), R8
-	MOVQ  48(SI), R9
-	MOVQ  72(SI), SI
-	MOVQ  start+72(FP), R10
+	MOVQ  48(SI), SI
+	MOVQ  start+72(FP), R9
 
 	// Add start offset to output
-	ADDQ R10, DI
-	ADDQ R10, R8
-	ADDQ R10, R9
-	ADDQ R10, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
 
 	// Add start offset to input
-	ADDQ         R10, BX
-	ADDQ         R10, DX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_2x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R9, BX
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_2x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (SI), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x3Xor_end:
+	RET
+
+// func mulAvxTwo_2x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), SI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_2x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (DI), Y0
+	VMOVDQU 32(DI), Y1
+	VMOVDQU (R8), Y2
+	VMOVDQU 32(R8), Y3
+	VMOVDQU (SI), Y4
+	VMOVDQU 32(SI), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y4, (SI)
+	VMOVDQU Y5, 32(SI)
+	ADDQ    $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 25 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), SI
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, SI
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_2x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y5, Y6, Y1
 	VMOVDQU 128(CX), Y5
 	VMOVDQU 160(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	VPXOR   Y5, Y6, Y2
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y5, Y6, Y3
 
 	// Load and process 32 bytes from input 1 to 4 outputs
 	VMOVDQU (DX), Y7
@@ -1974,26 +5455,22 @@ mulAvxTwo_2x4_loop:
 	VMOVDQU 288(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	XOR3WAY( $0x00, Y5, Y6, Y0)
 	VMOVDQU 320(CX), Y5
 	VMOVDQU 352(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 	VMOVDQU 384(CX), Y5
 	VMOVDQU 416(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	XOR3WAY( $0x00, Y5, Y6, Y2)
 	VMOVDQU 448(CX), Y5
 	VMOVDQU 480(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
 	// Store 4 outputs
 	VMOVDQU Y0, (DI)
@@ -2013,8 +5490,287 @@ mulAvxTwo_2x4_loop:
 mulAvxTwo_2x4_end:
 	RET
 
+// func mulGFNI_2x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x4_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), BX
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, BX
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, CX
+
+mulGFNI_2x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z12
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z12, Z8
+	VGF2P8AFFINEQB $0x00, Z1, Z12, Z9
+	VGF2P8AFFINEQB $0x00, Z2, Z12, Z10
+	VGF2P8AFFINEQB $0x00, Z3, Z12, Z11
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (CX), Z12
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z4, Z12, Z13
+	VXORPD         Z8, Z13, Z8
+	VGF2P8AFFINEQB $0x00, Z5, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z6, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z7, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Store 4 outputs
+	VMOVDQU64 Z8, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z10, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z11, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x4_64_loop
+	VZEROUPPER
+
+mulGFNI_2x4_64_end:
+	RET
+
+// func mulGFNI_2x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x4_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), BX
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, BX
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, CX
+
+mulGFNI_2x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (SI), Z8
+	VMOVDQU64 (DI), Z9
+	VMOVDQU64 (R8), Z10
+	VMOVDQU64 (BX), Z11
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z12
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z12, Z13
+	VXORPD         Z8, Z13, Z8
+	VGF2P8AFFINEQB $0x00, Z1, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z2, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z3, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (CX), Z12
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z4, Z12, Z13
+	VXORPD         Z8, Z13, Z8
+	VGF2P8AFFINEQB $0x00, Z5, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z6, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z7, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Store 4 outputs
+	VMOVDQU64 Z8, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z10, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z11, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 25 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), SI
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, SI
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_2x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (SI), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x4Xor_end:
+	RET
+
 // func mulAvxTwo_2x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x5(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -2050,13 +5806,6 @@ TEXT Β·mulAvxTwo_2x5(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_2x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-
 	// Load and process 32 bytes from input 0 to 5 outputs
 	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
@@ -2067,32 +5816,27 @@ mulAvxTwo_2x5_loop:
 	VMOVDQU 32(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	VPXOR   Y6, Y7, Y0
 	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	VPXOR   Y6, Y7, Y1
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	VPXOR   Y6, Y7, Y2
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	VPXOR   Y6, Y7, Y3
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y6, Y7, Y4
 
 	// Load and process 32 bytes from input 1 to 5 outputs
 	VMOVDQU (DX), Y8
@@ -2104,32 +5848,27 @@ mulAvxTwo_2x5_loop:
 	VMOVDQU 352(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 384(CX), Y6
 	VMOVDQU 416(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 448(CX), Y6
 	VMOVDQU 480(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 512(CX), Y6
 	VMOVDQU 544(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 576(CX), Y6
 	VMOVDQU 608(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Store 5 outputs
 	VMOVDQU Y0, (DI)
@@ -2151,8 +5890,322 @@ mulAvxTwo_2x5_loop:
 mulAvxTwo_2x5_end:
 	RET
 
+// func mulGFNI_2x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x5_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), BX
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, BX
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, CX
+
+mulGFNI_2x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z15
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z15, Z10
+	VGF2P8AFFINEQB $0x00, Z1, Z15, Z11
+	VGF2P8AFFINEQB $0x00, Z2, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z3, Z15, Z13
+	VGF2P8AFFINEQB $0x00, Z4, Z15, Z14
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (CX), Z15
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z5, Z15, Z16
+	VXORPD         Z10, Z16, Z10
+	VGF2P8AFFINEQB $0x00, Z6, Z15, Z16
+	VXORPD         Z11, Z16, Z11
+	VGF2P8AFFINEQB $0x00, Z7, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z8, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z9, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Store 5 outputs
+	VMOVDQU64 Z10, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z11, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z12, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z14, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x5_64_loop
+	VZEROUPPER
+
+mulGFNI_2x5_64_end:
+	RET
+
+// func mulGFNI_2x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x5_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), BX
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, BX
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, CX
+
+mulGFNI_2x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (SI), Z10
+	VMOVDQU64 (DI), Z11
+	VMOVDQU64 (R8), Z12
+	VMOVDQU64 (R9), Z13
+	VMOVDQU64 (BX), Z14
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z15
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z15, Z16
+	VXORPD         Z10, Z16, Z10
+	VGF2P8AFFINEQB $0x00, Z1, Z15, Z16
+	VXORPD         Z11, Z16, Z11
+	VGF2P8AFFINEQB $0x00, Z2, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z3, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z4, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (CX), Z15
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z5, Z15, Z16
+	VXORPD         Z10, Z16, Z10
+	VGF2P8AFFINEQB $0x00, Z6, Z15, Z16
+	VXORPD         Z11, Z16, Z11
+	VGF2P8AFFINEQB $0x00, Z7, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z8, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z9, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Store 5 outputs
+	VMOVDQU64 Z10, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z11, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z12, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z14, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), SI
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, SI
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_2x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (SI), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x5Xor_end:
+	RET
+
 // func mulAvxTwo_2x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x6(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -2190,14 +6243,6 @@ TEXT Β·mulAvxTwo_2x6(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_2x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
@@ -2208,38 +6253,32 @@ mulAvxTwo_2x6_loop:
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPXOR   Y7, Y8, Y0
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPXOR   Y7, Y8, Y2
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y7, Y8, Y3
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPXOR   Y7, Y8, Y4
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPXOR   Y7, Y8, Y5
 
 	// Load and process 32 bytes from input 1 to 6 outputs
 	VMOVDQU (DX), Y9
@@ -2251,38 +6290,32 @@ mulAvxTwo_2x6_loop:
 	VMOVDQU 416(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 576(CX), Y7
 	VMOVDQU 608(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Store 6 outputs
 	VMOVDQU Y0, (DI)
@@ -2306,56 +6339,396 @@ mulAvxTwo_2x6_loop:
 mulAvxTwo_2x6_end:
 	RET
 
-// func mulAvxTwo_2x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_2x7(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_2x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x6_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 40 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_2x7_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), DX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  (SI), DI
-	MOVQ  24(SI), R8
-	MOVQ  48(SI), R9
-	MOVQ  72(SI), R10
-	MOVQ  96(SI), R11
-	MOVQ  120(SI), R12
-	MOVQ  144(SI), SI
-	MOVQ  start+72(FP), R13
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), BX
+	MOVQ            start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ R13, DI
-	ADDQ R13, R8
-	ADDQ R13, R9
-	ADDQ R13, R10
-	ADDQ R13, R11
-	ADDQ R13, R12
-	ADDQ R13, SI
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, BX
 
 	// Add start offset to input
-	ADDQ         R13, BX
+	ADDQ R11, DX
+	ADDQ R11, CX
+
+mulGFNI_2x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z18
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z18, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z18, Z13
+	VGF2P8AFFINEQB $0x00, Z2, Z18, Z14
+	VGF2P8AFFINEQB $0x00, Z3, Z18, Z15
+	VGF2P8AFFINEQB $0x00, Z4, Z18, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z18, Z17
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (CX), Z18
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z18, Z19
+	VXORPD         Z12, Z19, Z12
+	VGF2P8AFFINEQB $0x00, Z7, Z18, Z19
+	VXORPD         Z13, Z19, Z13
+	VGF2P8AFFINEQB $0x00, Z8, Z18, Z19
+	VXORPD         Z14, Z19, Z14
+	VGF2P8AFFINEQB $0x00, Z9, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z10, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z11, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Store 6 outputs
+	VMOVDQU64 Z12, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z13, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z14, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z15, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z16, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z17, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x6_64_loop
+	VZEROUPPER
+
+mulGFNI_2x6_64_end:
+	RET
+
+// func mulGFNI_2x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x6_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), BX
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, BX
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, CX
+
+mulGFNI_2x6_64Xor_loop:
+	// Load 6 outputs
+	VMOVDQU64 (SI), Z12
+	VMOVDQU64 (DI), Z13
+	VMOVDQU64 (R8), Z14
+	VMOVDQU64 (R9), Z15
+	VMOVDQU64 (R10), Z16
+	VMOVDQU64 (BX), Z17
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z18
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z18, Z19
+	VXORPD         Z12, Z19, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z18, Z19
+	VXORPD         Z13, Z19, Z13
+	VGF2P8AFFINEQB $0x00, Z2, Z18, Z19
+	VXORPD         Z14, Z19, Z14
+	VGF2P8AFFINEQB $0x00, Z3, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z4, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (CX), Z18
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z18, Z19
+	VXORPD         Z12, Z19, Z12
+	VGF2P8AFFINEQB $0x00, Z7, Z18, Z19
+	VXORPD         Z13, Z19, Z13
+	VGF2P8AFFINEQB $0x00, Z8, Z18, Z19
+	VXORPD         Z14, Z19, Z14
+	VGF2P8AFFINEQB $0x00, Z9, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z10, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z11, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Store 6 outputs
+	VMOVDQU64 Z12, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z13, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z14, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z15, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z16, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z17, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x6_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), SI
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, SI
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_2x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (SI), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x6Xor_end:
+	RET
+
+// func mulAvxTwo_2x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x7(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 40 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), SI
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, SI
+
+	// Add start offset to input
+	ADDQ         R13, BX
 	ADDQ         R13, DX
 	MOVQ         $0x0000000f, R13
 	MOVQ         R13, X7
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_2x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-
 	// Load and process 32 bytes from input 0 to 7 outputs
 	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
@@ -2366,44 +6739,37 @@ mulAvxTwo_2x7_loop:
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	VPXOR   Y8, Y9, Y0
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	VPXOR   Y8, Y9, Y1
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	VPXOR   Y8, Y9, Y2
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	VPXOR   Y8, Y9, Y3
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	VPXOR   Y8, Y9, Y4
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	VPXOR   Y8, Y9, Y5
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPXOR   Y8, Y9, Y6
 
 	// Load and process 32 bytes from input 1 to 7 outputs
 	VMOVDQU (DX), Y10
@@ -2415,44 +6781,37 @@ mulAvxTwo_2x7_loop:
 	VMOVDQU 480(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 512(CX), Y8
 	VMOVDQU 544(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 576(CX), Y8
 	VMOVDQU 608(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 640(CX), Y8
 	VMOVDQU 672(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 704(CX), Y8
 	VMOVDQU 736(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 768(CX), Y8
 	VMOVDQU 800(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 832(CX), Y8
 	VMOVDQU 864(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Store 7 outputs
 	VMOVDQU Y0, (DI)
@@ -2478,8 +6837,392 @@ mulAvxTwo_2x7_loop:
 mulAvxTwo_2x7_end:
 	RET
 
+// func mulGFNI_2x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x7_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), BX
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, BX
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, CX
+
+mulGFNI_2x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (DX), Z21
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z21, Z14
+	VGF2P8AFFINEQB $0x00, Z1, Z21, Z15
+	VGF2P8AFFINEQB $0x00, Z2, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z3, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z4, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z5, Z21, Z19
+	VGF2P8AFFINEQB $0x00, Z6, Z21, Z20
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (CX), Z21
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z7, Z21, Z22
+	VXORPD         Z14, Z22, Z14
+	VGF2P8AFFINEQB $0x00, Z8, Z21, Z22
+	VXORPD         Z15, Z22, Z15
+	VGF2P8AFFINEQB $0x00, Z9, Z21, Z22
+	VXORPD         Z16, Z22, Z16
+	VGF2P8AFFINEQB $0x00, Z10, Z21, Z22
+	VXORPD         Z17, Z22, Z17
+	VGF2P8AFFINEQB $0x00, Z11, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z12, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z13, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Store 7 outputs
+	VMOVDQU64 Z14, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z15, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z16, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z17, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z20, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x7_64_loop
+	VZEROUPPER
+
+mulGFNI_2x7_64_end:
+	RET
+
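// Note on the mulGFNI_* kernels (annotation, not part of the generated
// source): each VBROADCASTF32X2 replicates one 8-byte matrix entry, which is
// the 8x8 bit-matrix encoding of "multiply by this GF(2^8) constant". A single
// VGF2P8AFFINEQB therefore multiplies 64 input bytes by that constant at once,
// and VXORPD accumulates the product into the running output register, so each
// 64-byte block costs one affine op and one XOR per (input, output) pair.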
+// func mulGFNI_2x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x7_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), BX
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, BX
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, CX
+
+mulGFNI_2x7_64Xor_loop:
+	// Load 7 outputs
+	VMOVDQU64 (SI), Z14
+	VMOVDQU64 (DI), Z15
+	VMOVDQU64 (R8), Z16
+	VMOVDQU64 (R9), Z17
+	VMOVDQU64 (R10), Z18
+	VMOVDQU64 (R11), Z19
+	VMOVDQU64 (BX), Z20
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (DX), Z21
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z21, Z22
+	VXORPD         Z14, Z22, Z14
+	VGF2P8AFFINEQB $0x00, Z1, Z21, Z22
+	VXORPD         Z15, Z22, Z15
+	VGF2P8AFFINEQB $0x00, Z2, Z21, Z22
+	VXORPD         Z16, Z22, Z16
+	VGF2P8AFFINEQB $0x00, Z3, Z21, Z22
+	VXORPD         Z17, Z22, Z17
+	VGF2P8AFFINEQB $0x00, Z4, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z5, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z6, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (CX), Z21
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z7, Z21, Z22
+	VXORPD         Z14, Z22, Z14
+	VGF2P8AFFINEQB $0x00, Z8, Z21, Z22
+	VXORPD         Z15, Z22, Z15
+	VGF2P8AFFINEQB $0x00, Z9, Z21, Z22
+	VXORPD         Z16, Z22, Z16
+	VGF2P8AFFINEQB $0x00, Z10, Z21, Z22
+	VXORPD         Z17, Z22, Z17
+	VGF2P8AFFINEQB $0x00, Z11, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z12, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z13, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Store 7 outputs
+	VMOVDQU64 Z14, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z15, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z16, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z17, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z20, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x7_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 40 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), SI
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, SI
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_2x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU (SI), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x7Xor_end:
+	RET
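// Note on the stack frames (annotation, not part of the generated source):
// the $0-88 directive (and $8-88 where 8 bytes of local scratch are reserved)
// matches the Go signature: the matrix slice header sits at 0(FP), in at
// 24(FP), out at 48(FP), start at 72(FP) and n at 80(FP), for 88 argument
// bytes in total. n is a byte count; SHRQ $0x05 turns it into the number of
// 32-byte AVX2 iterations and SHRQ $0x06 into the number of 64-byte GFNI
// iterations kept in AX.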
+
 // func mulAvxTwo_2x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x8(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -2521,16 +7264,6 @@ TEXT ·mulAvxTwo_2x8(SB), NOSPLIT, $0-88
 	VPBROADCASTB X8, Y8
 
 mulAvxTwo_2x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-
 	// Load and process 32 bytes from input 0 to 8 outputs
 	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
@@ -2541,50 +7274,42 @@ mulAvxTwo_2x8_loop:
 	VMOVDQU 32(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	VPXOR   Y9, Y10, Y0
 	VMOVDQU 64(CX), Y9
 	VMOVDQU 96(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 128(CX), Y9
 	VMOVDQU 160(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	VPXOR   Y9, Y10, Y2
 	VMOVDQU 192(CX), Y9
 	VMOVDQU 224(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 256(CX), Y9
 	VMOVDQU 288(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	VPXOR   Y9, Y10, Y4
 	VMOVDQU 320(CX), Y9
 	VMOVDQU 352(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y9, Y10, Y5
 	VMOVDQU 384(CX), Y9
 	VMOVDQU 416(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	VPXOR   Y9, Y10, Y6
 	VMOVDQU 448(CX), Y9
 	VMOVDQU 480(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPXOR   Y9, Y10, Y7
 
 	// Load and process 32 bytes from input 1 to 8 outputs
 	VMOVDQU (DX), Y11
@@ -2596,50 +7321,42 @@ mulAvxTwo_2x8_loop:
 	VMOVDQU 544(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
 	VMOVDQU 576(CX), Y9
 	VMOVDQU 608(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y9
 	VMOVDQU 672(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 704(CX), Y9
 	VMOVDQU 736(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 768(CX), Y9
 	VMOVDQU 800(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 832(CX), Y9
 	VMOVDQU 864(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 896(CX), Y9
 	VMOVDQU 928(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 960(CX), Y9
 	VMOVDQU 992(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Store 8 outputs
 	VMOVDQU Y0, (DI)
@@ -2667,17 +7384,265 @@ mulAvxTwo_2x8_loop:
 mulAvxTwo_2x8_end:
 	RET
 
-// func mulAvxTwo_2x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_2x9(SB), NOSPLIT, $0-88
+// func mulGFNI_2x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x8_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), R12
+	MOVQ            168(BX), BX
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, BX
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, CX
+
+mulGFNI_2x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z16
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z17
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z18
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z19
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z20
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z21
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z22
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z23
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z16, Z25, Z16
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z17, Z25, Z17
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 8 outputs
+	VMOVDQU64 Z16, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z17, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z18, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z19, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z20, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z21, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z22, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z23, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x8_64_loop
+	VZEROUPPER
+
+mulGFNI_2x8_64_end:
+	RET
+
+// func mulGFNI_2x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x8_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), R12
+	MOVQ            168(BX), BX
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, BX
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, CX
+
+mulGFNI_2x8_64Xor_loop:
+	// Load 8 outputs
+	VMOVDQU64 (SI), Z16
+	VMOVDQU64 (DI), Z17
+	VMOVDQU64 (R8), Z18
+	VMOVDQU64 (R9), Z19
+	VMOVDQU64 (R10), Z20
+	VMOVDQU64 (R11), Z21
+	VMOVDQU64 (R12), Z22
+	VMOVDQU64 (BX), Z23
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z25
+	VXORPD         Z16, Z25, Z16
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z25
+	VXORPD         Z17, Z25, Z17
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z16, Z25, Z16
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z17, Z25, Z17
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 8 outputs
+	VMOVDQU64 Z16, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z17, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z18, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z19, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z20, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z21, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z22, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z23, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x8Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 50 YMM used
+	// Full registers estimated 45 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_2x9_end
+	JZ    mulAvxTwo_2x8Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), DX
@@ -2689,100 +7654,251 @@ TEXT ·mulAvxTwo_2x9(SB), NOSPLIT, $0-88
 	MOVQ  96(SI), R11
 	MOVQ  120(SI), R12
 	MOVQ  144(SI), R13
-	MOVQ  168(SI), R14
-	MOVQ  192(SI), SI
-	MOVQ  start+72(FP), R15
+	MOVQ  168(SI), SI
+	MOVQ  start+72(FP), R14
 
 	// Add start offset to output
-	ADDQ R15, DI
-	ADDQ R15, R8
-	ADDQ R15, R9
-	ADDQ R15, R10
-	ADDQ R15, R11
-	ADDQ R15, R12
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, SI
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X9
-	VPBROADCASTB X9, Y9
-
-mulAvxTwo_2x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	ADDQ         R14, BX
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X8
+	VPBROADCASTB X8, Y8
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
+mulAvxTwo_2x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU (R13), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU (SI), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x8Xor_end:
+	RET
+
+// func mulAvxTwo_2x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x9(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), SI
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, SI
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_2x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
 	VMOVDQU 512(CX), Y10
 	VMOVDQU 544(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y10, Y11, Y8
 
 	// Load and process 32 bytes from input 1 to 9 outputs
 	VMOVDQU (DX), Y12
@@ -2794,56 +7910,47 @@ mulAvxTwo_2x9_loop:
 	VMOVDQU 608(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 640(CX), Y10
 	VMOVDQU 672(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 704(CX), Y10
 	VMOVDQU 736(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 768(CX), Y10
 	VMOVDQU 800(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 832(CX), Y10
 	VMOVDQU 864(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 896(CX), Y10
 	VMOVDQU 928(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 960(CX), Y10
 	VMOVDQU 992(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 1024(CX), Y10
 	VMOVDQU 1056(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 1088(CX), Y10
 	VMOVDQU 1120(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Store 9 outputs
 	VMOVDQU Y0, (DI)
@@ -2873,8 +7980,462 @@ mulAvxTwo_2x9_loop:
 mulAvxTwo_2x9_end:
 	RET
 
+// func mulGFNI_2x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x9_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 29 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), R12
+	MOVQ            168(BX), R13
+	MOVQ            192(BX), BX
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, BX
+
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, CX
+
+mulGFNI_2x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (DX), Z27
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z27, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z27, Z19
+	VGF2P8AFFINEQB $0x00, Z2, Z27, Z20
+	VGF2P8AFFINEQB $0x00, Z3, Z27, Z21
+	VGF2P8AFFINEQB $0x00, Z4, Z27, Z22
+	VGF2P8AFFINEQB $0x00, Z5, Z27, Z23
+	VGF2P8AFFINEQB $0x00, Z6, Z27, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z27, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z27, Z26
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (CX), Z27
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z9, Z27, Z28
+	VXORPD         Z18, Z28, Z18
+	VGF2P8AFFINEQB $0x00, Z10, Z27, Z28
+	VXORPD         Z19, Z28, Z19
+	VGF2P8AFFINEQB $0x00, Z11, Z27, Z28
+	VXORPD         Z20, Z28, Z20
+	VGF2P8AFFINEQB $0x00, Z12, Z27, Z28
+	VXORPD         Z21, Z28, Z21
+	VGF2P8AFFINEQB $0x00, Z13, Z27, Z28
+	VXORPD         Z22, Z28, Z22
+	VGF2P8AFFINEQB $0x00, Z14, Z27, Z28
+	VXORPD         Z23, Z28, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Store 9 outputs
+	VMOVDQU64 Z18, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z19, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z20, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z21, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z22, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z23, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z24, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z25, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z26, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x9_64_loop
+	VZEROUPPER
+
+mulGFNI_2x9_64_end:
+	RET
+
+// func mulGFNI_2x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x9_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 29 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), R12
+	MOVQ            168(BX), R13
+	MOVQ            192(BX), BX
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, BX
+
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, CX
+
+mulGFNI_2x9_64Xor_loop:
+	// Load 9 outputs
+	VMOVDQU64 (SI), Z18
+	VMOVDQU64 (DI), Z19
+	VMOVDQU64 (R8), Z20
+	VMOVDQU64 (R9), Z21
+	VMOVDQU64 (R10), Z22
+	VMOVDQU64 (R11), Z23
+	VMOVDQU64 (R12), Z24
+	VMOVDQU64 (R13), Z25
+	VMOVDQU64 (BX), Z26
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (DX), Z27
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z27, Z28
+	VXORPD         Z18, Z28, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z27, Z28
+	VXORPD         Z19, Z28, Z19
+	VGF2P8AFFINEQB $0x00, Z2, Z27, Z28
+	VXORPD         Z20, Z28, Z20
+	VGF2P8AFFINEQB $0x00, Z3, Z27, Z28
+	VXORPD         Z21, Z28, Z21
+	VGF2P8AFFINEQB $0x00, Z4, Z27, Z28
+	VXORPD         Z22, Z28, Z22
+	VGF2P8AFFINEQB $0x00, Z5, Z27, Z28
+	VXORPD         Z23, Z28, Z23
+	VGF2P8AFFINEQB $0x00, Z6, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (CX), Z27
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z9, Z27, Z28
+	VXORPD         Z18, Z28, Z18
+	VGF2P8AFFINEQB $0x00, Z10, Z27, Z28
+	VXORPD         Z19, Z28, Z19
+	VGF2P8AFFINEQB $0x00, Z11, Z27, Z28
+	VXORPD         Z20, Z28, Z20
+	VGF2P8AFFINEQB $0x00, Z12, Z27, Z28
+	VXORPD         Z21, Z28, Z21
+	VGF2P8AFFINEQB $0x00, Z13, Z27, Z28
+	VXORPD         Z22, Z28, Z22
+	VGF2P8AFFINEQB $0x00, Z14, Z27, Z28
+	VXORPD         Z23, Z28, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Store 9 outputs
+	VMOVDQU64 Z18, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z19, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z20, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z21, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z22, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z23, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z24, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z25, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z26, (BX)
+	ADDQ      $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_2x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_2x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_2x9Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), SI
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, SI
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_2x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU (R13), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU (R14), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU (SI), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y8, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x9Xor_end:
+	RET
+
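// Note on the pattern visible in these hunks (annotation, not part of the
// generated source): the old kernels cleared every output register at the top
// of the loop and accumulated each product with two VPXORs; the rewritten
// kernels write the first input's product straight into the output register
// (VPXOR ..., ..., Y0) and use XOR3WAY for the remaining inputs, while the new
// ...Xor kernels cover the case where existing output contents must be kept.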
 // func mulAvxTwo_2x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_2x10(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -2920,18 +8481,6 @@ TEXT ·mulAvxTwo_2x10(SB), NOSPLIT, $8-88
 	VPBROADCASTB X10, Y10
 
 mulAvxTwo_2x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
-
 	// Load and process 32 bytes from input 0 to 10 outputs
 	VMOVDQU (BX), Y13
 	ADDQ    $0x20, BX
@@ -2942,62 +8491,52 @@ mulAvxTwo_2x10_loop:
 	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	VPXOR   Y11, Y12, Y0
 	VMOVDQU 64(CX), Y11
 	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	VPXOR   Y11, Y12, Y1
 	VMOVDQU 128(CX), Y11
 	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	VPXOR   Y11, Y12, Y2
 	VMOVDQU 192(CX), Y11
 	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	VPXOR   Y11, Y12, Y3
 	VMOVDQU 256(CX), Y11
 	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	VPXOR   Y11, Y12, Y4
 	VMOVDQU 320(CX), Y11
 	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	VPXOR   Y11, Y12, Y5
 	VMOVDQU 384(CX), Y11
 	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	VPXOR   Y11, Y12, Y6
 	VMOVDQU 448(CX), Y11
 	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	VPXOR   Y11, Y12, Y7
 	VMOVDQU 512(CX), Y11
 	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	VPXOR   Y11, Y12, Y8
 	VMOVDQU 576(CX), Y11
 	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPXOR   Y11, Y12, Y9
 
 	// Load and process 32 bytes from input 1 to 10 outputs
 	VMOVDQU (DX), Y13
@@ -3009,62 +8548,52 @@ mulAvxTwo_2x10_loop:
 	VMOVDQU 672(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 704(CX), Y11
 	VMOVDQU 736(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 768(CX), Y11
 	VMOVDQU 800(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 832(CX), Y11
 	VMOVDQU 864(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 896(CX), Y11
 	VMOVDQU 928(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 960(CX), Y11
 	VMOVDQU 992(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 1024(CX), Y11
 	VMOVDQU 1056(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 1088(CX), Y11
 	VMOVDQU 1120(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 1152(CX), Y11
 	VMOVDQU 1184(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 1216(CX), Y11
 	VMOVDQU 1248(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Store 10 outputs
 	VMOVDQU Y0, (DI)
@@ -3096,147 +8625,919 @@ mulAvxTwo_2x10_loop:
 mulAvxTwo_2x10_end:
 	RET
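// Note on register pressure (annotation, not part of the generated source):
// the 2x10 GFNI kernels below use the full AVX-512 register file: Z0-Z19 hold
// the 20 broadcast matrix entries, Z20-Z29 the ten outputs, and Z30/Z31 the
// current input block and the affine result being XORed in.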
 
-// func mulAvxTwo_3x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x1(SB), NOSPLIT, $0-88
+// func mulGFNI_2x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x10_64(SB), $0-88
 	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 10 YMM used
-	MOVQ    n+80(FP), AX
-	MOVQ    matrix_base+0(FP), CX
-	SHRQ    $0x05, AX
-	TESTQ   AX, AX
-	JZ      mulAvxTwo_3x1_end
-	VMOVDQU (CX), Y0
-	VMOVDQU 32(CX), Y1
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	MOVQ    in_base+24(FP), CX
-	MOVQ    (CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    48(CX), CX
-	MOVQ    out_base+48(FP), SI
-	MOVQ    (SI), SI
-	MOVQ    start+72(FP), DI
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), R12
+	MOVQ            168(BX), R13
+	MOVQ            192(BX), R14
+	MOVQ            216(BX), BX
+	MOVQ            start+72(FP), R15
 
 	// Add start offset to output
-	ADDQ DI, SI
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, BX
 
 	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, BX
-	ADDQ         DI, CX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X7
-	VPBROADCASTB X7, Y7
+	ADDQ R15, DX
+	ADDQ R15, CX
+
+mulGFNI_2x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
 
-mulAvxTwo_3x1_loop:
-	// Clear 1 outputs
-	VPXOR Y6, Y6, Y6
+	// Store 10 outputs
+	VMOVDQU64 Z20, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z21, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z22, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (BX)
+	ADDQ      $0x40, BX
 
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y8
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y7, Y8, Y8
-	VPAND   Y7, Y9, Y9
-	VPSHUFB Y8, Y0, Y8
-	VPSHUFB Y9, Y1, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_2x10_64_loop
+	VZEROUPPER
 
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y8
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y7, Y8, Y8
-	VPAND   Y7, Y9, Y9
-	VPSHUFB Y8, Y2, Y8
-	VPSHUFB Y9, Y3, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+mulGFNI_2x10_64_end:
+	RET
 
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (CX), Y8
-	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y7, Y8, Y8
-	VPAND   Y7, Y9, Y9
-	VPSHUFB Y8, Y4, Y8
-	VPSHUFB Y9, Y5, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+// func mulGFNI_2x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_2x10_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_2x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), CX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            out_base+48(FP), BX
+	MOVQ            (BX), SI
+	MOVQ            24(BX), DI
+	MOVQ            48(BX), R8
+	MOVQ            72(BX), R9
+	MOVQ            96(BX), R10
+	MOVQ            120(BX), R11
+	MOVQ            144(BX), R12
+	MOVQ            168(BX), R13
+	MOVQ            192(BX), R14
+	MOVQ            216(BX), BX
+	MOVQ            start+72(FP), R15
 
-	// Store 1 outputs
-	VMOVDQU Y6, (SI)
-	ADDQ    $0x20, SI
+	// Add start offset to output
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, BX
+
+	// Add start offset to input
+	ADDQ R15, DX
+	ADDQ R15, CX
+
+mulGFNI_2x10_64Xor_loop:
+	// Load 10 outputs
+	VMOVDQU64 (SI), Z20
+	VMOVDQU64 (DI), Z21
+	VMOVDQU64 (R8), Z22
+	VMOVDQU64 (R9), Z23
+	VMOVDQU64 (R10), Z24
+	VMOVDQU64 (R11), Z25
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (R13), Z27
+	VMOVDQU64 (R14), Z28
+	VMOVDQU64 (BX), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Store 10 outputs
+	VMOVDQU64 Z20, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z21, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z22, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (BX)
+	ADDQ      $0x40, BX
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_3x1_loop
+	JNZ  mulGFNI_2x10_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_3x1_end:
+mulGFNI_2x10_64Xor_end:
 	RET
 
-// func mulAvxTwo_3x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_3x1_64(SB), $0-88
+// func mulAvxTwo_2x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_2x10Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 10 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 55 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
+	JZ    mulAvxTwo_2x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
 	MOVQ  out_base+48(FP), SI
-	MOVQ  start+72(FP), DI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), R15
+	MOVQ  216(SI), SI
+	MOVQ  start+72(FP), BP
 
-	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, BX
-	ADDQ         DI, AX
-	MOVQ         $0x0000000f, R8
-	MOVQ         R8, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R8
-	SHRQ         $0x06, R8
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
 
-mulAvxTwo_3x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X10
+	VPBROADCASTB X10, Y10
 
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
+mulAvxTwo_2x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU (R13), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU (R14), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU (R15), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU (SI), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y8, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y9, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x10Xor_end:
+	RET
+
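The kernels in this hunk all compute the same thing: a byte-wise multiply-accumulate over GF(2^8), where each output shard is the XOR of every input shard multiplied by its matrix coefficient. The Xor variants (such as mulAvxTwo_2x10Xor above) fold the products into the existing output, while the plain variants write the first product directly and accumulate the rest, which is why the old "Clear N outputs" preambles disappear in this diff. A minimal scalar sketch of that contract, assuming raw coefficients in coeffs and a shift-and-reduce gfMul (the real kernels receive pre-expanded 64-byte lookup tables per coefficient, and the library itself uses table lookups rather than this loop):

	// gfMul multiplies two GF(2^8) elements by shift-and-reduce. The reducing
	// polynomial used here (x^8 + x^4 + x^3 + x^2 + 1, i.e. 0x11d) is an
	// assumption made for this sketch only.
	func gfMul(a, b byte) byte {
		var p byte
		for b != 0 {
			if b&1 != 0 {
				p ^= a
			}
			carry := a & 0x80
			a <<= 1
			if carry != 0 {
				a ^= 0x1d
			}
			b >>= 1
		}
		return p
	}

	// mulRefXor is a scalar model of the generated Xor kernels: out[r] is
	// accumulated with coeff(r, c) * in[c] for every input c, over bytes
	// [start, start+n). The coefficient layout is illustrative only.
	func mulRefXor(coeffs []byte, in, out [][]byte, start, n int) {
		for c := range in {
			for r := range out {
				co := coeffs[c*len(out)+r]
				for i := start; i < start+n; i++ {
					out[r][i] ^= gfMul(co, in[c][i])
				}
			}
		}
	}
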
+// func mulAvxTwo_3x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x1(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_3x1_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), CX
+	MOVQ    out_base+48(FP), SI
+	MOVQ    (SI), SI
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	ADDQ         DI, BX
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_3x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VPSHUFB Y8, Y0, Y8
+	VPSHUFB Y9, Y1, Y9
+	VPXOR   Y8, Y9, Y6
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VPSHUFB Y8, Y2, Y8
+	VPSHUFB Y9, Y3, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (CX), Y8
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VPSHUFB Y8, Y4, Y8
+	VPSHUFB Y9, Y5, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 1 outputs
+	VMOVDQU Y6, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x1_loop
+	VZEROUPPER
+
+mulAvxTwo_3x1_end:
+	RET
+
+// func mulAvxTwo_3x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), DI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+
+	// Add start offset to input
+	ADDQ         R8, BX
+	ADDQ         R8, SI
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_3x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
 	VPSHUFB Y5, Y3, Y5
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_3x1_64_end:
+	RET
+
+// func mulGFNI_3x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_3x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 6 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), SI
+	MOVQ            start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+
+	// Add start offset to input
+	ADDQ DI, DX
+	ADDQ DI, BX
+	ADDQ DI, CX
+
+mulGFNI_3x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z4
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z3
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z4
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z4, Z4
+	VXORPD         Z3, Z4, Z3
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (CX), Z4
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z2, Z4, Z4
+	VXORPD         Z3, Z4, Z3
+
+	// Store 1 outputs
+	VMOVDQU64 Z3, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x1_64_loop
+	VZEROUPPER
+
+mulGFNI_3x1_64_end:
+	RET
+
+// func mulGFNI_3x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_3x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 6 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), SI
+	MOVQ            start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+
+	// Add start offset to input
+	ADDQ DI, DX
+	ADDQ DI, BX
+	ADDQ DI, CX
+
+mulGFNI_3x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (SI), Z3
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z4
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z4
+	VXORPD         Z3, Z4, Z3
 
 	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z4
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z4, Z4
+	VXORPD         Z3, Z4, Z3
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (CX), Z4
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z2, Z4, Z4
+	VXORPD         Z3, Z4, Z3
+
+	// Store 1 outputs
+	VMOVDQU64 Z3, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x1_64Xor_end:
+	RET
+
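The mulGFNI_* kernels, in contrast, take the matrix as []uint64: each coefficient is pre-encoded as an 8x8 bit matrix over GF(2), broadcast into a ZMM register with VBROADCASTF32X2, and applied to 64 input bytes at once with VGF2P8AFFINEQB, with VXORPD folding the partial products together; the _64Xor variants additionally load the existing outputs first, so the products accumulate instead of overwriting. A per-byte model of the affine step with a zero immediate, offered as a sketch only (the row-to-result-bit ordering is my reading of the instruction and should be treated as an assumption):

	import "math/bits"

	// gf2p8Affine applies an 8x8 GF(2) bit matrix, packed one row per byte of
	// a uint64, to a single input byte; VGF2P8AFFINEQB does this for all 64
	// bytes of a ZMM register at once.
	func gf2p8Affine(bitMatrix uint64, x byte) byte {
		var out byte
		for i := uint(0); i < 8; i++ {
			row := byte(bitMatrix >> (8 * (7 - i)))    // assumed row feeding result bit i
			out |= byte(bits.OnesCount8(row&x)&1) << i // a GF(2) dot product is a parity
		}
		return out
	}
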
+// func mulAvxTwo_3x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_3x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), CX
+	MOVQ    out_base+48(FP), SI
+	MOVQ    (SI), SI
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	ADDQ         DI, BX
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_3x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VMOVDQU (SI), Y6
+	VPSHUFB Y8, Y0, Y8
+	VPSHUFB Y9, Y1, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VPSHUFB Y8, Y2, Y8
+	VPSHUFB Y9, Y3, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (CX), Y8
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VPSHUFB Y8, Y4, Y8
+	VPSHUFB Y9, Y5, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 1 outputs
+	VMOVDQU Y6, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x1Xor_end:
+	RET
+
+// func mulAvxTwo_3x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), DI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+
+	// Add start offset to input
+	ADDQ         R8, BX
+	ADDQ         R8, SI
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_3x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (DI), Y0
+	VMOVDQU 32(DI), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
 	VMOVDQU (BX), Y6
 	VMOVDQU 32(BX), Y5
 	ADDQ    $0x40, BX
@@ -3246,21 +9547,38 @@ mulAvxTwo_3x1_64_loop:
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y7, Y7
 	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
 	VPSHUFB Y5, Y3, Y5
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -3273,27 +9591,24 @@ mulAvxTwo_3x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Store 1 outputs
-	MOVQ    (SI), R9
-	VMOVDQU Y0, (R9)(DI*1)
-	VMOVDQU Y1, 32(R9)(DI*1)
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
 
 	// Prepare for next loop
-	ADDQ $0x40, DI
-	DECQ R8
-	JNZ  mulAvxTwo_3x1_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x1_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_3x1_64_end:
+mulAvxTwo_3x1_64Xor_end:
 	RET
 
 // func mulAvxTwo_3x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT Β·mulAvxTwo_3x2(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -3325,10 +9640,6 @@ TEXT Β·mulAvxTwo_3x2(SB), NOSPLIT, $0-88
 	VPBROADCASTB X2, Y2
 
 mulAvxTwo_3x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 32 bytes from input 0 to 2 outputs
 	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
@@ -3339,14 +9650,12 @@ mulAvxTwo_3x2_loop:
 	VMOVDQU 32(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	VPXOR   Y3, Y4, Y0
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y3, Y4, Y1
 
 	// Load and process 32 bytes from input 1 to 2 outputs
 	VMOVDQU (SI), Y5
@@ -3358,14 +9667,12 @@ mulAvxTwo_3x2_loop:
 	VMOVDQU 160(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 192(CX), Y3
 	VMOVDQU 224(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 2 to 2 outputs
 	VMOVDQU (DX), Y5
@@ -3377,14 +9684,12 @@ mulAvxTwo_3x2_loop:
 	VMOVDQU 288(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 320(CX), Y3
 	VMOVDQU 352(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Store 2 outputs
 	VMOVDQU Y0, (R8)
@@ -3401,45 +9706,43 @@ mulAvxTwo_3x2_end:
 	RET
 
 // func mulAvxTwo_3x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT Β·mulAvxTwo_3x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 19 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_3x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  out_base+48(FP), SI
-	MOVQ  start+72(FP), DI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), DI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+	ADDQ R9, DI
 
 	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, BX
-	ADDQ         DI, AX
-	MOVQ         $0x0000000f, R8
-	MOVQ         R8, X4
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R8
-	SHRQ         $0x06, R8
 
 mulAvxTwo_3x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -3452,25 +9755,21 @@ mulAvxTwo_3x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -3483,25 +9782,21 @@ mulAvxTwo_3x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -3514,40 +9809,424 @@ mulAvxTwo_3x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 320(CX), Y5
 	VMOVDQU 352(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Store 2 outputs
-	MOVQ    (SI), R9
-	VMOVDQU Y0, (R9)(DI*1)
-	VMOVDQU Y1, 32(R9)(DI*1)
-	MOVQ    24(SI), R9
-	VMOVDQU Y2, (R9)(DI*1)
-	VMOVDQU Y3, 32(R9)(DI*1)
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
 
 	// Prepare for next loop
-	ADDQ $0x40, DI
-	DECQ R8
+	DECQ AX
 	JNZ  mulAvxTwo_3x2_64_loop
 	VZEROUPPER
 
 mulAvxTwo_3x2_64_end:
 	RET
 
+// func mulGFNI_3x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_3x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), SI
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+	ADDQ R8, SI
+
+	// Add start offset to input
+	ADDQ R8, DX
+	ADDQ R8, BX
+	ADDQ R8, CX
+
+mulGFNI_3x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z8
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z8, Z6
+	VGF2P8AFFINEQB $0x00, Z1, Z8, Z7
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z8
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z8, Z9
+	VXORPD         Z6, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z3, Z8, Z9
+	VXORPD         Z7, Z9, Z7
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (CX), Z8
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z4, Z8, Z9
+	VXORPD         Z6, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z5, Z8, Z9
+	VXORPD         Z7, Z9, Z7
+
+	// Store 2 outputs
+	VMOVDQU64 Z6, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z7, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x2_64_loop
+	VZEROUPPER
+
+mulGFNI_3x2_64_end:
+	RET
+
+// func mulGFNI_3x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_3x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), SI
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+	ADDQ R8, SI
+
+	// Add start offset to input
+	ADDQ R8, DX
+	ADDQ R8, BX
+	ADDQ R8, CX
+
+mulGFNI_3x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (DI), Z6
+	VMOVDQU64 (SI), Z7
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z8
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z8, Z9
+	VXORPD         Z6, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z1, Z8, Z9
+	VXORPD         Z7, Z9, Z7
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z8
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z8, Z9
+	VXORPD         Z6, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z3, Z8, Z9
+	VXORPD         Z7, Z9, Z7
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (CX), Z8
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z4, Z8, Z9
+	VXORPD         Z6, Z9, Z6
+	VGF2P8AFFINEQB $0x00, Z5, Z8, Z9
+	VXORPD         Z7, Z9, Z7
+
+	// Store 2 outputs
+	VMOVDQU64 Z6, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z7, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x2Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 19 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x2Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), DI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+	ADDQ R9, DI
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_3x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (DI), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x2Xor_end:
+	RET
+
+// func mulAvxTwo_3x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), DI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+	ADDQ R9, DI
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_3x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R8), Y0
+	VMOVDQU 32(R8), Y1
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x2_64Xor_end:
+	RET
+
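XOR3WAY, used throughout the rewritten AVX2 kernels above, is a macro defined elsewhere in the generated assembly (not shown in this hunk); its scalar meaning is simply dst ^= a ^ b. Judging by the AVX512F/AVX512VL entries that now appear in the Requires comments, it can expand to a single VPTERNLOGD three-way XOR (with a plain two-VPXOR fallback), standing in for the VPXOR pairs removed in this diff:

	// Scalar meaning of XOR3WAY(imm, a, b, dst): dst = dst ^ a ^ b.
	// VPTERNLOGD's immediate 0x96 is the truth table of a three-input XOR,
	// which is how one instruction can replace two VPXORs.
	func xor3way(a, b, dst byte) byte { return dst ^ a ^ b }
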
 // func mulAvxTwo_3x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT Β·mulAvxTwo_3x3(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -3581,11 +10260,6 @@ TEXT Β·mulAvxTwo_3x3(SB), NOSPLIT, $0-88
 	VPBROADCASTB X3, Y3
 
 mulAvxTwo_3x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-
 	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
@@ -3596,20 +10270,17 @@ mulAvxTwo_3x3_loop:
 	VMOVDQU 32(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	VPXOR   Y4, Y5, Y0
 	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	VPXOR   Y4, Y5, Y1
 	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y4, Y5, Y2
 
 	// Load and process 32 bytes from input 1 to 3 outputs
 	VMOVDQU (SI), Y6
@@ -3621,20 +10292,17 @@ mulAvxTwo_3x3_loop:
 	VMOVDQU 224(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 256(CX), Y4
 	VMOVDQU 288(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 320(CX), Y4
 	VMOVDQU 352(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 2 to 3 outputs
 	VMOVDQU (DX), Y6
@@ -3646,20 +10314,17 @@ mulAvxTwo_3x3_loop:
 	VMOVDQU 416(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 448(CX), Y4
 	VMOVDQU 480(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 512(CX), Y4
 	VMOVDQU 544(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Store 3 outputs
 	VMOVDQU Y0, (R8)
@@ -3678,47 +10343,45 @@ mulAvxTwo_3x3_end:
 	RET
 
 // func mulAvxTwo_3x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT Β·mulAvxTwo_3x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 26 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 46 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_3x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  out_base+48(FP), SI
-	MOVQ  start+72(FP), DI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), DI
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
 
 	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, BX
-	ADDQ         DI, AX
-	MOVQ         $0x0000000f, R8
-	MOVQ         R8, X6
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R8
-	SHRQ         $0x06, R8
 
 mulAvxTwo_3x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -3731,35 +10394,29 @@ mulAvxTwo_3x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -3772,35 +10429,29 @@ mulAvxTwo_3x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -3813,62 +10464,233 @@ mulAvxTwo_3x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Store 3 outputs
-	MOVQ    (SI), R9
-	VMOVDQU Y0, (R9)(DI*1)
-	VMOVDQU Y1, 32(R9)(DI*1)
-	MOVQ    24(SI), R9
-	VMOVDQU Y2, (R9)(DI*1)
-	VMOVDQU Y3, 32(R9)(DI*1)
-	MOVQ    48(SI), R9
-	VMOVDQU Y4, (R9)(DI*1)
-	VMOVDQU Y5, 32(R9)(DI*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, DI
-	DECQ R8
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y4, (DI)
+	VMOVDQU Y5, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
 	JNZ  mulAvxTwo_3x3_64_loop
 	VZEROUPPER
 
 mulAvxTwo_3x3_64_end:
 	RET
 
-// func mulAvxTwo_3x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_3x4(SB), NOSPLIT, $0-88
+// func mulGFNI_3x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_3x3_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), SI
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, BX
+	ADDQ R9, CX
+
+mulGFNI_3x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z12
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z12, Z9
+	VGF2P8AFFINEQB $0x00, Z1, Z12, Z10
+	VGF2P8AFFINEQB $0x00, Z2, Z12, Z11
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z12
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z4, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z5, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (CX), Z12
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z7, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z8, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Store 3 outputs
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z10, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z11, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x3_64_loop
+	VZEROUPPER
+
+mulGFNI_3x3_64_end:
+	RET
+
+// func mulGFNI_3x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_3x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), SI
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, BX
+	ADDQ R9, CX
+
+mulGFNI_3x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (DI), Z9
+	VMOVDQU64 (R8), Z10
+	VMOVDQU64 (SI), Z11
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z12
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z1, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z2, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z12
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z4, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z5, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (CX), Z12
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z12, Z13
+	VXORPD         Z9, Z13, Z9
+	VGF2P8AFFINEQB $0x00, Z7, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z8, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Store 3 outputs
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z10, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z11, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x3Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 33 YMM used
+	// Full registers estimated 26 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x4_end
+	JZ    mulAvxTwo_3x3Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -3876,151 +10698,765 @@ TEXT Β·mulAvxTwo_3x4(SB), NOSPLIT, $0-88
 	MOVQ  out_base+48(FP), DI
 	MOVQ  (DI), R8
 	MOVQ  24(DI), R9
-	MOVQ  48(DI), R10
-	MOVQ  72(DI), DI
-	MOVQ  start+72(FP), R11
+	MOVQ  48(DI), DI
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R11, R8
-	ADDQ R11, R9
-	ADDQ R11, R10
-	ADDQ R11, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
 
 	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_3x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_3x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (DI), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (SI), Y7
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (DX), Y7
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Store 4 outputs
+	// Store 3 outputs
 	VMOVDQU Y0, (R8)
 	ADDQ    $0x20, R8
 	VMOVDQU Y1, (R9)
 	ADDQ    $0x20, R9
-	VMOVDQU Y2, (R10)
-	ADDQ    $0x20, R10
-	VMOVDQU Y3, (DI)
+	VMOVDQU Y2, (DI)
 	ADDQ    $0x20, DI
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_3x4_loop
+	JNZ  mulAvxTwo_3x3Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_3x4_end:
+mulAvxTwo_3x3Xor_end:
 	RET
 
-// func mulAvxTwo_3x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_3x5(SB), NOSPLIT, $0-88
+// func mulAvxTwo_3x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_3x3_64Xor(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 40 YMM used
+	// Full registers estimated 46 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), DI
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_3x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R8), Y0
+	VMOVDQU 32(R8), Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 32(R9), Y3
+	VMOVDQU (DI), Y4
+	VMOVDQU 32(DI), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y4, (DI)
+	VMOVDQU Y5, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), DI
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DI
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_3x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x4_loop
+	VZEROUPPER
+
+mulAvxTwo_3x4_end:
+	RET
+
+// func mulGFNI_3x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x4_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), SI
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, SI
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, CX
+
+mulGFNI_3x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z16
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z2, Z16, Z14
+	VGF2P8AFFINEQB $0x00, Z3, Z16, Z15
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z16
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z16, Z17
+	VXORPD         Z12, Z17, Z12
+	VGF2P8AFFINEQB $0x00, Z5, Z16, Z17
+	VXORPD         Z13, Z17, Z13
+	VGF2P8AFFINEQB $0x00, Z6, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z7, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (CX), Z16
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z16, Z17
+	VXORPD         Z12, Z17, Z12
+	VGF2P8AFFINEQB $0x00, Z9, Z16, Z17
+	VXORPD         Z13, Z17, Z13
+	VGF2P8AFFINEQB $0x00, Z10, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z11, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Store 4 outputs
+	VMOVDQU64 Z12, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z13, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z14, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z15, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x4_64_loop
+	VZEROUPPER
+
+mulGFNI_3x4_64_end:
+	RET
+
+// func mulGFNI_3x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x4_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), SI
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, SI
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, CX
+
+mulGFNI_3x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (DI), Z12
+	VMOVDQU64 (R8), Z13
+	VMOVDQU64 (R9), Z14
+	VMOVDQU64 (SI), Z15
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z16
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z16, Z17
+	VXORPD         Z12, Z17, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z16, Z17
+	VXORPD         Z13, Z17, Z13
+	VGF2P8AFFINEQB $0x00, Z2, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z3, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z16
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z16, Z17
+	VXORPD         Z12, Z17, Z12
+	VGF2P8AFFINEQB $0x00, Z5, Z16, Z17
+	VXORPD         Z13, Z17, Z13
+	VGF2P8AFFINEQB $0x00, Z6, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z7, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (CX), Z16
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z16, Z17
+	VXORPD         Z12, Z17, Z12
+	VGF2P8AFFINEQB $0x00, Z9, Z16, Z17
+	VXORPD         Z13, Z17, Z13
+	VGF2P8AFFINEQB $0x00, Z10, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z11, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Store 4 outputs
+	VMOVDQU64 Z12, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z13, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z14, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z15, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), DI
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DI
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_3x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (DI), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x4Xor_end:
+	RET
+
+// func mulAvxTwo_3x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x5(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 40 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_3x5_end
 	MOVQ  in_base+24(FP), DX
@@ -4051,13 +11487,6 @@ TEXT ·mulAvxTwo_3x5(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_3x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-
 	// Load and process 32 bytes from input 0 to 5 outputs
 	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
@@ -4068,32 +11497,27 @@ mulAvxTwo_3x5_loop:
 	VMOVDQU 32(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	VPXOR   Y6, Y7, Y0
 	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	VPXOR   Y6, Y7, Y1
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	VPXOR   Y6, Y7, Y2
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	VPXOR   Y6, Y7, Y3
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y6, Y7, Y4
 
 	// Load and process 32 bytes from input 1 to 5 outputs
 	VMOVDQU (SI), Y8
@@ -4105,32 +11529,27 @@ mulAvxTwo_3x5_loop:
 	VMOVDQU 352(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 384(CX), Y6
 	VMOVDQU 416(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 448(CX), Y6
 	VMOVDQU 480(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 512(CX), Y6
 	VMOVDQU 544(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 576(CX), Y6
 	VMOVDQU 608(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Load and process 32 bytes from input 2 to 5 outputs
 	VMOVDQU (DX), Y8
@@ -4142,32 +11561,27 @@ mulAvxTwo_3x5_loop:
 	VMOVDQU 672(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 704(CX), Y6
 	VMOVDQU 736(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 768(CX), Y6
 	VMOVDQU 800(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 832(CX), Y6
 	VMOVDQU 864(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 896(CX), Y6
 	VMOVDQU 928(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Store 5 outputs
 	VMOVDQU Y0, (R8)
@@ -4189,17 +11603,247 @@ mulAvxTwo_3x5_loop:
 mulAvxTwo_3x5_end:
 	RET
 
-// func mulAvxTwo_3x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x6(SB), NOSPLIT, $0-88
+// func mulGFNI_3x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x5_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), SI
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, SI
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, CX
+
+mulGFNI_3x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z20
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z20, Z15
+	VGF2P8AFFINEQB $0x00, Z1, Z20, Z16
+	VGF2P8AFFINEQB $0x00, Z2, Z20, Z17
+	VGF2P8AFFINEQB $0x00, Z3, Z20, Z18
+	VGF2P8AFFINEQB $0x00, Z4, Z20, Z19
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z20
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z20, Z21
+	VXORPD         Z15, Z21, Z15
+	VGF2P8AFFINEQB $0x00, Z6, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z7, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z8, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z9, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (CX), Z20
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z10, Z20, Z21
+	VXORPD         Z15, Z21, Z15
+	VGF2P8AFFINEQB $0x00, Z11, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z12, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z13, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z14, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Store 5 outputs
+	VMOVDQU64 Z15, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z16, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z17, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x5_64_loop
+	VZEROUPPER
+
+mulGFNI_3x5_64_end:
+	RET
+
+// func mulGFNI_3x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x5_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), SI
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, SI
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, CX
+
+mulGFNI_3x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (DI), Z15
+	VMOVDQU64 (R8), Z16
+	VMOVDQU64 (R9), Z17
+	VMOVDQU64 (R10), Z18
+	VMOVDQU64 (SI), Z19
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z20
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z20, Z21
+	VXORPD         Z15, Z21, Z15
+	VGF2P8AFFINEQB $0x00, Z1, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z2, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z3, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z4, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z20
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z20, Z21
+	VXORPD         Z15, Z21, Z15
+	VGF2P8AFFINEQB $0x00, Z6, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z7, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z8, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z9, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (CX), Z20
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z10, Z20, Z21
+	VXORPD         Z15, Z21, Z15
+	VGF2P8AFFINEQB $0x00, Z11, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z12, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z13, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z14, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Store 5 outputs
+	VMOVDQU64 Z15, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z16, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z17, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x5Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 47 YMM used
+	// Full registers estimated 40 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x6_end
+	JZ    mulAvxTwo_3x5Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -4209,77 +11853,223 @@ TEXT ·mulAvxTwo_3x6(SB), NOSPLIT, $0-88
 	MOVQ  24(DI), R9
 	MOVQ  48(DI), R10
 	MOVQ  72(DI), R11
-	MOVQ  96(DI), R12
-	MOVQ  120(DI), DI
-	MOVQ  start+72(FP), R13
+	MOVQ  96(DI), DI
+	MOVQ  start+72(FP), R12
 
 	// Add start offset to output
-	ADDQ R13, R8
-	ADDQ R13, R9
-	ADDQ R13, R10
-	ADDQ R13, R11
-	ADDQ R13, R12
-	ADDQ R13, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, DI
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X6
-	VPBROADCASTB X6, Y6
-
-mulAvxTwo_3x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X5
+	VPBROADCASTB X5, Y5
 
-	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (BX), Y9
+mulAvxTwo_3x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (DI), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x5Xor_end:
+	RET
+
+// func mulAvxTwo_3x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x6(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x6_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), DI
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DI
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_3x6_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPXOR   Y7, Y8, Y2
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y7, Y8, Y3
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPXOR   Y7, Y8, Y4
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPXOR   Y7, Y8, Y5
 
 	// Load and process 32 bytes from input 1 to 6 outputs
 	VMOVDQU (SI), Y9
@@ -4291,38 +12081,32 @@ mulAvxTwo_3x6_loop:
 	VMOVDQU 416(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 576(CX), Y7
 	VMOVDQU 608(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Load and process 32 bytes from input 2 to 6 outputs
 	VMOVDQU (DX), Y9
@@ -4334,38 +12118,32 @@ mulAvxTwo_3x6_loop:
 	VMOVDQU 800(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 832(CX), Y7
 	VMOVDQU 864(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 896(CX), Y7
 	VMOVDQU 928(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 960(CX), Y7
 	VMOVDQU 992(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 1024(CX), Y7
 	VMOVDQU 1056(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 1088(CX), Y7
 	VMOVDQU 1120(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Store 6 outputs
 	VMOVDQU Y0, (R8)
@@ -4389,8 +12167,444 @@ mulAvxTwo_3x6_loop:
 mulAvxTwo_3x6_end:
 	RET
 
+// func mulGFNI_3x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x6_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), R11
+	MOVQ            120(SI), SI
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, SI
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, CX
+
+mulGFNI_3x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z19
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z20
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z21
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z22
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z23
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (BX), Z24
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z16, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z17, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 6 outputs
+	VMOVDQU64 Z18, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z19, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z20, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z21, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z22, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z23, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x6_64_loop
+	VZEROUPPER
+
+mulGFNI_3x6_64_end:
+	RET
+
+// func mulGFNI_3x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x6_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), R11
+	MOVQ            120(SI), SI
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, SI
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, CX
+
+mulGFNI_3x6_64Xor_loop:
+	// Load 6 outputs
+	VMOVDQU64 (DI), Z18
+	VMOVDQU64 (R8), Z19
+	VMOVDQU64 (R9), Z20
+	VMOVDQU64 (R10), Z21
+	VMOVDQU64 (R11), Z22
+	VMOVDQU64 (SI), Z23
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (BX), Z24
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z18, Z25, Z18
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z19, Z25, Z19
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z16, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z17, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 6 outputs
+	VMOVDQU64 Z18, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z19, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z20, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z21, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z22, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z23, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x6_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), DI
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DI
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_3x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (DI), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x6Xor_end:
+	RET
+
 // func mulAvxTwo_3x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_3x7(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -4432,15 +12646,6 @@ TEXT ·mulAvxTwo_3x7(SB), NOSPLIT, $0-88
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_3x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-
 	// Load and process 32 bytes from input 0 to 7 outputs
 	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
@@ -4451,44 +12656,37 @@ mulAvxTwo_3x7_loop:
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	VPXOR   Y8, Y9, Y0
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	VPXOR   Y8, Y9, Y1
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	VPXOR   Y8, Y9, Y2
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	VPXOR   Y8, Y9, Y3
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	VPXOR   Y8, Y9, Y4
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	VPXOR   Y8, Y9, Y5
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPXOR   Y8, Y9, Y6
 
 	// Load and process 32 bytes from input 1 to 7 outputs
 	VMOVDQU (SI), Y10
@@ -4500,44 +12698,37 @@ mulAvxTwo_3x7_loop:
 	VMOVDQU 480(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 512(CX), Y8
 	VMOVDQU 544(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 576(CX), Y8
 	VMOVDQU 608(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 640(CX), Y8
 	VMOVDQU 672(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 704(CX), Y8
 	VMOVDQU 736(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 768(CX), Y8
 	VMOVDQU 800(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 832(CX), Y8
 	VMOVDQU 864(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Load and process 32 bytes from input 2 to 7 outputs
 	VMOVDQU (DX), Y10
@@ -4549,44 +12740,37 @@ mulAvxTwo_3x7_loop:
 	VMOVDQU 928(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 960(CX), Y8
 	VMOVDQU 992(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 1024(CX), Y8
 	VMOVDQU 1056(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 1088(CX), Y8
 	VMOVDQU 1120(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 1152(CX), Y8
 	VMOVDQU 1184(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 1216(CX), Y8
 	VMOVDQU 1248(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 1280(CX), Y8
 	VMOVDQU 1312(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Store 7 outputs
 	VMOVDQU Y0, (R8)
@@ -4612,17 +12796,299 @@ mulAvxTwo_3x7_loop:
 mulAvxTwo_3x7_end:
 	RET
 
-// func mulAvxTwo_3x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x8(SB), NOSPLIT, $0-88
+// func mulGFNI_3x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x7_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), R11
+	MOVQ            120(SI), R12
+	MOVQ            144(SI), SI
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, SI
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, CX
+
+mulGFNI_3x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (DX), Z28
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z28, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z28, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z28, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z28, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z28, Z27
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (BX), Z28
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z7, Z28, Z29
+	VXORPD         Z21, Z29, Z21
+	VGF2P8AFFINEQB $0x00, Z8, Z28, Z29
+	VXORPD         Z22, Z29, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z28, Z29
+	VXORPD         Z23, Z29, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (CX), Z28
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z14, Z28, Z29
+	VXORPD         Z21, Z29, Z21
+	VGF2P8AFFINEQB $0x00, Z15, Z28, Z29
+	VXORPD         Z22, Z29, Z22
+	VGF2P8AFFINEQB $0x00, Z16, Z28, Z29
+	VXORPD         Z23, Z29, Z23
+	VGF2P8AFFINEQB $0x00, Z17, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z18, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z19, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z20, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Store 7 outputs
+	VMOVDQU64 Z21, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z22, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x7_64_loop
+	VZEROUPPER
+
+mulGFNI_3x7_64_end:
+	RET
+
+// func mulGFNI_3x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x7_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), CX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), R11
+	MOVQ            120(SI), R12
+	MOVQ            144(SI), SI
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, SI
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, CX
+
+mulGFNI_3x7_64Xor_loop:
+	// Load 7 outputs
+	VMOVDQU64 (DI), Z21
+	VMOVDQU64 (R8), Z22
+	VMOVDQU64 (R9), Z23
+	VMOVDQU64 (R10), Z24
+	VMOVDQU64 (R11), Z25
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (SI), Z27
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (DX), Z28
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z28, Z29
+	VXORPD         Z21, Z29, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z28, Z29
+	VXORPD         Z22, Z29, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z28, Z29
+	VXORPD         Z23, Z29, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (BX), Z28
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z7, Z28, Z29
+	VXORPD         Z21, Z29, Z21
+	VGF2P8AFFINEQB $0x00, Z8, Z28, Z29
+	VXORPD         Z22, Z29, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z28, Z29
+	VXORPD         Z23, Z29, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (CX), Z28
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z14, Z28, Z29
+	VXORPD         Z21, Z29, Z21
+	VGF2P8AFFINEQB $0x00, Z15, Z28, Z29
+	VXORPD         Z22, Z29, Z22
+	VGF2P8AFFINEQB $0x00, Z16, Z28, Z29
+	VXORPD         Z23, Z29, Z23
+	VGF2P8AFFINEQB $0x00, Z17, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z18, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z19, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z20, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Store 7 outputs
+	VMOVDQU64 Z21, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z22, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x7_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x7Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 61 YMM used
+	// Full registers estimated 54 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x8_end
+	JZ    mulAvxTwo_3x7Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -4634,148 +13100,322 @@ TEXT ·mulAvxTwo_3x8(SB), NOSPLIT, $0-88
 	MOVQ  72(DI), R11
 	MOVQ  96(DI), R12
 	MOVQ  120(DI), R13
-	MOVQ  144(DI), R14
-	MOVQ  168(DI), DI
-	MOVQ  start+72(FP), R15
+	MOVQ  144(DI), DI
+	MOVQ  start+72(FP), R14
 
 	// Add start offset to output
-	ADDQ R15, R8
-	ADDQ R15, R9
-	ADDQ R15, R10
-	ADDQ R15, R11
-	ADDQ R15, R12
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, DI
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X8
-	VPBROADCASTB X8, Y8
-
-mulAvxTwo_3x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X7
+	VPBROADCASTB X7, Y7
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_3x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU (DI), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (SI), Y11
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x7Xor_end:
+	RET
+
+// func mulAvxTwo_3x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x8(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 61 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), DI
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, DI
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_3x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y9
 	VMOVDQU 672(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 704(CX), Y9
 	VMOVDQU 736(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 768(CX), Y9
 	VMOVDQU 800(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 832(CX), Y9
 	VMOVDQU 864(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 896(CX), Y9
 	VMOVDQU 928(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 960(CX), Y9
 	VMOVDQU 992(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Load and process 32 bytes from input 2 to 8 outputs
 	VMOVDQU (DX), Y11
@@ -4787,50 +13427,42 @@ mulAvxTwo_3x8_loop:
 	VMOVDQU 1056(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
 	VMOVDQU 1088(CX), Y9
 	VMOVDQU 1120(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1152(CX), Y9
 	VMOVDQU 1184(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 1216(CX), Y9
 	VMOVDQU 1248(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1280(CX), Y9
 	VMOVDQU 1312(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 1344(CX), Y9
 	VMOVDQU 1376(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 1408(CX), Y9
 	VMOVDQU 1440(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 1472(CX), Y9
 	VMOVDQU 1504(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Store 8 outputs
 	VMOVDQU Y0, (R8)
@@ -4858,8 +13490,532 @@ mulAvxTwo_3x8_loop:
 mulAvxTwo_3x8_end:
 	RET
 
+// func mulGFNI_3x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x8_64(SB), $0-88
+	// Loading 22 of 24 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), R13
+	MOVQ            144(DI), R14
+	MOVQ            168(DI), DI
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, DI
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DX
+
+mulGFNI_3x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	VMOVDQU64 Z22, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x8_64_loop
+	VZEROUPPER
+
+mulGFNI_3x8_64_end:
+	RET
+
+// func mulGFNI_3x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x8_64Xor(SB), $0-88
+	// Loading 22 of 24 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), R13
+	MOVQ            144(DI), R14
+	MOVQ            168(DI), DI
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, DI
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DX
+
+mulGFNI_3x8_64Xor_loop:
+	// Load 8 outputs
+	VMOVDQU64 (R8), Z22
+	VMOVDQU64 (R9), Z23
+	VMOVDQU64 (R10), Z24
+	VMOVDQU64 (R11), Z25
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (R13), Z27
+	VMOVDQU64 (R14), Z28
+	VMOVDQU64 (DI), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	VMOVDQU64 Z22, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 61 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), DI
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, DI
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_3x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU (R14), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU (DI), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y7, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x8Xor_end:
+	RET
+
 // func mulAvxTwo_3x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_3x9(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -4905,17 +14061,6 @@ TEXT ·mulAvxTwo_3x9(SB), NOSPLIT, $8-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_3x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-
 	// Load and process 32 bytes from input 0 to 9 outputs
 	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
@@ -4926,56 +14071,47 @@ mulAvxTwo_3x9_loop:
 	VMOVDQU 32(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	VPXOR   Y10, Y11, Y0
 	VMOVDQU 64(CX), Y10
 	VMOVDQU 96(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	VPXOR   Y10, Y11, Y1
 	VMOVDQU 128(CX), Y10
 	VMOVDQU 160(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	VPXOR   Y10, Y11, Y2
 	VMOVDQU 192(CX), Y10
 	VMOVDQU 224(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	VPXOR   Y10, Y11, Y3
 	VMOVDQU 256(CX), Y10
 	VMOVDQU 288(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	VPXOR   Y10, Y11, Y4
 	VMOVDQU 320(CX), Y10
 	VMOVDQU 352(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	VPXOR   Y10, Y11, Y5
 	VMOVDQU 384(CX), Y10
 	VMOVDQU 416(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	VPXOR   Y10, Y11, Y6
 	VMOVDQU 448(CX), Y10
 	VMOVDQU 480(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	VPXOR   Y10, Y11, Y7
 	VMOVDQU 512(CX), Y10
 	VMOVDQU 544(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y10, Y11, Y8
 
 	// Load and process 32 bytes from input 1 to 9 outputs
 	VMOVDQU (SI), Y12
@@ -4987,56 +14123,47 @@ mulAvxTwo_3x9_loop:
 	VMOVDQU 608(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 640(CX), Y10
 	VMOVDQU 672(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 704(CX), Y10
 	VMOVDQU 736(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 768(CX), Y10
 	VMOVDQU 800(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 832(CX), Y10
 	VMOVDQU 864(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 896(CX), Y10
 	VMOVDQU 928(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 960(CX), Y10
 	VMOVDQU 992(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 1024(CX), Y10
 	VMOVDQU 1056(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 1088(CX), Y10
 	VMOVDQU 1120(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 2 to 9 outputs
 	VMOVDQU (DX), Y12
@@ -5048,56 +14175,47 @@ mulAvxTwo_3x9_loop:
 	VMOVDQU 1184(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 1216(CX), Y10
 	VMOVDQU 1248(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 1280(CX), Y10
 	VMOVDQU 1312(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 1344(CX), Y10
 	VMOVDQU 1376(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 1408(CX), Y10
 	VMOVDQU 1440(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 1472(CX), Y10
 	VMOVDQU 1504(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 1536(CX), Y10
 	VMOVDQU 1568(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 1600(CX), Y10
 	VMOVDQU 1632(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 1664(CX), Y10
 	VMOVDQU 1696(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Store 9 outputs
 	VMOVDQU Y0, (R8)
@@ -5127,36 +14245,356 @@ mulAvxTwo_3x9_loop:
 mulAvxTwo_3x9_end:
 	RET
 
-// func mulAvxTwo_3x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x10(SB), NOSPLIT, $8-88
+// func mulGFNI_3x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x9_64(SB), $8-88
+	// Loading 21 of 27 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), R13
+	MOVQ            144(DI), R14
+	MOVQ            168(DI), R15
+	MOVQ            192(DI), DI
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, DI
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DX
+
+mulGFNI_3x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	VMOVDQU64 Z21, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x9_64_loop
+	VZEROUPPER
+
+mulGFNI_3x9_64_end:
+	RET
+
+// func mulGFNI_3x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x9_64Xor(SB), $8-88
+	// Loading 21 of 27 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), R13
+	MOVQ            144(DI), R14
+	MOVQ            168(DI), R15
+	MOVQ            192(DI), DI
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, DI
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DX
+
+mulGFNI_3x9_64Xor_loop:
+	// Load 9 outputs
+	VMOVDQU64 (R8), Z21
+	VMOVDQU64 (R9), Z22
+	VMOVDQU64 (R10), Z23
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (DI), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	VMOVDQU64 Z21, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_3x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x9Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 75 YMM used
+	// Full registers estimated 68 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x10_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  (SI), DI
-	MOVQ  24(SI), R8
-	MOVQ  48(SI), R9
-	MOVQ  72(SI), R10
-	MOVQ  96(SI), R11
-	MOVQ  120(SI), R12
-	MOVQ  144(SI), R13
-	MOVQ  168(SI), R14
-	MOVQ  192(SI), R15
-	MOVQ  216(SI), SI
+	JZ    mulAvxTwo_3x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), R15
+	MOVQ  192(DI), DI
 	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ BP, DI
 	ADDQ BP, R8
 	ADDQ BP, R9
 	ADDQ BP, R10
@@ -5165,31 +14603,261 @@ TEXT ·mulAvxTwo_3x10(SB), NOSPLIT, $8-88
 	ADDQ BP, R13
 	ADDQ BP, R14
 	ADDQ BP, R15
-	ADDQ BP, SI
+	ADDQ BP, DI
 
 	// Add start offset to input
-	ADDQ         BP, DX
 	ADDQ         BP, BX
-	ADDQ         BP, AX
+	ADDQ         BP, SI
+	ADDQ         BP, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X10
-	VPBROADCASTB X10, Y10
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_3x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	MOVQ         BP, X9
+	VPBROADCASTB X9, Y9
 
+mulAvxTwo_3x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU (R14), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU (R15), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU (DI), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y7, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y8, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x9Xor_end:
+	RET
+
+// func mulAvxTwo_3x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x10(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 75 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x10_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), AX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), R15
+	MOVQ  216(SI), SI
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X10
+	VPBROADCASTB X10, Y10
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_3x10_loop:
 	// Load and process 32 bytes from input 0 to 10 outputs
 	VMOVDQU (DX), Y13
 	ADDQ    $0x20, DX
@@ -5200,62 +14868,52 @@ mulAvxTwo_3x10_loop:
 	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	VPXOR   Y11, Y12, Y0
 	VMOVDQU 64(CX), Y11
 	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	VPXOR   Y11, Y12, Y1
 	VMOVDQU 128(CX), Y11
 	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	VPXOR   Y11, Y12, Y2
 	VMOVDQU 192(CX), Y11
 	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	VPXOR   Y11, Y12, Y3
 	VMOVDQU 256(CX), Y11
 	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	VPXOR   Y11, Y12, Y4
 	VMOVDQU 320(CX), Y11
 	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	VPXOR   Y11, Y12, Y5
 	VMOVDQU 384(CX), Y11
 	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	VPXOR   Y11, Y12, Y6
 	VMOVDQU 448(CX), Y11
 	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	VPXOR   Y11, Y12, Y7
 	VMOVDQU 512(CX), Y11
 	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	VPXOR   Y11, Y12, Y8
 	VMOVDQU 576(CX), Y11
 	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPXOR   Y11, Y12, Y9
 
 	// Load and process 32 bytes from input 1 to 10 outputs
 	VMOVDQU (BX), Y13
@@ -5267,62 +14925,52 @@ mulAvxTwo_3x10_loop:
 	VMOVDQU 672(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 704(CX), Y11
 	VMOVDQU 736(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 768(CX), Y11
 	VMOVDQU 800(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 832(CX), Y11
 	VMOVDQU 864(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 896(CX), Y11
 	VMOVDQU 928(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 960(CX), Y11
 	VMOVDQU 992(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 1024(CX), Y11
 	VMOVDQU 1056(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 1088(CX), Y11
 	VMOVDQU 1120(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 1152(CX), Y11
 	VMOVDQU 1184(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 1216(CX), Y11
 	VMOVDQU 1248(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 2 to 10 outputs
 	VMOVDQU (AX), Y13
@@ -5334,62 +14982,52 @@ mulAvxTwo_3x10_loop:
 	VMOVDQU 1312(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 1344(CX), Y11
 	VMOVDQU 1376(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 1408(CX), Y11
 	VMOVDQU 1440(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 1472(CX), Y11
 	VMOVDQU 1504(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 1536(CX), Y11
 	VMOVDQU 1568(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 1600(CX), Y11
 	VMOVDQU 1632(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 1664(CX), Y11
 	VMOVDQU 1696(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 1728(CX), Y11
 	VMOVDQU 1760(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 1792(CX), Y11
 	VMOVDQU 1824(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 1856(CX), Y11
 	VMOVDQU 1888(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Store 10 outputs
 	VMOVDQU Y0, (DI)
@@ -5421,8 +15059,618 @@ mulAvxTwo_3x10_loop:
 mulAvxTwo_3x10_end:
 	RET
 
+// func mulGFNI_3x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x10_64(SB), $8-88
+	// Loading 20 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), AX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), R11
+	MOVQ            120(SI), R12
+	MOVQ            144(SI), R13
+	MOVQ            168(SI), R14
+	MOVQ            192(SI), R15
+	MOVQ            216(SI), SI
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_3x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	VMOVDQU64 Z20, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z21, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_3x10_64_loop
+	VZEROUPPER
+
+mulGFNI_3x10_64_end:
+	RET
+
+// func mulGFNI_3x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_3x10_64Xor(SB), $8-88
+	// Loading 20 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_3x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), AX
+	MOVQ            out_base+48(FP), SI
+	MOVQ            out_base+48(FP), SI
+	MOVQ            (SI), DI
+	MOVQ            24(SI), R8
+	MOVQ            48(SI), R9
+	MOVQ            72(SI), R10
+	MOVQ            96(SI), R11
+	MOVQ            120(SI), R12
+	MOVQ            144(SI), R13
+	MOVQ            168(SI), R14
+	MOVQ            192(SI), R15
+	MOVQ            216(SI), SI
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_3x10_64Xor_loop:
+	// Load 10 outputs
+	VMOVDQU64 (DI), Z20
+	VMOVDQU64 (R8), Z21
+	VMOVDQU64 (R9), Z22
+	VMOVDQU64 (R10), Z23
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (SI), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	VMOVDQU64 Z20, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z21, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (SI)
+	ADDQ      $0x40, SI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_3x10_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_3x10_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_3x10Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 75 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x10Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), AX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), R15
+	MOVQ  216(SI), SI
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X10
+	VPBROADCASTB X10, Y10
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_3x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU (R13), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU (R14), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU (R15), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU (SI), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (AX), Y13
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y8, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y9, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_3x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x10Xor_end:
+	RET
+
 // func mulAvxTwo_4x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_4x1(SB), NOSPLIT, $0-88
 	// Loading all tables to registers
 	// Destination kept in GP registers
@@ -5462,9 +15710,6 @@ TEXT ·mulAvxTwo_4x1(SB), NOSPLIT, $0-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_4x1_loop:
-	// Clear 1 outputs
-	VPXOR Y8, Y8, Y8
-
 	// Load and process 32 bytes from input 0 to 1 outputs
 	VMOVDQU (DX), Y10
 	ADDQ    $0x20, DX
@@ -5473,8 +15718,7 @@ mulAvxTwo_4x1_loop:
 	VPAND   Y9, Y11, Y11
 	VPSHUFB Y10, Y0, Y10
 	VPSHUFB Y11, Y1, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y10, Y11, Y8
 
 	// Load and process 32 bytes from input 1 to 1 outputs
 	VMOVDQU (BX), Y10
@@ -5484,8 +15728,7 @@ mulAvxTwo_4x1_loop:
 	VPAND   Y9, Y11, Y11
 	VPSHUFB Y10, Y2, Y10
 	VPSHUFB Y11, Y3, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 2 to 1 outputs
 	VMOVDQU (SI), Y10
@@ -5495,8 +15738,7 @@ mulAvxTwo_4x1_loop:
 	VPAND   Y9, Y11, Y11
 	VPSHUFB Y10, Y4, Y10
 	VPSHUFB Y11, Y5, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 3 to 1 outputs
 	VMOVDQU (CX), Y10
@@ -5506,8 +15748,7 @@ mulAvxTwo_4x1_loop:
 	VPAND   Y9, Y11, Y11
 	VPSHUFB Y10, Y6, Y10
 	VPSHUFB Y11, Y7, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Store 1 outputs
 	VMOVDQU Y8, (DI)
@@ -5522,46 +15763,44 @@ mulAvxTwo_4x1_end:
 	RET
 
 // func mulAvxTwo_4x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_4x1_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 12 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_4x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), AX
-	MOVQ  out_base+48(FP), DI
-	MOVQ  out_base+48(FP), DI
-	MOVQ  start+72(FP), R8
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R8
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
 
 	// Add start offset to input
-	ADDQ         R8, DX
-	ADDQ         R8, BX
-	ADDQ         R8, SI
-	ADDQ         R8, AX
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, DX
 	MOVQ         $0x0000000f, R9
 	MOVQ         R9, X2
 	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R9
-	SHRQ         $0x06, R9
 
 mulAvxTwo_4x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
 	VPAND   Y2, Y5, Y5
@@ -5573,15 +15812,13 @@ mulAvxTwo_4x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
 
 	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -5594,12 +15831,356 @@ mulAvxTwo_4x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_4x1_64_end:
+	RET
+
+// func mulGFNI_4x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_4x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 7 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), DI
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+
+	// Add start offset to input
+	ADDQ R8, DX
+	ADDQ R8, BX
+	ADDQ R8, SI
+	ADDQ R8, CX
+
+mulGFNI_4x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z5
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z5, Z4
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z5
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z5
+	VXORPD         Z4, Z5, Z4
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z5
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z5, Z5
+	VXORPD         Z4, Z5, Z4
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (CX), Z5
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z3, Z5, Z5
+	VXORPD         Z4, Z5, Z4
+
+	// Store 1 outputs
+	VMOVDQU64 Z4, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x1_64_loop
+	VZEROUPPER
+
+mulGFNI_4x1_64_end:
+	RET
+
+// func mulGFNI_4x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_4x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 7 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), DI
+	MOVQ            start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+
+	// Add start offset to input
+	ADDQ R8, DX
+	ADDQ R8, BX
+	ADDQ R8, SI
+	ADDQ R8, CX
+
+mulGFNI_4x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (DI), Z4
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z5
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z5, Z5
+	VXORPD         Z4, Z5, Z4
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z5
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z5
+	VXORPD         Z4, Z5, Z4
 
 	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z5
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z5, Z5
+	VXORPD         Z4, Z5, Z4
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (CX), Z5
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z3, Z5, Z5
+	VXORPD         Z4, Z5, Z4
+
+	// Store 1 outputs
+	VMOVDQU64 Z4, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_4x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_4x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), CX
+	MOVQ    out_base+48(FP), DI
+	MOVQ    (DI), DI
+	MOVQ    start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+
+	// Add start offset to input
+	ADDQ         R8, DX
+	ADDQ         R8, BX
+	ADDQ         R8, SI
+	ADDQ         R8, CX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_4x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VMOVDQU (DI), Y8
+	VPSHUFB Y10, Y0, Y10
+	VPSHUFB Y11, Y1, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y2, Y10
+	VPSHUFB Y11, Y3, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y4, Y10
+	VPSHUFB Y11, Y5, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (CX), Y10
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y6, Y10
+	VPSHUFB Y11, Y7, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 1 outputs
+	VMOVDQU Y8, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x1Xor_end:
+	RET
+
+// func mulAvxTwo_4x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_4x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R8
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_4x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R8), Y0
+	VMOVDQU 32(R8), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
 	VMOVDQU (SI), Y6
 	VMOVDQU 32(SI), Y5
 	ADDQ    $0x40, SI
@@ -5609,21 +16190,38 @@ mulAvxTwo_4x1_64_loop:
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y7, Y7
 	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 128(CX), Y3
 	VMOVDQU 160(CX), Y4
 	VPSHUFB Y5, Y3, Y5
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -5636,27 +16234,24 @@ mulAvxTwo_4x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Store 1 outputs
-	MOVQ    (DI), R10
-	VMOVDQU Y0, (R10)(R8*1)
-	VMOVDQU Y1, 32(R10)(R8*1)
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
 
 	// Prepare for next loop
-	ADDQ $0x40, R8
-	DECQ R9
-	JNZ  mulAvxTwo_4x1_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x1_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_4x1_64_end:
+mulAvxTwo_4x1_64Xor_end:
 	RET
 
 // func mulAvxTwo_4x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_4x2(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -5690,10 +16285,6 @@ TEXT ·mulAvxTwo_4x2(SB), NOSPLIT, $0-88
 	VPBROADCASTB X2, Y2
 
 mulAvxTwo_4x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 32 bytes from input 0 to 2 outputs
 	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
@@ -5704,14 +16295,12 @@ mulAvxTwo_4x2_loop:
 	VMOVDQU 32(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	VPXOR   Y3, Y4, Y0
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y3, Y4, Y1
 
 	// Load and process 32 bytes from input 1 to 2 outputs
 	VMOVDQU (SI), Y5
@@ -5723,14 +16312,12 @@ mulAvxTwo_4x2_loop:
 	VMOVDQU 160(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 192(CX), Y3
 	VMOVDQU 224(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 2 to 2 outputs
 	VMOVDQU (DI), Y5
@@ -5742,14 +16329,12 @@ mulAvxTwo_4x2_loop:
 	VMOVDQU 288(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 320(CX), Y3
 	VMOVDQU 352(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 3 to 2 outputs
 	VMOVDQU (DX), Y5
@@ -5761,14 +16346,12 @@ mulAvxTwo_4x2_loop:
 	VMOVDQU 416(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 448(CX), Y3
 	VMOVDQU 480(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Store 2 outputs
 	VMOVDQU Y0, (R9)
@@ -5785,47 +16368,45 @@ mulAvxTwo_4x2_end:
 	RET
 
 // func mulAvxTwo_4x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_4x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 23 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 41 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_4x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), AX
-	MOVQ  out_base+48(FP), DI
-	MOVQ  out_base+48(FP), DI
-	MOVQ  start+72(FP), R8
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R8
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
 
 	// Add start offset to input
-	ADDQ         R8, DX
-	ADDQ         R8, BX
-	ADDQ         R8, SI
-	ADDQ         R8, AX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X4
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R9
-	SHRQ         $0x06, R9
 
 mulAvxTwo_4x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -5838,25 +16419,21 @@ mulAvxTwo_4x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -5869,25 +16446,21 @@ mulAvxTwo_4x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -5900,25 +16473,21 @@ mulAvxTwo_4x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 320(CX), Y5
 	VMOVDQU 352(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -5931,49 +16500,220 @@ mulAvxTwo_4x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 448(CX), Y5
 	VMOVDQU 480(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Store 2 outputs
-	MOVQ    (DI), R10
-	VMOVDQU Y0, (R10)(R8*1)
-	VMOVDQU Y1, 32(R10)(R8*1)
-	MOVQ    24(DI), R10
-	VMOVDQU Y2, (R10)(R8*1)
-	VMOVDQU Y3, 32(R10)(R8*1)
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
 
 	// Prepare for next loop
-	ADDQ $0x40, R8
-	DECQ R9
+	DECQ AX
 	JNZ  mulAvxTwo_4x2_64_loop
 	VZEROUPPER
 
 mulAvxTwo_4x2_64_end:
 	RET
 
-// func mulAvxTwo_4x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x3(SB), NOSPLIT, $0-88
+// func mulGFNI_4x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_4x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), DI
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+	ADDQ R9, DI
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, CX
+
+mulGFNI_4x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z10
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z10, Z8
+	VGF2P8AFFINEQB $0x00, Z1, Z10, Z9
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z10
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z3, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z10
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z5, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (CX), Z10
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z7, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Store 2 outputs
+	VMOVDQU64 Z8, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x2_64_loop
+	VZEROUPPER
+
+mulGFNI_4x2_64_end:
+	RET
+
+// func mulGFNI_4x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_4x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), DI
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+	ADDQ R9, DI
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, CX
+
+mulGFNI_4x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (R8), Z8
+	VMOVDQU64 (DI), Z9
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z10
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z1, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z10
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z3, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z10
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z5, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (CX), Z10
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z10, Z11
+	VXORPD         Z8, Z11, Z8
+	VGF2P8AFFINEQB $0x00, Z7, Z10, Z11
+	VXORPD         Z9, Z11, Z9
+
+	// Store 2 outputs
+	VMOVDQU64 Z8, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z9, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_4x2Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 32 YMM used
+	// Full registers estimated 23 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x3_end
+	JZ    mulAvxTwo_4x2Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -5981,129 +16721,397 @@ TEXT ·mulAvxTwo_4x3(SB), NOSPLIT, $0-88
 	MOVQ  72(DX), DX
 	MOVQ  out_base+48(FP), R8
 	MOVQ  (R8), R9
-	MOVQ  24(R8), R10
-	MOVQ  48(R8), R8
-	MOVQ  start+72(FP), R11
+	MOVQ  24(R8), R8
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R11, R9
-	ADDQ R11, R10
-	ADDQ R11, R8
+	ADDQ R10, R9
+	ADDQ R10, R8
 
 	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X3
-	VPBROADCASTB X3, Y3
-
-mulAvxTwo_4x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X2
+	VPBROADCASTB X2, Y2
 
-	// Load and process 32 bytes from input 0 to 3 outputs
-	VMOVDQU (BX), Y6
+mulAvxTwo_4x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU (CX), Y4
-	VMOVDQU 32(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 64(CX), Y4
-	VMOVDQU 96(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 1 to 3 outputs
-	VMOVDQU (SI), Y6
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 192(CX), Y4
-	VMOVDQU 224(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 256(CX), Y4
-	VMOVDQU 288(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 320(CX), Y4
-	VMOVDQU 352(CX), Y5
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 2 to 3 outputs
-	VMOVDQU (DI), Y6
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 384(CX), Y4
-	VMOVDQU 416(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 448(CX), Y4
-	VMOVDQU 480(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 512(CX), Y4
-	VMOVDQU 544(CX), Y5
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 3 to 3 outputs
-	VMOVDQU (DX), Y6
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (DX), Y5
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 576(CX), Y4
-	VMOVDQU 608(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x2Xor_end:
+	RET
+
+// func mulAvxTwo_4x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_4x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 41 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R8
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_4x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R9), Y0
+	VMOVDQU 32(R9), Y1
+	VMOVDQU (R8), Y2
+	VMOVDQU 32(R8), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_4x3(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x3_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R8
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_4x3_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 640(CX), Y4
 	VMOVDQU 672(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 704(CX), Y4
 	VMOVDQU 736(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Store 3 outputs
 	VMOVDQU Y0, (R9)
@@ -6122,49 +17130,47 @@ mulAvxTwo_4x3_end:
 	RET
 
 // func mulAvxTwo_4x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT ·mulAvxTwo_4x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 32 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 58 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_4x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), AX
-	MOVQ  out_base+48(FP), DI
-	MOVQ  out_base+48(FP), DI
-	MOVQ  start+72(FP), R8
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R8
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
 
 	// Add start offset to input
-	ADDQ         R8, DX
-	ADDQ         R8, BX
-	ADDQ         R8, SI
-	ADDQ         R8, AX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X6
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R9
-	SHRQ         $0x06, R9
 
 mulAvxTwo_4x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -6177,35 +17183,29 @@ mulAvxTwo_4x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -6218,35 +17218,29 @@ mulAvxTwo_4x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (SI), Y11
-	VMOVDQU 32(SI), Y13
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -6259,35 +17253,29 @@ mulAvxTwo_4x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 3 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -6300,62 +17288,263 @@ mulAvxTwo_4x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Store 3 outputs
-	MOVQ    (DI), R10
-	VMOVDQU Y0, (R10)(R8*1)
-	VMOVDQU Y1, 32(R10)(R8*1)
-	MOVQ    24(DI), R10
-	VMOVDQU Y2, (R10)(R8*1)
-	VMOVDQU Y3, 32(R10)(R8*1)
-	MOVQ    48(DI), R10
-	VMOVDQU Y4, (R10)(R8*1)
-	VMOVDQU Y5, 32(R10)(R8*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, R8
-	DECQ R9
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R10)
+	VMOVDQU Y3, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
 	JNZ  mulAvxTwo_4x3_64_loop
 	VZEROUPPER
 
 mulAvxTwo_4x3_64_end:
 	RET
 
-// func mulAvxTwo_4x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x4(SB), NOSPLIT, $0-88
+// func mulGFNI_4x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_4x3_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), DI
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, CX
+
+mulGFNI_4x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z15
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z15, Z13
+	VGF2P8AFFINEQB $0x00, Z2, Z15, Z14
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z15
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z4, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z5, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z15
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z7, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z8, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (CX), Z15
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z9, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z10, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z11, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Store 3 outputs
+	VMOVDQU64 Z12, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z14, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x3_64_loop
+	VZEROUPPER
+
+mulGFNI_4x3_64_end:
+	RET
+
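
The new mulGFNI_* kernels use the GFNI instruction set instead of table lookups: each matrix coefficient is stored as an 8-byte bit matrix, VBROADCASTF32X2 replicates it across a 512-bit register, VGF2P8AFFINEQB multiplies all 64 input bytes by that constant in one instruction (a GF(2^8) constant multiply is linear over GF(2), so it can be expressed as an 8x8 bit-matrix product), and VXORPD folds the partial products together. A scalar sketch of the affine step, with the hardware instruction's exact bit/byte ordering simplified:

package main

import (
	"fmt"
	"math/bits"
)

// gf2p8Affine sketches the per-byte transform behind VGF2P8AFFINEQB with a
// zero immediate: each output bit is the parity of (one matrix row AND the
// input byte). The GF(2^8) reduction polynomial is already folded into the
// 8x8 bit matrix the generator stores for each coefficient.
// Illustrative helper, not part of the package.
func gf2p8Affine(matrix [8]byte, in byte) byte {
	var out byte
	for i := 0; i < 8; i++ {
		if bits.OnesCount8(matrix[i]&in)&1 == 1 {
			out |= 1 << (7 - i)
		}
	}
	return out
}

func main() {
	// With the identity bit matrix the transform returns its input unchanged.
	identity := [8]byte{0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01}
	fmt.Printf("%#02x\n", gf2p8Affine(identity, 0xa5)) // 0xa5
}
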
+// func mulGFNI_4x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), DI
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, CX
+
+mulGFNI_4x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (R8), Z12
+	VMOVDQU64 (R9), Z13
+	VMOVDQU64 (DI), Z14
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z15
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z2, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z15
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z4, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z5, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z15
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z7, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z8, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (CX), Z15
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z9, Z15, Z16
+	VXORPD         Z12, Z16, Z12
+	VGF2P8AFFINEQB $0x00, Z10, Z15, Z16
+	VXORPD         Z13, Z16, Z13
+	VGF2P8AFFINEQB $0x00, Z11, Z15, Z16
+	VXORPD         Z14, Z16, Z14
+
+	// Store 3 outputs
+	VMOVDQU64 Z12, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z14, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x3Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 41 YMM used
+	// Full registers estimated 32 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x4_end
+	JZ    mulAvxTwo_4x3Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -6364,124 +17553,455 @@ TEXT Β·mulAvxTwo_4x4(SB), NOSPLIT, $0-88
 	MOVQ  out_base+48(FP), R8
 	MOVQ  (R8), R9
 	MOVQ  24(R8), R10
-	MOVQ  48(R8), R11
-	MOVQ  72(R8), R8
-	MOVQ  start+72(FP), R12
+	MOVQ  48(R8), R8
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ R12, R9
-	ADDQ R12, R10
-	ADDQ R12, R11
-	ADDQ R12, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
 
 	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_4x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_4x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (R8), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (SI), Y7
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (DI), Y7
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x3Xor_end:
+	RET
+
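
Each kernel now has an ...Xor companion: the plain version writes its first partial product straight into the output registers instead of clearing them at the top of the loop (the removed "Clear N outputs" blocks), while the new ...Xor version first loads the existing output bytes (VMOVDQU (R9), Y0 and friends) and accumulates into them. A minimal sketch of the two semantics for a single coefficient (hypothetical helper names, not the package's exported API):

package main

import "fmt"

// mulSlice is the overwrite flavour: out = mul(in), with no dependence on
// the previous contents of out.
func mulSlice(mul func(byte) byte, in, out []byte) {
	for i := range in {
		out[i] = mul(in[i])
	}
}

// mulSliceXor is the accumulating flavour: out ^= mul(in), so previously
// written partial results are kept.
func mulSliceXor(mul func(byte) byte, in, out []byte) {
	for i := range in {
		out[i] ^= mul(in[i])
	}
}

func main() {
	double := func(x byte) byte { return x << 1 } // stand-in for a GF(2^8) constant multiply
	in1, in2 := []byte{1, 2, 3}, []byte{4, 5, 6}
	out := make([]byte, 3)
	mulSlice(double, in1, out)    // first input shard: overwrite
	mulSliceXor(double, in2, out) // remaining shards: accumulate
	fmt.Println(out) // [10 14 10]
}
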
+// func mulAvxTwo_4x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 58 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R8
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_4x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R9), Y0
+	VMOVDQU 32(R9), Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 32(R10), Y3
+	VMOVDQU (R8), Y4
+	VMOVDQU 32(R8), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R10)
+	VMOVDQU Y3, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 41 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R8
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R8
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_4x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
 	VMOVDQU 704(CX), Y5
 	VMOVDQU 736(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
 	// Load and process 32 bytes from input 3 to 4 outputs
 	VMOVDQU (DX), Y7
@@ -6493,26 +18013,22 @@ mulAvxTwo_4x4_loop:
 	VMOVDQU 800(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	XOR3WAY( $0x00, Y5, Y6, Y0)
 	VMOVDQU 832(CX), Y5
 	VMOVDQU 864(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 	VMOVDQU 896(CX), Y5
 	VMOVDQU 928(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	XOR3WAY( $0x00, Y5, Y6, Y2)
 	VMOVDQU 960(CX), Y5
 	VMOVDQU 992(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
 	// Store 4 outputs
 	VMOVDQU Y0, (R9)
@@ -6532,8 +18048,417 @@ mulAvxTwo_4x4_loop:
 mulAvxTwo_4x4_end:
 	RET
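
All of the mulAvxTwo_* kernels multiply by a constant with the split-nibble trick: VPSRLQ $0x04 and VPAND with the broadcast 0x0f mask separate the low and high nibble of every byte, VPSHUFB indexes a pair of lookup tables per coefficient (the 0, 32, 64, ... offsets from CX; each 32-byte load is, in effect, a 16-entry table repeated per 128-bit lane), and the two partial products are XORed. A scalar sketch of the same decomposition, built on a plain shift-and-reduce multiply (illustrative helpers, not the package's API):

package main

import "fmt"

// gfMul: shift-and-reduce multiply in GF(2^8) with the 0x11d polynomial
// commonly used by Reed-Solomon codes.
func gfMul(a, b byte) byte {
	var p byte
	for b != 0 {
		if b&1 != 0 {
			p ^= a
		}
		hi := a & 0x80
		a <<= 1
		if hi != 0 {
			a ^= 0x1d
		}
		b >>= 1
	}
	return p
}

// nibbleTables precomputes c*low and c*(high<<4) for all 16 nibble values;
// these 16-entry tables are what VPSHUFB indexes in the assembly.
func nibbleTables(c byte) (lo, hi [16]byte) {
	for n := 0; n < 16; n++ {
		lo[n] = gfMul(c, byte(n))
		hi[n] = gfMul(c, byte(n)<<4)
	}
	return
}

// mulByte reproduces one lane of the SIMD loop: split the byte into
// nibbles, look both up, XOR the results.
func mulByte(lo, hi [16]byte, x byte) byte {
	return lo[x&0x0f] ^ hi[x>>4]
}

func main() {
	lo, hi := nibbleTables(0x1d)
	fmt.Printf("%#02x %#02x\n", mulByte(lo, hi, 0x02), gfMul(0x1d, 0x02)) // both 0x3a
}
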
 
+// func mulGFNI_4x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x4_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), DI
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DI
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, CX
+
+mulGFNI_4x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z20
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z20, Z16
+	VGF2P8AFFINEQB $0x00, Z1, Z20, Z17
+	VGF2P8AFFINEQB $0x00, Z2, Z20, Z18
+	VGF2P8AFFINEQB $0x00, Z3, Z20, Z19
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z20
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z6, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z20
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z9, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z10, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z11, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (CX), Z20
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z13, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z14, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z15, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Store 4 outputs
+	VMOVDQU64 Z16, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z17, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x4_64_loop
+	VZEROUPPER
+
+mulGFNI_4x4_64_end:
+	RET
+
+// func mulGFNI_4x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x4_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), DI
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DI
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, CX
+
+mulGFNI_4x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (R8), Z16
+	VMOVDQU64 (R9), Z17
+	VMOVDQU64 (R10), Z18
+	VMOVDQU64 (DI), Z19
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z20
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z1, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z2, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z3, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z20
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z6, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z20
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z9, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z10, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z11, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (CX), Z20
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z20, Z21
+	VXORPD         Z16, Z21, Z16
+	VGF2P8AFFINEQB $0x00, Z13, Z20, Z21
+	VXORPD         Z17, Z21, Z17
+	VGF2P8AFFINEQB $0x00, Z14, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z15, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Store 4 outputs
+	VMOVDQU64 Z16, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z17, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 41 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R8
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R8
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_4x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (R8), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x4Xor_end:
+	RET
+
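
Every kernel prologue follows the same shape: n+80(FP) is shifted right by 5 or 6 to convert the byte count into a number of 32- or 64-byte iterations, JZ exits immediately when no full block fits, and start+72(FP) is added to each input and output pointer so callers can split the shards into independent byte ranges. A plain-Go sketch of that bookkeeping (helper names hypothetical):

package main

import "fmt"

// blockShift reflects the prologue's SHRQ: 5 for the 32-byte AVX2 kernels,
// 6 for the 64-byte (_64 and GFNI) kernels. Illustrative only.
func blockShift(blockSize int) uint {
	if blockSize == 64 {
		return 6
	}
	return 5
}

// kernelSpan mirrors the prologue bookkeeping: n is reduced to a whole
// number of blocks, and start marks where in each shard this worker begins;
// any tail shorter than one block is left to the caller.
func kernelSpan(start, n, blockSize int) (iterations, firstByte, endByte int) {
	iterations = n >> blockShift(blockSize)
	return iterations, start, start + iterations*blockSize
}

func main() {
	iters, lo, hi := kernelSpan(4096, 1000, 32)
	fmt.Println(iters, lo, hi) // 31 4096 5088; the 8-byte tail needs a fallback path
}
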
 // func mulAvxTwo_4x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT Β·mulAvxTwo_4x5(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -6573,13 +18498,6 @@ TEXT Β·mulAvxTwo_4x5(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_4x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-
 	// Load and process 32 bytes from input 0 to 5 outputs
 	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
@@ -6590,32 +18508,27 @@ mulAvxTwo_4x5_loop:
 	VMOVDQU 32(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	VPXOR   Y6, Y7, Y0
 	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	VPXOR   Y6, Y7, Y1
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	VPXOR   Y6, Y7, Y2
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	VPXOR   Y6, Y7, Y3
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y6, Y7, Y4
 
 	// Load and process 32 bytes from input 1 to 5 outputs
 	VMOVDQU (SI), Y8
@@ -6627,32 +18540,27 @@ mulAvxTwo_4x5_loop:
 	VMOVDQU 352(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 384(CX), Y6
 	VMOVDQU 416(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 448(CX), Y6
 	VMOVDQU 480(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 512(CX), Y6
 	VMOVDQU 544(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 576(CX), Y6
 	VMOVDQU 608(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Load and process 32 bytes from input 2 to 5 outputs
 	VMOVDQU (DI), Y8
@@ -6664,32 +18572,27 @@ mulAvxTwo_4x5_loop:
 	VMOVDQU 672(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 704(CX), Y6
 	VMOVDQU 736(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 768(CX), Y6
 	VMOVDQU 800(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 832(CX), Y6
 	VMOVDQU 864(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 896(CX), Y6
 	VMOVDQU 928(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Load and process 32 bytes from input 3 to 5 outputs
 	VMOVDQU (DX), Y8
@@ -6701,32 +18604,27 @@ mulAvxTwo_4x5_loop:
 	VMOVDQU 992(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 1024(CX), Y6
 	VMOVDQU 1056(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 1088(CX), Y6
 	VMOVDQU 1120(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 1152(CX), Y6
 	VMOVDQU 1184(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 1216(CX), Y6
 	VMOVDQU 1248(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Store 5 outputs
 	VMOVDQU Y0, (R9)
@@ -6748,30 +18646,496 @@ mulAvxTwo_4x5_loop:
 mulAvxTwo_4x5_end:
 	RET
 
-// func mulAvxTwo_4x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_4x6(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_4x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x5_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 59 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_4x6_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), DX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  (R8), R9
-	MOVQ  24(R8), R10
-	MOVQ  48(R8), R11
-	MOVQ  72(R8), R12
-	MOVQ  96(R8), R13
-	MOVQ  120(R8), R8
-	MOVQ  start+72(FP), R14
+	// Full registers estimated 27 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), DI
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, DI
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, CX
+
+mulGFNI_4x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z25
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z25, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z25, Z24
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z25
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z25, Z26
+	VXORPD         Z20, Z26, Z20
+	VGF2P8AFFINEQB $0x00, Z6, Z25, Z26
+	VXORPD         Z21, Z26, Z21
+	VGF2P8AFFINEQB $0x00, Z7, Z25, Z26
+	VXORPD         Z22, Z26, Z22
+	VGF2P8AFFINEQB $0x00, Z8, Z25, Z26
+	VXORPD         Z23, Z26, Z23
+	VGF2P8AFFINEQB $0x00, Z9, Z25, Z26
+	VXORPD         Z24, Z26, Z24
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (SI), Z25
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z25, Z26
+	VXORPD         Z20, Z26, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z25, Z26
+	VXORPD         Z21, Z26, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z25, Z26
+	VXORPD         Z22, Z26, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z25, Z26
+	VXORPD         Z23, Z26, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z25, Z26
+	VXORPD         Z24, Z26, Z24
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (CX), Z25
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z15, Z25, Z26
+	VXORPD         Z20, Z26, Z20
+	VGF2P8AFFINEQB $0x00, Z16, Z25, Z26
+	VXORPD         Z21, Z26, Z21
+	VGF2P8AFFINEQB $0x00, Z17, Z25, Z26
+	VXORPD         Z22, Z26, Z22
+	VGF2P8AFFINEQB $0x00, Z18, Z25, Z26
+	VXORPD         Z23, Z26, Z23
+	VGF2P8AFFINEQB $0x00, Z19, Z25, Z26
+	VXORPD         Z24, Z26, Z24
+
+	// Store 5 outputs
+	VMOVDQU64 Z20, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z21, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z22, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z23, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z24, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x5_64_loop
+	VZEROUPPER
+
+mulGFNI_4x5_64_end:
+	RET
+
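
The "Loading all/no tables to registers" and "Full registers estimated N YMM used" header comments record the generator's register budget: a GFNI kernel needs only one broadcast 8-byte bit matrix per (input, output) pair, so the 4x5 case keeps all 20 of them resident in Z0-Z19, while the AVX2 kernels need two lookup tables per pair and therefore reload them from the matrix slice (CX) on every iteration. A rough version of that arithmetic (illustrative only):

package main

import "fmt"

// tableRegs estimates how many vector registers a kernel would need just
// for its lookup tables, which is what decides between keeping them
// resident and reloading them from memory each iteration.
func tableRegs(inputs, outputs, tablesPerPair int) int {
	return inputs * outputs * tablesPerPair
}

func main() {
	fmt.Println(tableRegs(4, 5, 1)) // GFNI 4x5: 20 matrices, fits in Z0-Z19 of 32 ZMM registers
	fmt.Println(tableRegs(4, 5, 2)) // AVX2 4x5: 40 tables, far more than the 16 YMM registers
}
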
+// func mulGFNI_4x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x5_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 27 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), DI
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, DI
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, CX
+
+mulGFNI_4x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (R8), Z20
+	VMOVDQU64 (R9), Z21
+	VMOVDQU64 (R10), Z22
+	VMOVDQU64 (R11), Z23
+	VMOVDQU64 (DI), Z24
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z25
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z25, Z26
+	VXORPD         Z20, Z26, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z25, Z26
+	VXORPD         Z21, Z26, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z25, Z26
+	VXORPD         Z22, Z26, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z25, Z26
+	VXORPD         Z23, Z26, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z25, Z26
+	VXORPD         Z24, Z26, Z24
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z25
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z25, Z26
+	VXORPD         Z20, Z26, Z20
+	VGF2P8AFFINEQB $0x00, Z6, Z25, Z26
+	VXORPD         Z21, Z26, Z21
+	VGF2P8AFFINEQB $0x00, Z7, Z25, Z26
+	VXORPD         Z22, Z26, Z22
+	VGF2P8AFFINEQB $0x00, Z8, Z25, Z26
+	VXORPD         Z23, Z26, Z23
+	VGF2P8AFFINEQB $0x00, Z9, Z25, Z26
+	VXORPD         Z24, Z26, Z24
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (SI), Z25
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z25, Z26
+	VXORPD         Z20, Z26, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z25, Z26
+	VXORPD         Z21, Z26, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z25, Z26
+	VXORPD         Z22, Z26, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z25, Z26
+	VXORPD         Z23, Z26, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z25, Z26
+	VXORPD         Z24, Z26, Z24
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (CX), Z25
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z15, Z25, Z26
+	VXORPD         Z20, Z26, Z20
+	VGF2P8AFFINEQB $0x00, Z16, Z25, Z26
+	VXORPD         Z21, Z26, Z21
+	VGF2P8AFFINEQB $0x00, Z17, Z25, Z26
+	VXORPD         Z22, Z26, Z22
+	VGF2P8AFFINEQB $0x00, Z18, Z25, Z26
+	VXORPD         Z23, Z26, Z23
+	VGF2P8AFFINEQB $0x00, Z19, Z25, Z26
+	VXORPD         Z24, Z26, Z24
+
+	// Store 5 outputs
+	VMOVDQU64 Z20, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z21, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z22, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z23, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z24, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R8
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R8
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_4x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (R8), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x5Xor_end:
+	RET
+
+// func mulAvxTwo_4x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x6(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 59 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x6_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R8
+	MOVQ  start+72(FP), R14
 
 	// Add start offset to output
 	ADDQ R14, R9
@@ -6791,14 +19155,6 @@ TEXT Β·mulAvxTwo_4x6(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_4x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
@@ -6809,38 +19165,32 @@ mulAvxTwo_4x6_loop:
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPXOR   Y7, Y8, Y0
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPXOR   Y7, Y8, Y2
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y7, Y8, Y3
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPXOR   Y7, Y8, Y4
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPXOR   Y7, Y8, Y5
 
 	// Load and process 32 bytes from input 1 to 6 outputs
 	VMOVDQU (SI), Y9
@@ -6852,38 +19202,32 @@ mulAvxTwo_4x6_loop:
 	VMOVDQU 416(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 576(CX), Y7
 	VMOVDQU 608(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Load and process 32 bytes from input 2 to 6 outputs
 	VMOVDQU (DI), Y9
@@ -6895,38 +19239,32 @@ mulAvxTwo_4x6_loop:
 	VMOVDQU 800(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 832(CX), Y7
 	VMOVDQU 864(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 896(CX), Y7
 	VMOVDQU 928(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 960(CX), Y7
 	VMOVDQU 992(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 1024(CX), Y7
 	VMOVDQU 1056(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 1088(CX), Y7
 	VMOVDQU 1120(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Load and process 32 bytes from input 3 to 6 outputs
 	VMOVDQU (DX), Y9
@@ -6938,38 +19276,32 @@ mulAvxTwo_4x6_loop:
 	VMOVDQU 1184(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 1216(CX), Y7
 	VMOVDQU 1248(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 1280(CX), Y7
 	VMOVDQU 1312(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 1344(CX), Y7
 	VMOVDQU 1376(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 1408(CX), Y7
 	VMOVDQU 1440(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 1472(CX), Y7
 	VMOVDQU 1504(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Store 6 outputs
 	VMOVDQU Y0, (R9)
@@ -6993,8 +19325,531 @@ mulAvxTwo_4x6_loop:
 mulAvxTwo_4x6_end:
 	RET
 
+// func mulGFNI_4x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x6_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), DI
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DI
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, CX
+
+mulGFNI_4x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z25, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z26, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z27, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z28, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x6_64_loop
+	VZEROUPPER
+
+mulGFNI_4x6_64_end:
+	RET
+
+// func mulGFNI_4x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x6_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), CX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), DI
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DI
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, CX
+
+mulGFNI_4x6_64Xor_loop:
+	// Load 6 outputs
+	VMOVDQU64 (R8), Z24
+	VMOVDQU64 (R9), Z25
+	VMOVDQU64 (R10), Z26
+	VMOVDQU64 (R11), Z27
+	VMOVDQU64 (R12), Z28
+	VMOVDQU64 (DI), Z29
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z25, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z26, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z27, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z28, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x6_64Xor_end:
+	RET
+
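The two GFNI kernels above (mulGFNI_4x6_64 and its Xor variant) multiply four input shards by a 4x6 coefficient matrix over GF(2^8), 64 bytes per iteration. Each 64-bit value broadcast from the matrix slice encodes multiplication by one coefficient as an 8x8 bit matrix, which VGF2P8AFFINEQB applies to every byte of the input block held in Z30; the plain variant writes the six products straight to the outputs, while the Xor variant first loads the existing output blocks and accumulates into them with VXORPD. A rough scalar sketch of the same computation follows, treating each coefficient as a byte rather than the 8x8 bit matrix the instruction consumes, and assuming a precomputed GF(2^8) multiplication table; the function name, table, and coefficient layout are illustrative, not the package's API.

func mulGF8_4x6Ref(coeffs [24]byte, in [4][]byte, out [6][]byte, mulTable *[256][256]byte, xorInto bool) {
	for pos := 0; pos < len(in[0]); pos++ {
		for j := 0; j < 6; j++ {
			var acc byte
			for i := 0; i < 4; i++ {
				// coefficient for input shard i contributing to output shard j
				// (assumed layout: one row of six coefficients per input)
				acc ^= mulTable[coeffs[i*6+j]][in[i][pos]]
			}
			if xorInto {
				out[j][pos] ^= acc // Xor variant: accumulate into the existing output
			} else {
				out[j][pos] = acc // plain variant: overwrite the output
			}
		}
	}
}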
+// func mulAvxTwo_4x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 59 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R8
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R8
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_4x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (R8), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x6Xor_end:
+	RET
+
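mulAvxTwo_4x6Xor above, like the other mulAvxTwo kernels in this file, works 32 bytes at a time with the classic two-nibble table method: each input byte is split into its low and high nibble (VPSRLQ plus VPAND against the broadcast 0x0f mask), each nibble indexes a 16-entry VPSHUFB lookup of partial products, and the two lookups are folded into the accumulator. The XOR3WAY macro that replaces the earlier VPXOR pairs throughout this diff folds that three-way XOR into a single ternary-logic instruction where AVX-512VL is available, which is consistent with the AVX512F and AVX512VL entries added to the "Requires" lines. A minimal scalar sketch of one coefficient's contribution, assuming 16-entry low/high tables (illustrative names only):

// lowTbl[x] holds c*x and highTbl[x] holds c*(x<<4) in GF(2^8),
// mirroring the 32-byte table pairs the kernels index with VPSHUFB.
func mulAddByte(lowTbl, highTbl *[16]byte, acc, b byte) byte {
	return acc ^ lowTbl[b&0x0f] ^ highTbl[b>>4]
}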
 // func mulAvxTwo_4x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
 TEXT Β·mulAvxTwo_4x7(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
@@ -7038,15 +19893,6 @@ TEXT Β·mulAvxTwo_4x7(SB), NOSPLIT, $0-88
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_4x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-
 	// Load and process 32 bytes from input 0 to 7 outputs
 	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
@@ -7057,44 +19903,37 @@ mulAvxTwo_4x7_loop:
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	VPXOR   Y8, Y9, Y0
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	VPXOR   Y8, Y9, Y1
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	VPXOR   Y8, Y9, Y2
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	VPXOR   Y8, Y9, Y3
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	VPXOR   Y8, Y9, Y4
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	VPXOR   Y8, Y9, Y5
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPXOR   Y8, Y9, Y6
 
 	// Load and process 32 bytes from input 1 to 7 outputs
 	VMOVDQU (SI), Y10
@@ -7106,44 +19945,37 @@ mulAvxTwo_4x7_loop:
 	VMOVDQU 480(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 512(CX), Y8
 	VMOVDQU 544(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 576(CX), Y8
 	VMOVDQU 608(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 640(CX), Y8
 	VMOVDQU 672(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 704(CX), Y8
 	VMOVDQU 736(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 768(CX), Y8
 	VMOVDQU 800(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 832(CX), Y8
 	VMOVDQU 864(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Load and process 32 bytes from input 2 to 7 outputs
 	VMOVDQU (DI), Y10
@@ -7155,44 +19987,37 @@ mulAvxTwo_4x7_loop:
 	VMOVDQU 928(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 960(CX), Y8
 	VMOVDQU 992(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 1024(CX), Y8
 	VMOVDQU 1056(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 1088(CX), Y8
 	VMOVDQU 1120(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 1152(CX), Y8
 	VMOVDQU 1184(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 1216(CX), Y8
 	VMOVDQU 1248(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 1280(CX), Y8
 	VMOVDQU 1312(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Load and process 32 bytes from input 3 to 7 outputs
 	VMOVDQU (DX), Y10
@@ -7204,44 +20029,37 @@ mulAvxTwo_4x7_loop:
 	VMOVDQU 1376(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 1408(CX), Y8
 	VMOVDQU 1440(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 1472(CX), Y8
 	VMOVDQU 1504(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 1536(CX), Y8
 	VMOVDQU 1568(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 1600(CX), Y8
 	VMOVDQU 1632(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 1664(CX), Y8
 	VMOVDQU 1696(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 1728(CX), Y8
 	VMOVDQU 1760(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Store 7 outputs
 	VMOVDQU Y0, (R9)
@@ -7267,63 +20085,623 @@ mulAvxTwo_4x7_loop:
 mulAvxTwo_4x7_end:
 	RET
 
-// func mulAvxTwo_4x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_4x8(SB), NOSPLIT, $8-88
-	// Loading no tables to registers
+// func mulGFNI_4x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x7_64(SB), $0-88
+	// Loading 23 of 28 tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 77 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_4x8_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), DX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  (R8), R9
-	MOVQ  24(R8), R10
-	MOVQ  48(R8), R11
-	MOVQ  72(R8), R12
-	MOVQ  96(R8), R13
-	MOVQ  120(R8), R14
-	MOVQ  144(R8), R15
-	MOVQ  168(R8), R8
-	MOVQ  start+72(FP), BP
+	// Full registers estimated 37 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R13
+	MOVQ            120(R8), R14
+	MOVQ            144(R8), R8
+	MOVQ            start+72(FP), R15
 
 	// Add start offset to output
-	ADDQ BP, R9
-	ADDQ BP, R10
-	ADDQ BP, R11
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R8
 
 	// Add start offset to input
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X8
-	VPBROADCASTB X8, Y8
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, DX
+
+mulGFNI_4x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-mulAvxTwo_4x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	// Store 7 outputs
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x7_64_loop
+	VZEROUPPER
+
+mulGFNI_4x7_64_end:
+	RET
+
+// func mulGFNI_4x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x7_64Xor(SB), $0-88
+	// Loading 23 of 28 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 37 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R13
+	MOVQ            120(R8), R14
+	MOVQ            144(R8), R8
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R8
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, DX
+
+mulGFNI_4x7_64Xor_loop:
+	// Load 7 outputs
+	VMOVDQU64 (R9), Z23
+	VMOVDQU64 (R10), Z24
+	VMOVDQU64 (R11), Z25
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (R13), Z27
+	VMOVDQU64 (R14), Z28
+	VMOVDQU64 (R8), Z29
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	VMOVDQU64 Z23, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x7_64Xor_end:
+	RET
+
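For the 4x7 GFNI kernels above the coefficient matrix has 28 entries, but only 23 can stay resident (Z0 through Z22) once seven registers are reserved for the outputs and two for the current input block and product, hence the generated comment "Loading 23 of 28 tables to registers". The remaining five coefficients are read straight from memory with the broadcast form VGF2P8AFFINEQB.BCST at offsets 184(CX) through 216(CX). The 4x8 kernels that follow push this further, keeping 22 of 32 tables in registers and fetching the rest the same way.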
+// func mulAvxTwo_4x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 68 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R8
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R8
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_4x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU (R14), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU (R8), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y6, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x7Xor_end:
+	RET
+
+// func mulAvxTwo_4x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x8(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 77 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R15
+	MOVQ  168(R8), R8
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
 
+mulAvxTwo_4x8_loop:
 	// Load and process 32 bytes from input 0 to 8 outputs
 	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
@@ -7334,50 +20712,42 @@ mulAvxTwo_4x8_loop:
 	VMOVDQU 32(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	VPXOR   Y9, Y10, Y0
 	VMOVDQU 64(CX), Y9
 	VMOVDQU 96(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 128(CX), Y9
 	VMOVDQU 160(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	VPXOR   Y9, Y10, Y2
 	VMOVDQU 192(CX), Y9
 	VMOVDQU 224(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 256(CX), Y9
 	VMOVDQU 288(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	VPXOR   Y9, Y10, Y4
 	VMOVDQU 320(CX), Y9
 	VMOVDQU 352(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y9, Y10, Y5
 	VMOVDQU 384(CX), Y9
 	VMOVDQU 416(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	VPXOR   Y9, Y10, Y6
 	VMOVDQU 448(CX), Y9
 	VMOVDQU 480(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPXOR   Y9, Y10, Y7
 
 	// Load and process 32 bytes from input 1 to 8 outputs
 	VMOVDQU (SI), Y11
@@ -7389,50 +20759,42 @@ mulAvxTwo_4x8_loop:
 	VMOVDQU 544(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
 	VMOVDQU 576(CX), Y9
 	VMOVDQU 608(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y9
 	VMOVDQU 672(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 704(CX), Y9
 	VMOVDQU 736(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 768(CX), Y9
 	VMOVDQU 800(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 832(CX), Y9
 	VMOVDQU 864(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 896(CX), Y9
 	VMOVDQU 928(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 960(CX), Y9
 	VMOVDQU 992(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Load and process 32 bytes from input 2 to 8 outputs
 	VMOVDQU (DI), Y11
@@ -7444,50 +20806,42 @@ mulAvxTwo_4x8_loop:
 	VMOVDQU 1056(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
 	VMOVDQU 1088(CX), Y9
 	VMOVDQU 1120(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1152(CX), Y9
 	VMOVDQU 1184(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 1216(CX), Y9
 	VMOVDQU 1248(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1280(CX), Y9
 	VMOVDQU 1312(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 1344(CX), Y9
 	VMOVDQU 1376(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 1408(CX), Y9
 	VMOVDQU 1440(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 1472(CX), Y9
 	VMOVDQU 1504(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Load and process 32 bytes from input 3 to 8 outputs
 	VMOVDQU (DX), Y11
@@ -7499,50 +20853,42 @@ mulAvxTwo_4x8_loop:
 	VMOVDQU 1568(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
 	VMOVDQU 1600(CX), Y9
 	VMOVDQU 1632(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1664(CX), Y9
 	VMOVDQU 1696(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 1728(CX), Y9
 	VMOVDQU 1760(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1792(CX), Y9
 	VMOVDQU 1824(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 1856(CX), Y9
 	VMOVDQU 1888(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 1920(CX), Y9
 	VMOVDQU 1952(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 1984(CX), Y9
 	VMOVDQU 2016(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Store 8 outputs
 	VMOVDQU Y0, (R9)
@@ -7570,36 +20916,382 @@ mulAvxTwo_4x8_loop:
 mulAvxTwo_4x8_end:
 	RET
 
-// func mulAvxTwo_4x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_4x9(SB), NOSPLIT, $8-88
+// func mulGFNI_4x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x8_64(SB), $8-88
+	// Loading 22 of 32 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R13
+	MOVQ            120(R8), R14
+	MOVQ            144(R8), R15
+	MOVQ            168(R8), R8
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, DX
+
+mulGFNI_4x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x8_64_loop
+	VZEROUPPER
+
+mulGFNI_4x8_64_end:
+	RET
+
+// func mulGFNI_4x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x8_64Xor(SB), $8-88
+	// Loading 22 of 32 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R13
+	MOVQ            120(R8), R14
+	MOVQ            144(R8), R15
+	MOVQ            168(R8), R8
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, DX
+
+mulGFNI_4x8_64Xor_loop:
+	// Load 8 outputs
+	VMOVDQU64 (R9), Z22
+	VMOVDQU64 (R10), Z23
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R8), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_4x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x8Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 86 YMM used
+	// Full registers estimated 77 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x9_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), AX
-	MOVQ  out_base+48(FP), DI
-	MOVQ  (DI), R8
-	MOVQ  24(DI), R9
-	MOVQ  48(DI), R10
-	MOVQ  72(DI), R11
-	MOVQ  96(DI), R12
-	MOVQ  120(DI), R13
-	MOVQ  144(DI), R14
-	MOVQ  168(DI), R15
-	MOVQ  192(DI), DI
+	JZ    mulAvxTwo_4x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R15
+	MOVQ  168(R8), R8
 	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ BP, R8
 	ADDQ BP, R9
 	ADDQ BP, R10
 	ADDQ BP, R11
@@ -7607,152 +21299,394 @@ TEXT Β·mulAvxTwo_4x9(SB), NOSPLIT, $8-88
 	ADDQ BP, R13
 	ADDQ BP, R14
 	ADDQ BP, R15
-	ADDQ BP, DI
+	ADDQ BP, R8
 
 	// Add start offset to input
-	ADDQ         BP, DX
 	ADDQ         BP, BX
 	ADDQ         BP, SI
-	ADDQ         BP, AX
+	ADDQ         BP, DI
+	ADDQ         BP, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X9
-	VPBROADCASTB X9, Y9
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_4x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (DX), Y12
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
+mulAvxTwo_4x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU (R14), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU (R15), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU (R8), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 1 to 9 outputs
-	VMOVDQU (BX), Y12
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 576(CX), Y10
-	VMOVDQU 608(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 640(CX), Y10
-	VMOVDQU 672(CX), Y11
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 704(CX), Y10
-	VMOVDQU 736(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 768(CX), Y10
-	VMOVDQU 800(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 832(CX), Y10
-	VMOVDQU 864(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 896(CX), Y10
-	VMOVDQU 928(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 960(CX), Y10
-	VMOVDQU 992(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1024(CX), Y10
-	VMOVDQU 1056(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1088(CX), Y10
-	VMOVDQU 1120(CX), Y11
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y6, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y7, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x8Xor_end:
+	RET
+
+// func mulAvxTwo_4x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x9(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 86 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x9_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), AX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), R15
+	MOVQ  192(DI), DI
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, DI
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X9
+	VPBROADCASTB X9, Y9
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_4x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 2 to 9 outputs
 	VMOVDQU (SI), Y12
@@ -7764,56 +21698,47 @@ mulAvxTwo_4x9_loop:
 	VMOVDQU 1184(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 1216(CX), Y10
 	VMOVDQU 1248(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 1280(CX), Y10
 	VMOVDQU 1312(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 1344(CX), Y10
 	VMOVDQU 1376(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 1408(CX), Y10
 	VMOVDQU 1440(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 1472(CX), Y10
 	VMOVDQU 1504(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 1536(CX), Y10
 	VMOVDQU 1568(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 1600(CX), Y10
 	VMOVDQU 1632(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 1664(CX), Y10
 	VMOVDQU 1696(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 3 to 9 outputs
 	VMOVDQU (AX), Y12
@@ -7825,56 +21750,47 @@ mulAvxTwo_4x9_loop:
 	VMOVDQU 1760(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 1792(CX), Y10
 	VMOVDQU 1824(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 1856(CX), Y10
 	VMOVDQU 1888(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 1920(CX), Y10
 	VMOVDQU 1952(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 1984(CX), Y10
 	VMOVDQU 2016(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 2048(CX), Y10
 	VMOVDQU 2080(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 2112(CX), Y10
 	VMOVDQU 2144(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 2176(CX), Y10
 	VMOVDQU 2208(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 2240(CX), Y10
 	VMOVDQU 2272(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Store 9 outputs
 	VMOVDQU Y0, (R8)
@@ -7904,247 +21820,879 @@ mulAvxTwo_4x9_loop:
 mulAvxTwo_4x9_end:
 	RET
 
-// func mulAvxTwo_4x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_4x10(SB), NOSPLIT, $0-88
+// func mulGFNI_4x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x9_64(SB), $8-88
+	// Loading 21 of 36 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), AX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), R13
+	MOVQ            144(DI), R14
+	MOVQ            168(DI), R15
+	MOVQ            192(DI), DI
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, DI
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_4x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (SI), Z30
+	ADDQ                $0x40, SI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	VMOVDQU64 Z21, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_4x9_64_loop
+	VZEROUPPER
+
+mulGFNI_4x9_64_end:
+	RET
+
+// func mulGFNI_4x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x9_64Xor(SB), $8-88
+	// Loading 21 of 36 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), AX
+	MOVQ            out_base+48(FP), DI
+	MOVQ            out_base+48(FP), DI
+	MOVQ            (DI), R8
+	MOVQ            24(DI), R9
+	MOVQ            48(DI), R10
+	MOVQ            72(DI), R11
+	MOVQ            96(DI), R12
+	MOVQ            120(DI), R13
+	MOVQ            144(DI), R14
+	MOVQ            168(DI), R15
+	MOVQ            192(DI), DI
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, DI
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_4x9_64Xor_loop:
+	// Load 9 outputs
+	VMOVDQU64 (R8), Z21
+	VMOVDQU64 (R9), Z22
+	VMOVDQU64 (R10), Z23
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (DI), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (SI), Z30
+	ADDQ                $0x40, SI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	VMOVDQU64 Z21, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (DI)
+	ADDQ      $0x40, DI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_4x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_4x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x9Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 95 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 86 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x10_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), DX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  start+72(FP), R9
+	JZ    mulAvxTwo_4x9Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), AX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), R15
+	MOVQ  192(DI), DI
+	MOVQ  start+72(FP), BP
 
-	// Add start offset to input
-	ADDQ         R9, BX
-	ADDQ         R9, SI
-	ADDQ         R9, DI
-	ADDQ         R9, DX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X10
-	VPBROADCASTB X10, Y10
+	// Add start offset to output
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, DI
 
-mulAvxTwo_4x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X9
+	VPBROADCASTB X9, Y9
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
+mulAvxTwo_4x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU (R14), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU (R15), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU (DI), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 832(CX), Y11
-	VMOVDQU 864(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 896(CX), Y11
-	VMOVDQU 928(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 960(CX), Y11
-	VMOVDQU 992(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1024(CX), Y11
-	VMOVDQU 1056(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1088(CX), Y11
-	VMOVDQU 1120(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1152(CX), Y11
-	VMOVDQU 1184(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1216(CX), Y11
-	VMOVDQU 1248(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 2 to 10 outputs
-	VMOVDQU (DI), Y13
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1280(CX), Y11
-	VMOVDQU 1312(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1344(CX), Y11
-	VMOVDQU 1376(CX), Y12
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (AX), Y12
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y7, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y8, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_4x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x9Xor_end:
+	RET
+
+// func mulAvxTwo_4x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 95 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_4x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 1408(CX), Y11
 	VMOVDQU 1440(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 1472(CX), Y11
 	VMOVDQU 1504(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 1536(CX), Y11
 	VMOVDQU 1568(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 1600(CX), Y11
 	VMOVDQU 1632(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 1664(CX), Y11
 	VMOVDQU 1696(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 1728(CX), Y11
 	VMOVDQU 1760(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 1792(CX), Y11
 	VMOVDQU 1824(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 1856(CX), Y11
 	VMOVDQU 1888(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 3 to 10 outputs
 	VMOVDQU (DX), Y13
@@ -8156,62 +22704,52 @@ mulAvxTwo_4x10_loop:
 	VMOVDQU 1952(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 1984(CX), Y11
 	VMOVDQU 2016(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 2048(CX), Y11
 	VMOVDQU 2080(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 2112(CX), Y11
 	VMOVDQU 2144(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 2176(CX), Y11
 	VMOVDQU 2208(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 2240(CX), Y11
 	VMOVDQU 2272(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 2304(CX), Y11
 	VMOVDQU 2336(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 2368(CX), Y11
 	VMOVDQU 2400(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 2432(CX), Y11
 	VMOVDQU 2464(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 2496(CX), Y11
 	VMOVDQU 2528(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Store 10 outputs
 	MOVQ    (R8), R10
@@ -8244,651 +22782,792 @@ mulAvxTwo_4x10_loop:
 mulAvxTwo_4x10_end:
 	RET
 
-// func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_5x1(SB), NOSPLIT, $0-88
-	// Loading all tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 14 YMM used
-	MOVQ    n+80(FP), AX
-	MOVQ    matrix_base+0(FP), CX
-	SHRQ    $0x05, AX
-	TESTQ   AX, AX
-	JZ      mulAvxTwo_5x1_end
-	VMOVDQU (CX), Y0
-	VMOVDQU 32(CX), Y1
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	MOVQ    in_base+24(FP), CX
-	MOVQ    (CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    48(CX), SI
-	MOVQ    72(CX), DI
-	MOVQ    96(CX), CX
-	MOVQ    out_base+48(FP), R8
-	MOVQ    (R8), R8
-	MOVQ    start+72(FP), R9
-
-	// Add start offset to output
-	ADDQ R9, R8
+// func mulGFNI_4x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x10_64(SB), $0-88
+	// Loading 20 of 40 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 52 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            start+72(FP), R9
 
 	// Add start offset to input
-	ADDQ         R9, DX
-	ADDQ         R9, BX
-	ADDQ         R9, SI
-	ADDQ         R9, DI
-	ADDQ         R9, CX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X11
-	VPBROADCASTB X11, Y11
-
-mulAvxTwo_5x1_loop:
-	// Clear 1 outputs
-	VPXOR Y10, Y10, Y10
-
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y12
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y0, Y12
-	VPSHUFB Y13, Y1, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y12
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y2, Y12
-	VPSHUFB Y13, Y3, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y12
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y4, Y12
-	VPSHUFB Y13, Y5, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y12
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y6, Y12
-	VPSHUFB Y13, Y7, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (CX), Y12
-	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y8, Y12
-	VPSHUFB Y13, Y9, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, DX
+
+mulGFNI_4x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 1 outputs
-	VMOVDQU Y10, (R8)
-	ADDQ    $0x20, R8
+	// Store 10 outputs
+	MOVQ      (R8), R10
+	VMOVDQU64 Z20, (R10)(R9*1)
+	MOVQ      24(R8), R10
+	VMOVDQU64 Z21, (R10)(R9*1)
+	MOVQ      48(R8), R10
+	VMOVDQU64 Z22, (R10)(R9*1)
+	MOVQ      72(R8), R10
+	VMOVDQU64 Z23, (R10)(R9*1)
+	MOVQ      96(R8), R10
+	VMOVDQU64 Z24, (R10)(R9*1)
+	MOVQ      120(R8), R10
+	VMOVDQU64 Z25, (R10)(R9*1)
+	MOVQ      144(R8), R10
+	VMOVDQU64 Z26, (R10)(R9*1)
+	MOVQ      168(R8), R10
+	VMOVDQU64 Z27, (R10)(R9*1)
+	MOVQ      192(R8), R10
+	VMOVDQU64 Z28, (R10)(R9*1)
+	MOVQ      216(R8), R10
+	VMOVDQU64 Z29, (R10)(R9*1)
 
 	// Prepare for next loop
+	ADDQ $0x40, R9
 	DECQ AX
-	JNZ  mulAvxTwo_5x1_loop
+	JNZ  mulGFNI_4x10_64_loop
 	VZEROUPPER
 
-mulAvxTwo_5x1_end:
+mulGFNI_4x10_64_end:
 	RET
 
-// func mulAvxTwo_5x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_5x1_64(SB), $0-88
-	// Loading no tables to registers
+// func mulGFNI_4x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_4x10_64Xor(SB), $0-88
+	// Loading 20 of 40 tables to registers
 	// Destination kept on stack
-	// Full registers estimated 14 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_5x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  out_base+48(FP), R8
-	MOVQ  start+72(FP), R9
+	// Full registers estimated 52 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_4x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), DX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            start+72(FP), R9
 
 	// Add start offset to input
-	ADDQ         R9, DX
-	ADDQ         R9, BX
-	ADDQ         R9, SI
-	ADDQ         R9, DI
-	ADDQ         R9, AX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R10
-	SHRQ         $0x06, R10
-
-mulAvxTwo_5x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, DX
+
+mulGFNI_4x10_64Xor_loop:
+	// Load 10 outputs
+	MOVQ      (R8), R10
+	VMOVDQU64 (R10)(R9*1), Z20
+	MOVQ      24(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z21
+	MOVQ      48(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z22
+	MOVQ      72(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z23
+	MOVQ      96(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z24
+	MOVQ      120(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z25
+	MOVQ      144(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z26
+	MOVQ      168(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z27
+	MOVQ      192(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z28
+	MOVQ      216(R8), R10
+	VMOVDQU64 (R10)(R9*1), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 1 outputs
-	MOVQ    (R8), R11
-	VMOVDQU Y0, (R11)(R9*1)
-	VMOVDQU Y1, 32(R11)(R9*1)
+	// Store 10 outputs
+	MOVQ      (R8), R10
+	VMOVDQU64 Z20, (R10)(R9*1)
+	MOVQ      24(R8), R10
+	VMOVDQU64 Z21, (R10)(R9*1)
+	MOVQ      48(R8), R10
+	VMOVDQU64 Z22, (R10)(R9*1)
+	MOVQ      72(R8), R10
+	VMOVDQU64 Z23, (R10)(R9*1)
+	MOVQ      96(R8), R10
+	VMOVDQU64 Z24, (R10)(R9*1)
+	MOVQ      120(R8), R10
+	VMOVDQU64 Z25, (R10)(R9*1)
+	MOVQ      144(R8), R10
+	VMOVDQU64 Z26, (R10)(R9*1)
+	MOVQ      168(R8), R10
+	VMOVDQU64 Z27, (R10)(R9*1)
+	MOVQ      192(R8), R10
+	VMOVDQU64 Z28, (R10)(R9*1)
+	MOVQ      216(R8), R10
+	VMOVDQU64 Z29, (R10)(R9*1)
 
 	// Prepare for next loop
 	ADDQ $0x40, R9
-	DECQ R10
-	JNZ  mulAvxTwo_5x1_64_loop
+	DECQ AX
+	JNZ  mulGFNI_4x10_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_5x1_64_end:
+mulGFNI_4x10_64Xor_end:
 	RET
 
-// func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_5x2(SB), NOSPLIT, $0-88
+// func mulAvxTwo_4x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_4x10Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 27 YMM used
+	// Destination kept on stack
+	// Full registers estimated 95 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x2_end
+	JZ    mulAvxTwo_4x10Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), DX
-	MOVQ  out_base+48(FP), R9
-	MOVQ  (R9), R10
-	MOVQ  24(R9), R9
-	MOVQ  start+72(FP), R11
-
-	// Add start offset to output
-	ADDQ R11, R10
-	ADDQ R11, R9
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  start+72(FP), R9
 
 	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, R8
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X2
-	VPBROADCASTB X2, Y2
-
-mulAvxTwo_5x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X10
+	VPBROADCASTB X10, Y10
 
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
+mulAvxTwo_4x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	MOVQ    (R8), R10
+	VMOVDQU (R10)(R9*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	MOVQ    24(R8), R10
+	VMOVDQU (R10)(R9*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	MOVQ    48(R8), R10
+	VMOVDQU (R10)(R9*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	MOVQ    72(R8), R10
+	VMOVDQU (R10)(R9*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	MOVQ    96(R8), R10
+	VMOVDQU (R10)(R9*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	MOVQ    120(R8), R10
+	VMOVDQU (R10)(R9*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	MOVQ    144(R8), R10
+	VMOVDQU (R10)(R9*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	MOVQ    168(R8), R10
+	VMOVDQU (R10)(R9*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	MOVQ    192(R8), R10
+	VMOVDQU (R10)(R9*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	MOVQ    216(R8), R10
+	VMOVDQU (R10)(R9*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 1 to 2 outputs
-	VMOVDQU (SI), Y5
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 2 to 2 outputs
-	VMOVDQU (DI), Y5
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 3 to 2 outputs
-	VMOVDQU (R8), Y5
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 4 to 2 outputs
-	VMOVDQU (DX), Y5
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (DX), Y13
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 512(CX), Y3
-	VMOVDQU 544(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 576(CX), Y3
-	VMOVDQU 608(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Store 2 outputs
-	VMOVDQU Y0, (R10)
-	ADDQ    $0x20, R10
-	VMOVDQU Y1, (R9)
-	ADDQ    $0x20, R9
+	// Store 10 outputs
+	MOVQ    (R8), R10
+	VMOVDQU Y0, (R10)(R9*1)
+	MOVQ    24(R8), R10
+	VMOVDQU Y1, (R10)(R9*1)
+	MOVQ    48(R8), R10
+	VMOVDQU Y2, (R10)(R9*1)
+	MOVQ    72(R8), R10
+	VMOVDQU Y3, (R10)(R9*1)
+	MOVQ    96(R8), R10
+	VMOVDQU Y4, (R10)(R9*1)
+	MOVQ    120(R8), R10
+	VMOVDQU Y5, (R10)(R9*1)
+	MOVQ    144(R8), R10
+	VMOVDQU Y6, (R10)(R9*1)
+	MOVQ    168(R8), R10
+	VMOVDQU Y7, (R10)(R9*1)
+	MOVQ    192(R8), R10
+	VMOVDQU Y8, (R10)(R9*1)
+	MOVQ    216(R8), R10
+	VMOVDQU Y9, (R10)(R9*1)
 
 	// Prepare for next loop
+	ADDQ $0x20, R9
 	DECQ AX
-	JNZ  mulAvxTwo_5x2_loop
+	JNZ  mulAvxTwo_4x10Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_5x2_end:
+mulAvxTwo_4x10Xor_end:
 	RET
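
All of the mulAvxTwo_* kernels in this hunk follow the same nibble-table scheme: for every (input, output) pair the matrix argument carries a 64-byte block holding two 16-entry VPSHUFB tables (products of the coefficient with the low and the high nibble), each 32-byte chunk of an input shard is split with VPSRLQ/VPAND against the broadcast 0x0f mask, and the two lookups are XORed into the output register. The Xor kernels such as mulAvxTwo_4x10Xor above first load the existing output and accumulate; after this diff the plain kernels no longer clear their output registers but initialize them from the first input's products. A minimal pure-Go sketch of the scalar equivalent follows; the helper names are illustrative and the 0x11d field polynomial is an assumption, not restated in this file.

package main

import "fmt"

// gfMul is a plain shift-and-add multiply in GF(2^8), assuming the 0x11d
// field polynomial (x^8 + x^4 + x^3 + x^2 + 1).
func gfMul(a, b byte) byte {
	var p byte
	for b != 0 {
		if b&1 != 0 {
			p ^= a
		}
		carry := a & 0x80
		a <<= 1
		if carry != 0 {
			a ^= 0x1d
		}
		b >>= 1
	}
	return p
}

// mulTable16 builds the two 16-entry tables the PSHUFB trick relies on:
// low[n] = c*n and high[n] = c*(n<<4), so that c*v = low[v&0xf] ^ high[v>>4].
func mulTable16(c byte) (low, high [16]byte) {
	for n := 0; n < 16; n++ {
		low[n] = gfMul(c, byte(n))
		high[n] = gfMul(c, byte(n)<<4)
	}
	return
}

// mulAddSlice is the scalar equivalent of one "input i to output j" step in
// the kernels above: out ^= c * in, byte by byte (VPSHUFB does 32 at a time).
func mulAddSlice(c byte, in, out []byte) {
	low, high := mulTable16(c)
	for k, v := range in {
		out[k] ^= low[v&0x0f] ^ high[v>>4]
	}
}

func main() {
	in := []byte{0x01, 0x02, 0x53, 0xff}
	out := make([]byte, len(in))
	mulAddSlice(0xca, in, out)
	fmt.Printf("%x\n", out)
}

The _64 variants apply exactly the same per-byte math, just 64 bytes per loop iteration using two YMM registers per output.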
 
-// func mulAvxTwo_5x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x2_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 27 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_5x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  out_base+48(FP), R8
-	MOVQ  start+72(FP), R9
+// func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x1(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_5x1_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), CX
+	MOVQ    out_base+48(FP), R8
+	MOVQ    (R8), R8
+	MOVQ    start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
 
 	// Add start offset to input
 	ADDQ         R9, DX
 	ADDQ         R9, BX
 	ADDQ         R9, SI
 	ADDQ         R9, DI
-	ADDQ         R9, AX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X4
-	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R10
-	SHRQ         $0x06, R10
-
-mulAvxTwo_5x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R9, CX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X11
+	VPBROADCASTB X11, Y11
 
-	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+mulAvxTwo_5x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y0, Y12
+	VPSHUFB Y13, Y1, Y13
+	VPXOR   Y12, Y13, Y10
 
-	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y2, Y12
+	VPSHUFB Y13, Y3, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y4, Y12
+	VPSHUFB Y13, Y5, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (DI), Y9
-	VMOVDQU 32(DI), Y11
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y6, Y12
+	VPSHUFB Y13, Y7, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Load and process 64 bytes from input 4 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (CX), Y12
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y8, Y12
+	VPSHUFB Y13, Y9, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Store 2 outputs
-	MOVQ    (R8), R11
-	VMOVDQU Y0, (R11)(R9*1)
-	VMOVDQU Y1, 32(R11)(R9*1)
-	MOVQ    24(R8), R11
-	VMOVDQU Y2, (R11)(R9*1)
-	VMOVDQU Y3, 32(R11)(R9*1)
+	// Store 1 outputs
+	VMOVDQU Y10, (R8)
+	ADDQ    $0x20, R8
 
 	// Prepare for next loop
-	ADDQ $0x40, R9
-	DECQ R10
-	JNZ  mulAvxTwo_5x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x1_loop
 	VZEROUPPER
 
-mulAvxTwo_5x2_64_end:
+mulAvxTwo_5x1_end:
 	RET
 
-// func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x3(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x1_64(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 38 YMM used
+	// Full registers estimated 26 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x3_end
+	JZ    mulAvxTwo_5x1_64_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -8896,451 +23575,413 @@ TEXT ·mulAvxTwo_5x3(SB), NOSPLIT, $0-88
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
-	MOVQ  (R9), R10
-	MOVQ  24(R9), R11
-	MOVQ  48(R9), R9
-	MOVQ  start+72(FP), R12
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R9
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R12, R10
-	ADDQ R12, R11
-	ADDQ R12, R9
+	ADDQ R10, R9
 
 	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X3
-	VPBROADCASTB X3, Y3
-
-mulAvxTwo_5x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X2
+	VPBROADCASTB X2, Y2
 
-	// Load and process 32 bytes from input 0 to 3 outputs
+mulAvxTwo_5x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
 	VMOVDQU (BX), Y6
-	ADDQ    $0x20, BX
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU (CX), Y4
-	VMOVDQU 32(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 64(CX), Y4
-	VMOVDQU 96(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
 
-	// Load and process 32 bytes from input 1 to 3 outputs
+	// Load and process 64 bytes from input 1 to 1 outputs
 	VMOVDQU (SI), Y6
-	ADDQ    $0x20, SI
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 192(CX), Y4
-	VMOVDQU 224(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 256(CX), Y4
-	VMOVDQU 288(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 320(CX), Y4
-	VMOVDQU 352(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Load and process 32 bytes from input 2 to 3 outputs
+	// Load and process 64 bytes from input 2 to 1 outputs
 	VMOVDQU (DI), Y6
-	ADDQ    $0x20, DI
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 384(CX), Y4
-	VMOVDQU 416(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 448(CX), Y4
-	VMOVDQU 480(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 512(CX), Y4
-	VMOVDQU 544(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Load and process 32 bytes from input 3 to 3 outputs
+	// Load and process 64 bytes from input 3 to 1 outputs
 	VMOVDQU (R8), Y6
-	ADDQ    $0x20, R8
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 576(CX), Y4
-	VMOVDQU 608(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 640(CX), Y4
-	VMOVDQU 672(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 704(CX), Y4
-	VMOVDQU 736(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Load and process 32 bytes from input 4 to 3 outputs
+	// Load and process 64 bytes from input 4 to 1 outputs
 	VMOVDQU (DX), Y6
-	ADDQ    $0x20, DX
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 768(CX), Y4
-	VMOVDQU 800(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 832(CX), Y4
-	VMOVDQU 864(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 896(CX), Y4
-	VMOVDQU 928(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Store 3 outputs
-	VMOVDQU Y0, (R10)
-	ADDQ    $0x20, R10
-	VMOVDQU Y1, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y2, (R9)
-	ADDQ    $0x20, R9
+	// Store 1 outputs
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_5x3_loop
+	JNZ  mulAvxTwo_5x1_64_loop
 	VZEROUPPER
 
-mulAvxTwo_5x3_end:
+mulAvxTwo_5x1_64_end:
 	RET
 
-// func mulAvxTwo_5x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x3_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 38 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_5x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  out_base+48(FP), R8
-	MOVQ  start+72(FP), R9
+// func mulGFNI_5x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 8 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R8
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, CX
+
+mulGFNI_5x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z6
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z6, Z5
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z6
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z6
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z6
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (CX), Z6
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z4, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Store 1 outputs
+	VMOVDQU64 Z5, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x1_64_loop
+	VZEROUPPER
+
+mulGFNI_5x1_64_end:
+	RET
+
+// func mulGFNI_5x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 8 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R8
+	MOVQ            start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+
+	// Add start offset to input
+	ADDQ R9, DX
+	ADDQ R9, BX
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, CX
+
+mulGFNI_5x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (R8), Z5
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z6
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z6
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z6
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z6
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (CX), Z6
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z4, Z6, Z6
+	VXORPD         Z5, Z6, Z5
+
+	// Store 1 outputs
+	VMOVDQU64 Z5, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x1_64Xor_end:
+	RET
+
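
The mulGFNI_* kernels added in this diff work differently: the matrix argument is a []uint64, one packed 8x8 GF(2) bit matrix per coefficient, broadcast with VBROADCASTF32X2 and applied to 64 input bytes at once with VGF2P8AFFINEQB (constant term $0x00); the _64Xor variant above additionally loads the current output with VMOVDQU64 and folds each product in with VXORPD. The sketch below is a scalar model only; the bit packing shown (matrix row i in byte 7-i of the qword) follows my reading of the Intel pseudocode for the instruction and should be treated as an assumption.

package main

import (
	"fmt"
	"math/bits"
)

// gf2p8Affine models what VGF2P8AFFINEQB with imm8=0 does to one byte:
// multiply x by an 8x8 bit matrix packed into a uint64. Assumed packing:
// row i of the matrix sits in byte 7-i of the qword, and result bit i is the
// parity of (row i AND x).
func gf2p8Affine(matrix uint64, x byte) byte {
	var out byte
	for i := 0; i < 8; i++ {
		row := byte(matrix >> (8 * uint(7-i)))
		out |= byte(bits.OnesCount8(row&x)&1) << uint(i)
	}
	return out
}

// mulGFNIRow is a scalar model of one mulGFNI_*_64 / _64Xor kernel: one
// 64-bit matrix per input shard, applied to every byte and XORed into a
// single output. xor=true models the Xor variant, which keeps the existing
// output instead of starting from zero.
func mulGFNIRow(matrices []uint64, in [][]byte, out []byte, xor bool) {
	if !xor {
		for k := range out {
			out[k] = 0
		}
	}
	for i, m := range matrices {
		for k, v := range in[i] {
			out[k] ^= gf2p8Affine(m, v)
		}
	}
}

func main() {
	// With this packing, 0x0102040810204080 is the identity matrix.
	const identity = 0x0102040810204080
	fmt.Printf("%#x\n", gf2p8Affine(identity, 0xab)) // prints 0xab
}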
+// func mulAvxTwo_5x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_5x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), CX
+	MOVQ    out_base+48(FP), R8
+	MOVQ    (R8), R8
+	MOVQ    start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
 
 	// Add start offset to input
 	ADDQ         R9, DX
 	ADDQ         R9, BX
 	ADDQ         R9, SI
 	ADDQ         R9, DI
-	ADDQ         R9, AX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X6
-	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R10
-	SHRQ         $0x06, R10
-
-mulAvxTwo_5x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	ADDQ         R9, CX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X11
+	VPBROADCASTB X11, Y11
 
-	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+mulAvxTwo_5x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VMOVDQU (R8), Y10
+	VPSHUFB Y12, Y0, Y12
+	VPSHUFB Y13, Y1, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y2, Y12
+	VPSHUFB Y13, Y3, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (SI), Y11
-	VMOVDQU 32(SI), Y13
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 384(CX), Y7
-	VMOVDQU 416(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 448(CX), Y7
-	VMOVDQU 480(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 512(CX), Y7
-	VMOVDQU 544(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y4, Y12
+	VPSHUFB Y13, Y5, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Load and process 64 bytes from input 3 to 3 outputs
-	VMOVDQU (DI), Y11
-	VMOVDQU 32(DI), Y13
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 576(CX), Y7
-	VMOVDQU 608(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y7
-	VMOVDQU 672(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 704(CX), Y7
-	VMOVDQU 736(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y6, Y12
+	VPSHUFB Y13, Y7, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Load and process 64 bytes from input 4 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 768(CX), Y7
-	VMOVDQU 800(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 832(CX), Y7
-	VMOVDQU 864(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 896(CX), Y7
-	VMOVDQU 928(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (CX), Y12
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y8, Y12
+	VPSHUFB Y13, Y9, Y13
+	XOR3WAY( $0x00, Y12, Y13, Y10)
 
-	// Store 3 outputs
-	MOVQ    (R8), R11
-	VMOVDQU Y0, (R11)(R9*1)
-	VMOVDQU Y1, 32(R11)(R9*1)
-	MOVQ    24(R8), R11
-	VMOVDQU Y2, (R11)(R9*1)
-	VMOVDQU Y3, 32(R11)(R9*1)
-	MOVQ    48(R8), R11
-	VMOVDQU Y4, (R11)(R9*1)
-	VMOVDQU Y5, 32(R11)(R9*1)
+	// Store 1 outputs
+	VMOVDQU Y10, (R8)
+	ADDQ    $0x20, R8
 
 	// Prepare for next loop
-	ADDQ $0x40, R9
-	DECQ R10
-	JNZ  mulAvxTwo_5x3_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x1Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_5x3_64_end:
+mulAvxTwo_5x1Xor_end:
 	RET
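
XOR3WAY, used throughout the rewritten AVX2 kernels such as mulAvxTwo_5x1Xor above, is a macro defined earlier in this file, outside the lines shown here. Given the AVX512F/AVX512VL entries that now appear in the Requires comments, it presumably lowers to a single three-way XOR (VPTERNLOGD with the 0x96 truth table), i.e. dst ^= a ^ b; the Go stand-in below encodes only that assumed behaviour.

package main

import "fmt"

// xor3Way is a scalar stand-in for the XOR3WAY(imm, a, b, dst) macro used by
// the kernels above, assuming it performs dst = dst ^ a ^ b.
func xor3Way(a, b, dst []byte) {
	for i := range dst {
		dst[i] ^= a[i] ^ b[i]
	}
}

func main() {
	a := []byte{1, 2, 3}
	b := []byte{4, 5, 6}
	dst := []byte{7, 7, 7}
	xor3Way(a, b, dst)
	fmt.Println(dst) // [2 0 2]
}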
 
-// func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x4(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x1_64Xor(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 49 YMM used
+	// Full registers estimated 26 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x4_end
+	JZ    mulAvxTwo_5x1_64Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -9348,219 +23989,147 @@ TEXT ·mulAvxTwo_5x4(SB), NOSPLIT, $0-88
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
-	MOVQ  (R9), R10
-	MOVQ  24(R9), R11
-	MOVQ  48(R9), R12
-	MOVQ  72(R9), R9
-	MOVQ  start+72(FP), R13
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R9
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R13, R10
-	ADDQ R13, R11
-	ADDQ R13, R12
-	ADDQ R13, R9
+	ADDQ R10, R9
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X4
-	VPBROADCASTB X4, Y4
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X2
+	VPBROADCASTB X2, Y2
 
-mulAvxTwo_5x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+mulAvxTwo_5x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R9), Y0
+	VMOVDQU 32(R9), Y1
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (SI), Y7
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (DI), Y7
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Load and process 32 bytes from input 3 to 4 outputs
-	VMOVDQU (R8), Y7
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 896(CX), Y5
-	VMOVDQU 928(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 960(CX), Y5
-	VMOVDQU 992(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Load and process 32 bytes from input 4 to 4 outputs
-	VMOVDQU (DX), Y7
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1024(CX), Y5
-	VMOVDQU 1056(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1088(CX), Y5
-	VMOVDQU 1120(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1152(CX), Y5
-	VMOVDQU 1184(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1216(CX), Y5
-	VMOVDQU 1248(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Store 4 outputs
-	VMOVDQU Y0, (R10)
-	ADDQ    $0x20, R10
-	VMOVDQU Y1, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y2, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y3, (R9)
-	ADDQ    $0x20, R9
+	// Store 1 outputs
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_5x4_loop
+	JNZ  mulAvxTwo_5x1_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_5x4_end:
+mulAvxTwo_5x1_64Xor_end:
 	RET
 
-// func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x5(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x2(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 60 YMM used
+	// Full registers estimated 27 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x5_end
+	JZ    mulAvxTwo_5x2_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -9569,253 +24138,134 @@ TEXT ·mulAvxTwo_5x5(SB), NOSPLIT, $0-88
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
 	MOVQ  (R9), R10
-	MOVQ  24(R9), R11
-	MOVQ  48(R9), R12
-	MOVQ  72(R9), R13
-	MOVQ  96(R9), R9
-	MOVQ  start+72(FP), R14
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ R14, R10
-	ADDQ R14, R11
-	ADDQ R14, R12
-	ADDQ R14, R13
-	ADDQ R14, R9
+	ADDQ R11, R10
+	ADDQ R11, R9
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X5
-	VPBROADCASTB X5, Y5
-
-mulAvxTwo_5x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X2
+	VPBROADCASTB X2, Y2
 
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (BX), Y8
+mulAvxTwo_5x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
-	VMOVDQU 96(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 128(CX), Y6
-	VMOVDQU 160(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 256(CX), Y6
-	VMOVDQU 288(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
 
-	// Load and process 32 bytes from input 1 to 5 outputs
-	VMOVDQU (SI), Y8
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 320(CX), Y6
-	VMOVDQU 352(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 384(CX), Y6
-	VMOVDQU 416(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 448(CX), Y6
-	VMOVDQU 480(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 512(CX), Y6
-	VMOVDQU 544(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 576(CX), Y6
-	VMOVDQU 608(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 2 to 5 outputs
-	VMOVDQU (DI), Y8
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 640(CX), Y6
-	VMOVDQU 672(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 704(CX), Y6
-	VMOVDQU 736(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 768(CX), Y6
-	VMOVDQU 800(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 832(CX), Y6
-	VMOVDQU 864(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 896(CX), Y6
-	VMOVDQU 928(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 3 to 5 outputs
-	VMOVDQU (R8), Y8
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 960(CX), Y6
-	VMOVDQU 992(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1024(CX), Y6
-	VMOVDQU 1056(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1088(CX), Y6
-	VMOVDQU 1120(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1152(CX), Y6
-	VMOVDQU 1184(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1216(CX), Y6
-	VMOVDQU 1248(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 4 to 5 outputs
-	VMOVDQU (DX), Y8
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (DX), Y5
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1280(CX), Y6
-	VMOVDQU 1312(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1344(CX), Y6
-	VMOVDQU 1376(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1408(CX), Y6
-	VMOVDQU 1440(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1472(CX), Y6
-	VMOVDQU 1504(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1536(CX), Y6
-	VMOVDQU 1568(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Store 5 outputs
+	// Store 2 outputs
 	VMOVDQU Y0, (R10)
 	ADDQ    $0x20, R10
-	VMOVDQU Y1, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y2, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y3, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y4, (R9)
+	VMOVDQU Y1, (R9)
 	ADDQ    $0x20, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_5x5_loop
+	JNZ  mulAvxTwo_5x2_loop
 	VZEROUPPER
 
-mulAvxTwo_5x5_end:
+mulAvxTwo_5x2_end:
 	RET
 
-// func mulAvxTwo_5x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x6(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x2_64(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 71 YMM used
+	// Full registers estimated 49 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x6_end
+	JZ    mulAvxTwo_5x2_64_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -9823,289 +24273,388 @@ TEXT ·mulAvxTwo_5x6(SB), NOSPLIT, $0-88
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9
 	MOVQ  (R9), R10
-	MOVQ  24(R9), R11
-	MOVQ  48(R9), R12
-	MOVQ  72(R9), R13
-	MOVQ  96(R9), R14
-	MOVQ  120(R9), R9
-	MOVQ  start+72(FP), R15
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ R15, R10
-	ADDQ R15, R11
-	ADDQ R15, R12
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, R9
+	ADDQ R11, R10
+	ADDQ R11, R9
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DI
-	ADDQ         R15, R8
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X6
-	VPBROADCASTB X6, Y6
-
-mulAvxTwo_5x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
-	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (BX), Y9
-	ADDQ    $0x20, BX
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_5x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
 
-	// Load and process 32 bytes from input 1 to 6 outputs
+	// Load and process 64 bytes from input 1 to 2 outputs
 	VMOVDQU (SI), Y9
-	ADDQ    $0x20, SI
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 384(CX), Y7
-	VMOVDQU 416(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 448(CX), Y7
-	VMOVDQU 480(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 512(CX), Y7
-	VMOVDQU 544(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 576(CX), Y7
-	VMOVDQU 608(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 640(CX), Y7
-	VMOVDQU 672(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 704(CX), Y7
-	VMOVDQU 736(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 2 to 6 outputs
+	// Load and process 64 bytes from input 2 to 2 outputs
 	VMOVDQU (DI), Y9
-	ADDQ    $0x20, DI
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 768(CX), Y7
-	VMOVDQU 800(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 832(CX), Y7
-	VMOVDQU 864(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 896(CX), Y7
-	VMOVDQU 928(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 960(CX), Y7
-	VMOVDQU 992(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1024(CX), Y7
-	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1088(CX), Y7
-	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 3 to 6 outputs
+	// Load and process 64 bytes from input 3 to 2 outputs
 	VMOVDQU (R8), Y9
-	ADDQ    $0x20, R8
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1152(CX), Y7
-	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1216(CX), Y7
-	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1280(CX), Y7
-	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1344(CX), Y7
-	VMOVDQU 1376(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1408(CX), Y7
-	VMOVDQU 1440(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1472(CX), Y7
-	VMOVDQU 1504(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 4 to 6 outputs
+	// Load and process 64 bytes from input 4 to 2 outputs
 	VMOVDQU (DX), Y9
-	ADDQ    $0x20, DX
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1536(CX), Y7
-	VMOVDQU 1568(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1600(CX), Y7
-	VMOVDQU 1632(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1664(CX), Y7
-	VMOVDQU 1696(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1728(CX), Y7
-	VMOVDQU 1760(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1792(CX), Y7
-	VMOVDQU 1824(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1856(CX), Y7
-	VMOVDQU 1888(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Store 6 outputs
+	// Store 2 outputs
 	VMOVDQU Y0, (R10)
-	ADDQ    $0x20, R10
-	VMOVDQU Y1, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y2, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y3, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y4, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y5, (R9)
-	ADDQ    $0x20, R9
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_5x6_loop
+	JNZ  mulAvxTwo_5x2_64_loop
 	VZEROUPPER
 
-mulAvxTwo_5x6_end:
+mulAvxTwo_5x2_64_end:
 	RET
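
The mulAvxTwo_* kernels above implement GF(2^8) multiply-and-accumulate with the split-nibble VPSHUFB technique: the 0x0f mask broadcast into a YMM register separates every input byte into its low and high nibble, each nibble indexes a 16-entry lookup table (one 32-byte VMOVDQU load from the matrix argument per nibble), and the two partial products are folded into the output accumulators. A minimal scalar sketch of the same idea follows; gfMul, nibbleTables and the 0x11D polynomial are assumptions made for illustration, not part of the package API.

package main

import "fmt"

// gfMul multiplies two GF(2^8) elements with shift-and-add, assuming the
// field is defined by the polynomial 0x11D (reduction constant 0x1d).
func gfMul(a, b byte) byte {
	var p byte
	for b > 0 {
		if b&1 != 0 {
			p ^= a
		}
		carry := a & 0x80
		a <<= 1
		if carry != 0 {
			a ^= 0x1d
		}
		b >>= 1
	}
	return p
}

// nibbleTables builds the two 16-entry tables that VPSHUFB indexes: the
// products of coefficient c with every possible low nibble and high nibble.
func nibbleTables(c byte) (low, high [16]byte) {
	for n := 0; n < 16; n++ {
		low[n] = gfMul(c, byte(n))
		high[n] = gfMul(c, byte(n)<<4)
	}
	return
}

func main() {
	const c = 0x57 // an arbitrary coefficient
	low, high := nibbleTables(c)
	v := byte(0xa3)
	// Scalar equivalent of VPAND (0x0f mask), two VPSHUFBs and the XOR.
	byTables := low[v&0x0f] ^ high[v>>4]
	fmt.Printf("via tables: %#02x  direct gfMul: %#02x\n", byTables, gfMul(c, v))
}

Because GF(2^8) multiplication is linear over XOR, the two nibble lookups reproduce the full product, which is what lets VPSHUFB's 16-entry limit cover all 256 byte values.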
 
-// func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x7(SB), NOSPLIT, $8-88
+// func mulGFNI_5x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R8
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, CX
+
+mulGFNI_5x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z12
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z12, Z10
+	VGF2P8AFFINEQB $0x00, Z1, Z12, Z11
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z12
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z3, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z12
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z5, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z12
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z7, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (CX), Z12
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z9, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Store 2 outputs
+	VMOVDQU64 Z10, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z11, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x2_64_loop
+	VZEROUPPER
+
+mulGFNI_5x2_64_end:
+	RET
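
The mulGFNI_* kernels take a different route: their matrix argument is a []uint64 whose entries are 8-byte GF(2) bit matrices, each encoding multiplication by one coefficient. VBROADCASTF32X2 replicates one such matrix across a ZMM register, VGF2P8AFFINEQB applies it to 64 input bytes at once, and VXORPD accumulates the result. The load order of Z0..Z9 above implies the flattened layout matrix[input*numOutputs+output]. A scalar sketch of the 5-input, 2-output data flow under those assumptions (gfMul and the 0x11D polynomial are again illustrative):

package main

import "fmt"

// gfMul is a scalar stand-in for one VGF2P8AFFINEQB: multiplying a data byte
// by a constant GF(2^8) coefficient (polynomial 0x11D assumed).
func gfMul(c, x byte) byte {
	var p byte
	for x > 0 {
		if x&1 != 0 {
			p ^= c
		}
		carry := c & 0x80
		c <<= 1
		if carry != 0 {
			c ^= 0x1d
		}
		x >>= 1
	}
	return p
}

// mul5x2 mirrors the mulGFNI_5x2_64 loop body for one byte position: five
// input shards, two output shards, coefficients laid out as coeffs[i*2+o],
// which is the order the Z0..Z9 broadcasts are consumed in.
func mul5x2(coeffs [10]byte, in [5]byte) (out [2]byte) {
	for i := 0; i < 5; i++ {
		for o := 0; o < 2; o++ {
			out[o] ^= gfMul(coeffs[i*2+o], in[i]) // VGF2P8AFFINEQB + VXORPD
		}
	}
	return
}

func main() {
	coeffs := [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} // illustrative values
	in := [5]byte{0x11, 0x22, 0x33, 0x44, 0x55}
	fmt.Printf("two output bytes: %#v\n", mul5x2(coeffs, in))
}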
+
+// func mulGFNI_5x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R8
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, CX
+
+mulGFNI_5x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (R9), Z10
+	VMOVDQU64 (R8), Z11
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z12
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z1, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z12
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z3, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z12
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z5, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z12
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z7, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (CX), Z12
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z12, Z13
+	VXORPD         Z10, Z13, Z10
+	VGF2P8AFFINEQB $0x00, Z9, Z12, Z13
+	VXORPD         Z11, Z13, Z11
+
+	// Store 2 outputs
+	VMOVDQU64 Z10, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z11, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x2_64Xor_end:
+	RET
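
The only difference between mulGFNI_5x2_64 and this Xor twin is the "Load 2 outputs" block at the top of the loop: the Xor variant reads the current contents of the output shards and accumulates into them instead of overwriting, so callers can fold additional parity into buffers that already hold a partial result. A single-input sketch of the two behaviours (mulSet, mulXor and mulConst are made-up names; mulConst merely stands in for "multiply by a fixed coefficient"):

package main

import "fmt"

// mulConst stands in for "multiply a byte by a fixed coefficient"; any
// byte-to-byte function works for showing the control-flow difference.
func mulConst(x byte) byte { return x ^ 0x5a }

// mulSet overwrites the output shard, like the non-Xor kernels.
func mulSet(out, in []byte) {
	for i := range in {
		out[i] = mulConst(in[i])
	}
}

// mulXor folds the product into whatever the output shard already holds,
// like the Xor kernels.
func mulXor(out, in []byte) {
	for i := range in {
		out[i] ^= mulConst(in[i])
	}
}

func main() {
	in := []byte{1, 2, 3, 4}
	out := []byte{9, 9, 9, 9}
	mulSet(out, in)
	fmt.Println("set:", out)
	out = []byte{9, 9, 9, 9}
	mulXor(out, in)
	fmt.Println("xor:", out)
}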
+
+// func mulAvxTwo_5x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x2Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 82 YMM used
+	// Full registers estimated 27 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x7_end
+	JZ    mulAvxTwo_5x2Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -10114,685 +24663,331 @@ TEXT ·mulAvxTwo_5x7(SB), NOSPLIT, $8-88
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
 	MOVQ  (R9), R10
-	MOVQ  24(R9), R11
-	MOVQ  48(R9), R12
-	MOVQ  72(R9), R13
-	MOVQ  96(R9), R14
-	MOVQ  120(R9), R15
-	MOVQ  144(R9), R9
-	MOVQ  start+72(FP), BP
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ BP, R10
-	ADDQ BP, R11
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R9
+	ADDQ R11, R10
+	ADDQ R11, R9
 
 	// Add start offset to input
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X7
-	VPBROADCASTB X7, Y7
-
-mulAvxTwo_5x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X2
+	VPBROADCASTB X2, Y2
 
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (BX), Y10
+mulAvxTwo_5x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU (CX), Y8
-	VMOVDQU 32(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 64(CX), Y8
-	VMOVDQU 96(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 128(CX), Y8
-	VMOVDQU 160(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 192(CX), Y8
-	VMOVDQU 224(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 320(CX), Y8
-	VMOVDQU 352(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 384(CX), Y8
-	VMOVDQU 416(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 1 to 7 outputs
-	VMOVDQU (SI), Y10
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 448(CX), Y8
-	VMOVDQU 480(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 512(CX), Y8
-	VMOVDQU 544(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 576(CX), Y8
-	VMOVDQU 608(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 640(CX), Y8
-	VMOVDQU 672(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 704(CX), Y8
-	VMOVDQU 736(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 768(CX), Y8
-	VMOVDQU 800(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 832(CX), Y8
-	VMOVDQU 864(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 2 to 7 outputs
-	VMOVDQU (DI), Y10
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 896(CX), Y8
-	VMOVDQU 928(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 960(CX), Y8
-	VMOVDQU 992(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1024(CX), Y8
-	VMOVDQU 1056(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1088(CX), Y8
-	VMOVDQU 1120(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1152(CX), Y8
-	VMOVDQU 1184(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1216(CX), Y8
-	VMOVDQU 1248(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1280(CX), Y8
-	VMOVDQU 1312(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 3 to 7 outputs
-	VMOVDQU (R8), Y10
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1344(CX), Y8
-	VMOVDQU 1376(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1408(CX), Y8
-	VMOVDQU 1440(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1472(CX), Y8
-	VMOVDQU 1504(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1536(CX), Y8
-	VMOVDQU 1568(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1600(CX), Y8
-	VMOVDQU 1632(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1664(CX), Y8
-	VMOVDQU 1696(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1728(CX), Y8
-	VMOVDQU 1760(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 4 to 7 outputs
-	VMOVDQU (DX), Y10
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (DX), Y5
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1792(CX), Y8
-	VMOVDQU 1824(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1856(CX), Y8
-	VMOVDQU 1888(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1920(CX), Y8
-	VMOVDQU 1952(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1984(CX), Y8
-	VMOVDQU 2016(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2048(CX), Y8
-	VMOVDQU 2080(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2112(CX), Y8
-	VMOVDQU 2144(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2176(CX), Y8
-	VMOVDQU 2208(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Store 7 outputs
+	// Store 2 outputs
 	VMOVDQU Y0, (R10)
 	ADDQ    $0x20, R10
-	VMOVDQU Y1, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y2, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y3, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y4, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y5, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y6, (R9)
+	VMOVDQU Y1, (R9)
 	ADDQ    $0x20, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_5x7_loop
+	JNZ  mulAvxTwo_5x2Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_5x7_end:
+mulAvxTwo_5x2Xor_end:
 	RET
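
The XOR3WAY(...) macro used throughout the regenerated kernels is defined near the top of this file, outside the excerpt; judging by the "Requires: ... AVX512F, AVX512VL" annotations on otherwise AVX2-only routines, it appears to expand to a single VPTERNLOGD with immediate 0x96 (a three-input XOR into the destination) when AVX-512VL is selected, with a two-VPXOR fallback otherwise. The immediate is simply the truth table of a ^ b ^ c, as this small check illustrates:

package main

import "fmt"

func main() {
	// VPTERNLOGD's immediate is the truth table of a 3-input boolean
	// function: bit (a<<2 | b<<1 | c) holds f(a, b, c). For a 3-way XOR
	// the set bits are exactly the odd-parity inputs.
	var imm byte
	for i := 0; i < 8; i++ {
		a, b, c := (i>>2)&1, (i>>1)&1, i&1
		if a^b^c == 1 {
			imm |= 1 << i
		}
	}
	fmt.Printf("ternary-logic immediate for a^b^c: %#02x\n", imm) // prints 0x96
}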
 
-// func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x8(SB), NOSPLIT, $8-88
+// func mulAvxTwo_5x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x2_64Xor(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 93 YMM used
+	// Full registers estimated 49 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x8_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  (R8), R9
-	MOVQ  24(R8), R10
-	MOVQ  48(R8), R11
-	MOVQ  72(R8), R12
-	MOVQ  96(R8), R13
-	MOVQ  120(R8), R14
-	MOVQ  144(R8), R15
-	MOVQ  168(R8), R8
-	MOVQ  start+72(FP), BP
+	JZ    mulAvxTwo_5x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ BP, R9
-	ADDQ BP, R10
-	ADDQ BP, R11
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R8
+	ADDQ R11, R10
+	ADDQ R11, R9
 
 	// Add start offset to input
-	ADDQ         BP, DX
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, AX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X8
-	VPBROADCASTB X8, Y8
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
+	VPBROADCASTB X4, Y4
 
-mulAvxTwo_5x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+mulAvxTwo_5x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R10), Y0
+	VMOVDQU 32(R10), Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 32(R9), Y3
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (DX), Y11
-	ADDQ    $0x20, DX
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (BX), Y11
-	ADDQ    $0x20, BX
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y9
-	VMOVDQU 672(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 704(CX), Y9
-	VMOVDQU 736(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 768(CX), Y9
-	VMOVDQU 800(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 832(CX), Y9
-	VMOVDQU 864(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 896(CX), Y9
-	VMOVDQU 928(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 960(CX), Y9
-	VMOVDQU 992(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 2 to 8 outputs
-	VMOVDQU (SI), Y11
-	ADDQ    $0x20, SI
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1024(CX), Y9
-	VMOVDQU 1056(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1088(CX), Y9
-	VMOVDQU 1120(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1152(CX), Y9
-	VMOVDQU 1184(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1216(CX), Y9
-	VMOVDQU 1248(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y9
-	VMOVDQU 1312(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1344(CX), Y9
-	VMOVDQU 1376(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1408(CX), Y9
-	VMOVDQU 1440(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1472(CX), Y9
-	VMOVDQU 1504(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 3 to 8 outputs
-	VMOVDQU (DI), Y11
-	ADDQ    $0x20, DI
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1536(CX), Y9
-	VMOVDQU 1568(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1600(CX), Y9
-	VMOVDQU 1632(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1664(CX), Y9
-	VMOVDQU 1696(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1728(CX), Y9
-	VMOVDQU 1760(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1792(CX), Y9
-	VMOVDQU 1824(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1856(CX), Y9
-	VMOVDQU 1888(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1920(CX), Y9
-	VMOVDQU 1952(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1984(CX), Y9
-	VMOVDQU 2016(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 4 to 8 outputs
-	VMOVDQU (AX), Y11
-	ADDQ    $0x20, AX
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2048(CX), Y9
-	VMOVDQU 2080(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2112(CX), Y9
-	VMOVDQU 2144(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2176(CX), Y9
-	VMOVDQU 2208(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2240(CX), Y9
-	VMOVDQU 2272(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2304(CX), Y9
-	VMOVDQU 2336(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2368(CX), Y9
-	VMOVDQU 2400(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2432(CX), Y9
-	VMOVDQU 2464(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 2496(CX), Y9
-	VMOVDQU 2528(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Store 8 outputs
-	VMOVDQU Y0, (R9)
-	ADDQ    $0x20, R9
-	VMOVDQU Y1, (R10)
-	ADDQ    $0x20, R10
-	VMOVDQU Y2, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y3, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y4, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y5, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y6, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y7, (R8)
-	ADDQ    $0x20, R8
+	// Store 2 outputs
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
-	DECQ BP
-	JNZ  mulAvxTwo_5x8_loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_5x8_end:
+mulAvxTwo_5x2_64Xor_end:
 	RET
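
The _64 variants such as mulAvxTwo_5x2_64Xor above differ from their plain counterparts only in blocking: they move two YMM registers (64 bytes) per input per iteration, so the byte count is shifted right by 6 instead of 5 when computing the loop count (SHRQ $0x06 versus $0x05), roughly halving loop overhead in exchange for higher register pressure (the "27 YMM" versus "49 YMM" estimates above). The loop-count arithmetic, with illustrative numbers:

package main

import "fmt"

func main() {
	// n is the number of bytes each shard contributes to one call.
	n := 1 << 20
	loops32 := n >> 5 // plain kernels: one 32-byte YMM block per iteration
	loops64 := n >> 6 // _64 kernels: two YMM blocks (64 bytes) per iteration
	fmt.Println("32-byte iterations:", loops32, "64-byte iterations:", loops64)
}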
 
-// func mulAvxTwo_5x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x9(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x3(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 104 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x9_end
+	JZ    mulAvxTwo_5x3_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -10800,375 +24995,164 @@ TEXT ·mulAvxTwo_5x9(SB), NOSPLIT, $0-88
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
-	MOVQ  start+72(FP), R10
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
 
 	// Add start offset to input
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X9
-	VPBROADCASTB X9, Y9
-
-mulAvxTwo_5x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
+mulAvxTwo_5x3_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y2
 
-	// Load and process 32 bytes from input 1 to 9 outputs
-	VMOVDQU (SI), Y12
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 576(CX), Y10
-	VMOVDQU 608(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 640(CX), Y10
-	VMOVDQU 672(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 704(CX), Y10
-	VMOVDQU 736(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 768(CX), Y10
-	VMOVDQU 800(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 832(CX), Y10
-	VMOVDQU 864(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 896(CX), Y10
-	VMOVDQU 928(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 960(CX), Y10
-	VMOVDQU 992(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1024(CX), Y10
-	VMOVDQU 1056(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1088(CX), Y10
-	VMOVDQU 1120(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 2 to 9 outputs
-	VMOVDQU (DI), Y12
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1152(CX), Y10
-	VMOVDQU 1184(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1216(CX), Y10
-	VMOVDQU 1248(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1280(CX), Y10
-	VMOVDQU 1312(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1344(CX), Y10
-	VMOVDQU 1376(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1408(CX), Y10
-	VMOVDQU 1440(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 1472(CX), Y10
-	VMOVDQU 1504(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 1536(CX), Y10
-	VMOVDQU 1568(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1600(CX), Y10
-	VMOVDQU 1632(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1664(CX), Y10
-	VMOVDQU 1696(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 3 to 9 outputs
-	VMOVDQU (R8), Y12
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1728(CX), Y10
-	VMOVDQU 1760(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1792(CX), Y10
-	VMOVDQU 1824(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1856(CX), Y10
-	VMOVDQU 1888(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1920(CX), Y10
-	VMOVDQU 1952(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1984(CX), Y10
-	VMOVDQU 2016(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2048(CX), Y10
-	VMOVDQU 2080(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2112(CX), Y10
-	VMOVDQU 2144(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2176(CX), Y10
-	VMOVDQU 2208(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2240(CX), Y10
-	VMOVDQU 2272(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 4 to 9 outputs
-	VMOVDQU (DX), Y12
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2304(CX), Y10
-	VMOVDQU 2336(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2368(CX), Y10
-	VMOVDQU 2400(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 2432(CX), Y10
-	VMOVDQU 2464(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 2496(CX), Y10
-	VMOVDQU 2528(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 2560(CX), Y10
-	VMOVDQU 2592(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2624(CX), Y10
-	VMOVDQU 2656(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2688(CX), Y10
-	VMOVDQU 2720(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2752(CX), Y10
-	VMOVDQU 2784(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2816(CX), Y10
-	VMOVDQU 2848(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Store 9 outputs
-	MOVQ    (R9), R11
-	VMOVDQU Y0, (R11)(R10*1)
-	MOVQ    24(R9), R11
-	VMOVDQU Y1, (R11)(R10*1)
-	MOVQ    48(R9), R11
-	VMOVDQU Y2, (R11)(R10*1)
-	MOVQ    72(R9), R11
-	VMOVDQU Y3, (R11)(R10*1)
-	MOVQ    96(R9), R11
-	VMOVDQU Y4, (R11)(R10*1)
-	MOVQ    120(R9), R11
-	VMOVDQU Y5, (R11)(R10*1)
-	MOVQ    144(R9), R11
-	VMOVDQU Y6, (R11)(R10*1)
-	MOVQ    168(R9), R11
-	VMOVDQU Y7, (R11)(R10*1)
-	MOVQ    192(R9), R11
-	VMOVDQU Y8, (R11)(R10*1)
+	// Store 3 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
-	ADDQ $0x20, R10
 	DECQ AX
-	JNZ  mulAvxTwo_5x9_loop
+	JNZ  mulAvxTwo_5x3_loop
 	VZEROUPPER
 
-mulAvxTwo_5x9_end:
+mulAvxTwo_5x3_end:
 	RET
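
In the AVX2 kernels the matrix []byte argument is consumed 64 bytes per (input, output) pair: a 32-byte low-nibble table followed by a 32-byte high-nibble table, so the offsets above follow 64*(input*numOutputs+output). For mulAvxTwo_5x3, input 4 / output 2 lands at 64*(4*3+2) = 896, matching the final 896(CX)/928(CX) loads. A tiny helper that reproduces the offsets (tableOffset is a made-up name for illustration):

package main

import "fmt"

// tableOffset returns the byte offset of the low-nibble table for a given
// (input, output) pair, assuming the 64-bytes-per-pair layout observed in
// the mulAvxTwo_5x3 listing above.
func tableOffset(input, output, numOut int) int {
	return 64 * (input*numOut + output)
}

func main() {
	for in := 0; in < 5; in++ {
		for out := 0; out < 3; out++ {
			fmt.Printf("input %d, output %d -> %d(CX)\n", in, out, tableOffset(in, out, 3))
		}
	}
}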
 
-// func mulAvxTwo_5x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x10(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 115 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 70 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x10_end
+	JZ    mulAvxTwo_5x3_64_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -11176,1197 +25160,527 @@ TEXT ·mulAvxTwo_5x10(SB), NOSPLIT, $0-88
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
-	MOVQ  start+72(FP), R10
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
 
 	// Add start offset to input
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X10
-	VPBROADCASTB X10, Y10
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X6
+	VPBROADCASTB X6, Y6
 
-mulAvxTwo_5x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+mulAvxTwo_5x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
-	ADDQ    $0x20, BX
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
-	ADDQ    $0x20, SI
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 832(CX), Y11
-	VMOVDQU 864(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 896(CX), Y11
-	VMOVDQU 928(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 960(CX), Y11
-	VMOVDQU 992(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1024(CX), Y11
-	VMOVDQU 1056(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1088(CX), Y11
-	VMOVDQU 1120(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1152(CX), Y11
-	VMOVDQU 1184(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1216(CX), Y11
-	VMOVDQU 1248(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
-
-	// Load and process 32 bytes from input 2 to 10 outputs
-	VMOVDQU (DI), Y13
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1280(CX), Y11
-	VMOVDQU 1312(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1344(CX), Y11
-	VMOVDQU 1376(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 1408(CX), Y11
-	VMOVDQU 1440(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 1472(CX), Y11
-	VMOVDQU 1504(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 1536(CX), Y11
-	VMOVDQU 1568(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 1600(CX), Y11
-	VMOVDQU 1632(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1664(CX), Y11
-	VMOVDQU 1696(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1728(CX), Y11
-	VMOVDQU 1760(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1792(CX), Y11
-	VMOVDQU 1824(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1856(CX), Y11
-	VMOVDQU 1888(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 3 to 10 outputs
-	VMOVDQU (R8), Y13
-	ADDQ    $0x20, R8
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1920(CX), Y11
-	VMOVDQU 1952(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1984(CX), Y11
-	VMOVDQU 2016(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2048(CX), Y11
-	VMOVDQU 2080(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2112(CX), Y11
-	VMOVDQU 2144(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2176(CX), Y11
-	VMOVDQU 2208(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2240(CX), Y11
-	VMOVDQU 2272(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2304(CX), Y11
-	VMOVDQU 2336(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 2368(CX), Y11
-	VMOVDQU 2400(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 2432(CX), Y11
-	VMOVDQU 2464(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 2496(CX), Y11
-	VMOVDQU 2528(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 4 to 10 outputs
-	VMOVDQU (DX), Y13
-	ADDQ    $0x20, DX
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 2560(CX), Y11
-	VMOVDQU 2592(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 2624(CX), Y11
-	VMOVDQU 2656(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2688(CX), Y11
-	VMOVDQU 2720(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2752(CX), Y11
-	VMOVDQU 2784(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2816(CX), Y11
-	VMOVDQU 2848(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2880(CX), Y11
-	VMOVDQU 2912(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2944(CX), Y11
-	VMOVDQU 2976(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3008(CX), Y11
-	VMOVDQU 3040(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3072(CX), Y11
-	VMOVDQU 3104(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3136(CX), Y11
-	VMOVDQU 3168(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Store 10 outputs
-	MOVQ    (R9), R11
-	VMOVDQU Y0, (R11)(R10*1)
-	MOVQ    24(R9), R11
-	VMOVDQU Y1, (R11)(R10*1)
-	MOVQ    48(R9), R11
-	VMOVDQU Y2, (R11)(R10*1)
-	MOVQ    72(R9), R11
-	VMOVDQU Y3, (R11)(R10*1)
-	MOVQ    96(R9), R11
-	VMOVDQU Y4, (R11)(R10*1)
-	MOVQ    120(R9), R11
-	VMOVDQU Y5, (R11)(R10*1)
-	MOVQ    144(R9), R11
-	VMOVDQU Y6, (R11)(R10*1)
-	MOVQ    168(R9), R11
-	VMOVDQU Y7, (R11)(R10*1)
-	MOVQ    192(R9), R11
-	VMOVDQU Y8, (R11)(R10*1)
-	MOVQ    216(R9), R11
-	VMOVDQU Y9, (R11)(R10*1)
+	// Store 3 outputs
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R11)
+	VMOVDQU Y3, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y4, (R9)
+	VMOVDQU Y5, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
-	ADDQ $0x20, R10
 	DECQ AX
-	JNZ  mulAvxTwo_5x10_loop
+	JNZ  mulAvxTwo_5x3_64_loop
 	VZEROUPPER
 
-mulAvxTwo_5x10_end:
+mulAvxTwo_5x3_64_end:
 	RET
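The mulAvxTwo_* kernels above implement GF(2^8) multiply-accumulate with the split-nibble VPSHUFB technique: each input byte is split into its low and high 4 bits (VPAND against the broadcast 0x0f mask plus VPSRLQ $0x04), each nibble selects an entry from a 16-byte lookup table held in a YMM register (VPSHUFB), and the two partial products are XORed into the output accumulators. The _64 variants are the same kernel unrolled to consume two 32-byte halves per iteration. A minimal scalar sketch of the same idea, with an illustrative table layout that is not necessarily the generator's exact one:

    package gfsketch

    // mulAddNibbles multiplies src by a fixed GF(2^8) constant c and XORs the
    // product into dst. lo[x] holds c*x for the low nibble and hi[x] holds
    // c*(x<<4) for the high nibble, mirroring the pair of VPSHUFB tables the
    // assembly loads from the matrix buffer (CX) for every input/output pair.
    func mulAddNibbles(dst, src []byte, lo, hi *[16]byte) {
        for i, b := range src {
            dst[i] ^= lo[b&0x0f] ^ hi[b>>4]
        }
    }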
 
-// func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x1(SB), NOSPLIT, $0-88
+// func mulGFNI_5x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x3_64(SB), $0-88
 	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 16 YMM used
-	MOVQ    n+80(FP), AX
-	MOVQ    matrix_base+0(FP), CX
-	SHRQ    $0x05, AX
-	TESTQ   AX, AX
-	JZ      mulAvxTwo_6x1_end
-	VMOVDQU (CX), Y0
-	VMOVDQU 32(CX), Y1
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
-	MOVQ    in_base+24(FP), CX
-	MOVQ    (CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    48(CX), SI
-	MOVQ    72(CX), DI
-	MOVQ    96(CX), R8
-	MOVQ    120(CX), CX
-	MOVQ    out_base+48(FP), R9
-	MOVQ    (R9), R9
-	MOVQ    start+72(FP), R10
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R8
+	MOVQ            start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ R10, R9
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
 
 	// Add start offset to input
-	ADDQ         R10, DX
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, CX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X13
-	VPBROADCASTB X13, Y13
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, CX
 
-mulAvxTwo_6x1_loop:
-	// Clear 1 outputs
-	VPXOR Y12, Y12, Y12
+mulGFNI_5x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z18
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z18, Z15
+	VGF2P8AFFINEQB $0x00, Z1, Z18, Z16
+	VGF2P8AFFINEQB $0x00, Z2, Z18, Z17
 
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y14
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y0, Y14
-	VPSHUFB Y15, Y1, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z18
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z4, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z18, Z19
+	VXORPD         Z17, Z19, Z17
 
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y14
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y2, Y14
-	VPSHUFB Y15, Y3, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z18
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z7, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z8, Z18, Z19
+	VXORPD         Z17, Z19, Z17
 
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y14
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y4, Y14
-	VPSHUFB Y15, Y5, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z18
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z10, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z11, Z18, Z19
+	VXORPD         Z17, Z19, Z17
 
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y14
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y6, Y14
-	VPSHUFB Y15, Y7, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (CX), Z18
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z13, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z14, Z18, Z19
+	VXORPD         Z17, Z19, Z17
 
-	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y14
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y8, Y14
-	VPSHUFB Y15, Y9, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
+	// Store 3 outputs
+	VMOVDQU64 Z15, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z16, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z17, (R8)
+	ADDQ      $0x40, R8
 
-	// Load and process 32 bytes from input 5 to 1 outputs
-	VMOVDQU (CX), Y14
-	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y10, Y14
-	VPSHUFB Y15, Y11, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x3_64_loop
+	VZEROUPPER
 
-	// Store 1 outputs
-	VMOVDQU Y12, (R9)
-	ADDQ    $0x20, R9
+mulGFNI_5x3_64_end:
+	RET
+
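The mulGFNI_* kernels take a different route: each matrix coefficient is pre-encoded as an 8x8 bit-matrix over GF(2), stored as one uint64 and broadcast into a ZMM register with VBROADCASTF32X2, and VGF2P8AFFINEQB applies that matrix to all 64 input bytes at once, with VXORPD folding in the contributions of later inputs. This works because multiplication by a fixed constant in GF(2^8) is a linear map over GF(2), so it can always be expressed as such a bit-matrix. A scalar illustration of the underlying field multiply, assuming the usual Reed-Solomon polynomial 0x11d (the exact bit-matrix encoding follows the GFNI convention and is not reproduced here):

    package gfsketch

    // gfMul multiplies two GF(2^8) elements modulo x^8+x^4+x^3+x^2+1 (0x11d).
    // Linearity - gfMul(c, x^y) == gfMul(c, x) ^ gfMul(c, y) - is what lets a
    // per-coefficient 8x8 bit-matrix stand in for a 256-entry lookup table.
    func gfMul(a, b byte) byte {
        var p byte
        for b != 0 {
            if b&1 != 0 {
                p ^= a
            }
            carry := a & 0x80
            a <<= 1
            if carry != 0 {
                a ^= 0x1d // reduce by the field polynomial
            }
            b >>= 1
        }
        return p
    }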
+// func mulGFNI_5x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R8
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, CX
+
+mulGFNI_5x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (R9), Z15
+	VMOVDQU64 (R10), Z16
+	VMOVDQU64 (R8), Z17
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z18
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z1, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z2, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z18
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z4, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z18
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z7, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z8, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z18
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z10, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z11, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (CX), Z18
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z18, Z19
+	VXORPD         Z15, Z19, Z15
+	VGF2P8AFFINEQB $0x00, Z13, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z14, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Store 3 outputs
+	VMOVDQU64 Z15, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z16, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z17, (R8)
+	ADDQ      $0x40, R8
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_6x1_loop
+	JNZ  mulGFNI_5x3_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_6x1_end:
+mulGFNI_5x3_64Xor_end:
 	RET
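The ...Xor variants, such as mulGFNI_5x3_64Xor above, differ from the plain kernels only in the loop prologue: the plain version writes the first input's products straight into the output registers, while the Xor version first loads the current output contents ("Load 3 outputs") and accumulates every product on top of them. Semantically it is the difference between out = sum and out ^= sum, sketched here for a single input/output pair using the mulAddNibbles helper from the earlier sketch:

    // mulShard is a scalar model of one (input, output) pair in these kernels.
    // xor == false matches the plain kernels (the output is overwritten);
    // xor == true matches the ...Xor kernels (the output is accumulated into).
    func mulShard(out, in []byte, lo, hi *[16]byte, xor bool) {
        if !xor {
            for i := range out {
                out[i] = 0
            }
        }
        mulAddNibbles(out, in, lo, hi)
    }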
 
-// func mulAvxTwo_6x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x1_64(SB), $0-88
+// func mulAvxTwo_5x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x3Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 16 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), AX
-	MOVQ  out_base+48(FP), R9
+	JZ    mulAvxTwo_5x3Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
-	MOVQ  start+72(FP), R10
-
-	// Add start offset to input
-	ADDQ         R10, DX
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, AX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R11
-	SHRQ         $0x06, R11
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
 
-mulAvxTwo_6x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
 
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 64 bytes from input 1 to 1 outputs
+mulAvxTwo_5x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
+	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 64 bytes from input 2 to 1 outputs
+	// Load and process 32 bytes from input 1 to 3 outputs
 	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y6
-	VMOVDQU 32(R8), Y5
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 5 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Store 1 outputs
-	MOVQ    (R9), R12
-	VMOVDQU Y0, (R12)(R10*1)
-	VMOVDQU Y1, 32(R12)(R10*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, R10
-	DECQ R11
-	JNZ  mulAvxTwo_6x1_64_loop
-	VZEROUPPER
-
-mulAvxTwo_6x1_64_end:
-	RET
-
-// func mulAvxTwo_6x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x2(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 31 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_6x2_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  (R10), R11
-	MOVQ  24(R10), R10
-	MOVQ  start+72(FP), R12
-
-	// Add start offset to output
-	ADDQ R12, R11
-	ADDQ R12, R10
-
-	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X2
-	VPBROADCASTB X2, Y2
-
-mulAvxTwo_6x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 1 to 2 outputs
-	VMOVDQU (SI), Y5
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 2 to 2 outputs
-	VMOVDQU (DI), Y5
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 3 to 2 outputs
-	VMOVDQU (R8), Y5
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 4 to 2 outputs
-	VMOVDQU (R9), Y5
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 512(CX), Y3
-	VMOVDQU 544(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 576(CX), Y3
-	VMOVDQU 608(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 5 to 2 outputs
-	VMOVDQU (DX), Y5
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 640(CX), Y3
-	VMOVDQU 672(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 704(CX), Y3
-	VMOVDQU 736(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Store 2 outputs
-	VMOVDQU Y0, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y1, (R10)
-	ADDQ    $0x20, R10
-
-	// Prepare for next loop
-	DECQ AX
-	JNZ  mulAvxTwo_6x2_loop
-	VZEROUPPER
-
-mulAvxTwo_6x2_end:
-	RET
-
-// func mulAvxTwo_6x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x2_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 31 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_6x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), AX
-	MOVQ  out_base+48(FP), R9
-	MOVQ  out_base+48(FP), R9
-	MOVQ  start+72(FP), R10
-
-	// Add start offset to input
-	ADDQ         R10, DX
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, AX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X4
-	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R11
-	SHRQ         $0x06, R11
-
-mulAvxTwo_6x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
-	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
-
-	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
-
-	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
-
-	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (DI), Y9
-	VMOVDQU 32(DI), Y11
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
-
-	// Load and process 64 bytes from input 4 to 2 outputs
-	VMOVDQU (R8), Y9
-	VMOVDQU 32(R8), Y11
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
-
-	// Load and process 64 bytes from input 5 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
-
-	// Store 2 outputs
-	MOVQ    (R9), R12
-	VMOVDQU Y0, (R12)(R10*1)
-	VMOVDQU Y1, 32(R12)(R10*1)
-	MOVQ    24(R9), R12
-	VMOVDQU Y2, (R12)(R10*1)
-	VMOVDQU Y3, 32(R12)(R10*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, R10
-	DECQ R11
-	JNZ  mulAvxTwo_6x2_64_loop
-	VZEROUPPER
-
-mulAvxTwo_6x2_64_end:
-	RET
-
-// func mulAvxTwo_6x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x3(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 44 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_6x3_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  (R10), R11
-	MOVQ  24(R10), R12
-	MOVQ  48(R10), R10
-	MOVQ  start+72(FP), R13
-
-	// Add start offset to output
-	ADDQ R13, R11
-	ADDQ R13, R12
-	ADDQ R13, R10
-
-	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X3
-	VPBROADCASTB X3, Y3
-
-mulAvxTwo_6x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-
-	// Load and process 32 bytes from input 0 to 3 outputs
-	VMOVDQU (BX), Y6
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU (CX), Y4
-	VMOVDQU 32(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 64(CX), Y4
-	VMOVDQU 96(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
-
-	// Load and process 32 bytes from input 1 to 3 outputs
-	VMOVDQU (SI), Y6
-	ADDQ    $0x20, SI
+	ADDQ    $0x20, SI
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -12374,20 +25688,17 @@ mulAvxTwo_6x3_loop:
 	VMOVDQU 224(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 256(CX), Y4
 	VMOVDQU 288(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 320(CX), Y4
 	VMOVDQU 352(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 2 to 3 outputs
 	VMOVDQU (DI), Y6
@@ -12399,20 +25710,17 @@ mulAvxTwo_6x3_loop:
 	VMOVDQU 416(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 448(CX), Y4
 	VMOVDQU 480(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 512(CX), Y4
 	VMOVDQU 544(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 3 to 3 outputs
 	VMOVDQU (R8), Y6
@@ -12424,24 +25732,21 @@ mulAvxTwo_6x3_loop:
 	VMOVDQU 608(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 640(CX), Y4
 	VMOVDQU 672(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 704(CX), Y4
 	VMOVDQU 736(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 4 to 3 outputs
-	VMOVDQU (R9), Y6
-	ADDQ    $0x20, R9
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -12449,110 +25754,86 @@ mulAvxTwo_6x3_loop:
 	VMOVDQU 800(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 832(CX), Y4
 	VMOVDQU 864(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 896(CX), Y4
 	VMOVDQU 928(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
-
-	// Load and process 32 bytes from input 5 to 3 outputs
-	VMOVDQU (DX), Y6
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 960(CX), Y4
-	VMOVDQU 992(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1024(CX), Y4
-	VMOVDQU 1056(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1088(CX), Y4
-	VMOVDQU 1120(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Store 3 outputs
-	VMOVDQU Y0, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y1, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y2, (R10)
+	VMOVDQU Y0, (R10)
 	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_6x3_loop
+	JNZ  mulAvxTwo_5x3Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_6x3_end:
+mulAvxTwo_5x3Xor_end:
 	RET
 
-// func mulAvxTwo_6x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x3_64(SB), $0-88
+// func mulAvxTwo_5x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x3_64Xor(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 44 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 70 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), AX
+	JZ    mulAvxTwo_5x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
 	MOVQ  out_base+48(FP), R9
-	MOVQ  start+72(FP), R10
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
 
 	// Add start offset to input
-	ADDQ         R10, DX
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, AX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X6
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R11
-	SHRQ         $0x06, R11
 
-mulAvxTwo_6x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+mulAvxTwo_5x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R10), Y0
+	VMOVDQU 32(R10), Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 32(R11), Y3
+	VMOVDQU (R9), Y4
+	VMOVDQU 32(R9), Y5
 
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -12565,35 +25846,29 @@ mulAvxTwo_6x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -12606,35 +25881,29 @@ mulAvxTwo_6x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (SI), Y11
-	VMOVDQU 32(SI), Y13
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -12647,35 +25916,29 @@ mulAvxTwo_6x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 3 to 3 outputs
-	VMOVDQU (DI), Y11
-	VMOVDQU 32(DI), Y13
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -12688,35 +25951,29 @@ mulAvxTwo_6x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 4 to 3 outputs
-	VMOVDQU (R8), Y11
-	VMOVDQU 32(R8), Y13
-	ADDQ    $0x40, R8
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -12729,141 +25986,85 @@ mulAvxTwo_6x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 832(CX), Y7
 	VMOVDQU 864(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 896(CX), Y7
 	VMOVDQU 928(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
-
-	// Load and process 64 bytes from input 5 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 960(CX), Y7
-	VMOVDQU 992(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1024(CX), Y7
-	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1088(CX), Y7
-	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Store 3 outputs
-	MOVQ    (R9), R12
-	VMOVDQU Y0, (R12)(R10*1)
-	VMOVDQU Y1, 32(R12)(R10*1)
-	MOVQ    24(R9), R12
-	VMOVDQU Y2, (R12)(R10*1)
-	VMOVDQU Y3, 32(R12)(R10*1)
-	MOVQ    48(R9), R12
-	VMOVDQU Y4, (R12)(R10*1)
-	VMOVDQU Y5, 32(R12)(R10*1)
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R11)
+	VMOVDQU Y3, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y4, (R9)
+	VMOVDQU Y5, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
-	ADDQ $0x40, R10
-	DECQ R11
-	JNZ  mulAvxTwo_6x3_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x3_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_6x3_64_end:
+mulAvxTwo_5x3_64Xor_end:
 	RET
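Every routine in this file shares the same Go ABI0 argument frame, which is where the recurring frame-pointer offsets come from: a slice header ([]byte or [][]byte) is 24 bytes on amd64 (pointer, length, capacity) and an int is 8, so matrix sits at +0, in at +24, out at +48, start at +72 and n at +80, giving the 88 argument bytes declared by every TEXT directive ($0-88). The layout can be confirmed from Go:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var s []byte
        var ss [][]byte
        var n int
        // 24 + 24 + 24 + 8 + 8 = 88 bytes of arguments, matching $0-88.
        fmt.Println(unsafe.Sizeof(s), unsafe.Sizeof(ss), unsafe.Sizeof(n)) // 24 24 8
    }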
 
-// func mulAvxTwo_6x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x4(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x4(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 57 YMM used
+	// Full registers estimated 49 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x4_end
+	JZ    mulAvxTwo_5x4_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  (R10), R11
-	MOVQ  24(R10), R12
-	MOVQ  48(R10), R13
-	MOVQ  72(R10), R10
-	MOVQ  start+72(FP), R14
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R9
+	MOVQ  start+72(FP), R13
 
 	// Add start offset to output
-	ADDQ R14, R11
-	ADDQ R14, R12
-	ADDQ R14, R13
-	ADDQ R14, R10
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R9
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X4
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X4
 	VPBROADCASTB X4, Y4
 
-mulAvxTwo_6x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
+mulAvxTwo_5x4_loop:
 	// Load and process 32 bytes from input 0 to 4 outputs
 	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
@@ -12874,26 +26075,22 @@ mulAvxTwo_6x4_loop:
 	VMOVDQU 32(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	VPXOR   Y5, Y6, Y0
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y5, Y6, Y1
 	VMOVDQU 128(CX), Y5
 	VMOVDQU 160(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	VPXOR   Y5, Y6, Y2
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y5, Y6, Y3
 
 	// Load and process 32 bytes from input 1 to 4 outputs
 	VMOVDQU (SI), Y7
@@ -12905,26 +26102,22 @@ mulAvxTwo_6x4_loop:
 	VMOVDQU 288(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	XOR3WAY( $0x00, Y5, Y6, Y0)
 	VMOVDQU 320(CX), Y5
 	VMOVDQU 352(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 	VMOVDQU 384(CX), Y5
 	VMOVDQU 416(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	XOR3WAY( $0x00, Y5, Y6, Y2)
 	VMOVDQU 448(CX), Y5
 	VMOVDQU 480(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
 	// Load and process 32 bytes from input 2 to 4 outputs
 	VMOVDQU (DI), Y7
@@ -12936,26 +26129,22 @@ mulAvxTwo_6x4_loop:
 	VMOVDQU 544(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	XOR3WAY( $0x00, Y5, Y6, Y0)
 	VMOVDQU 576(CX), Y5
 	VMOVDQU 608(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 	VMOVDQU 640(CX), Y5
 	VMOVDQU 672(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	XOR3WAY( $0x00, Y5, Y6, Y2)
 	VMOVDQU 704(CX), Y5
 	VMOVDQU 736(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
 	// Load and process 32 bytes from input 3 to 4 outputs
 	VMOVDQU (R8), Y7
@@ -12967,30 +26156,26 @@ mulAvxTwo_6x4_loop:
 	VMOVDQU 800(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	XOR3WAY( $0x00, Y5, Y6, Y0)
 	VMOVDQU 832(CX), Y5
 	VMOVDQU 864(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 	VMOVDQU 896(CX), Y5
 	VMOVDQU 928(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	XOR3WAY( $0x00, Y5, Y6, Y2)
 	VMOVDQU 960(CX), Y5
 	VMOVDQU 992(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
 	// Load and process 32 bytes from input 4 to 4 outputs
-	VMOVDQU (R9), Y7
-	ADDQ    $0x20, R9
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y7, Y8
 	VPAND   Y4, Y7, Y7
 	VPAND   Y4, Y8, Y8
@@ -12998,128 +26183,558 @@ mulAvxTwo_6x4_loop:
 	VMOVDQU 1056(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	XOR3WAY( $0x00, Y5, Y6, Y0)
 	VMOVDQU 1088(CX), Y5
 	VMOVDQU 1120(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 	VMOVDQU 1152(CX), Y5
 	VMOVDQU 1184(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	XOR3WAY( $0x00, Y5, Y6, Y2)
 	VMOVDQU 1216(CX), Y5
 	VMOVDQU 1248(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 5 to 4 outputs
+	// Store 4 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x4_loop
+	VZEROUPPER
+
+mulAvxTwo_5x4_end:
+	RET
+
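Two generator-level changes recur throughout the rewritten mulAvxTwo_* bodies. First, the old "Clear N outputs" block is gone: the first input's product is written directly into each accumulator (for example VPXOR Y5, Y6, Y0 above) rather than XORed into a pre-zeroed register. Second, subsequent inputs fold two partial products and the accumulator together with the XOR3WAY macro; judging by the AVX512F/AVX512VL entries now listed in the Requires lines, this macro is expected to expand to a single VPTERNLOGD with immediate 0x96, the truth table of a three-way XOR, although its definition sits outside this excerpt. That immediate is easy to verify:

    package main

    import "fmt"

    func main() {
        // Bit i of a VPTERNLOG immediate is the output for the input triple
        // encoded in i's three bits; XOR is symmetric, so the exact
        // operand-to-bit mapping does not matter here.
        var imm byte
        for i := 0; i < 8; i++ {
            a, b, c := (i>>2)&1, (i>>1)&1, i&1
            imm |= byte(a^b^c) << i
        }
        fmt.Printf("%#x\n", imm) // prints 0x96
    }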
+// func mulGFNI_5x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x4_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R8
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R8
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, CX
+
+mulGFNI_5x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z23
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z24
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z24
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (DI), Z24
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z16, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z17, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z18, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z19, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 4 outputs
+	VMOVDQU64 Z20, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z21, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z22, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z23, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x4_64_loop
+	VZEROUPPER
+
+mulGFNI_5x4_64_end:
+	RET
+
+// func mulGFNI_5x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x4_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R8
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R8
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, CX
+
+mulGFNI_5x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (R9), Z20
+	VMOVDQU64 (R10), Z21
+	VMOVDQU64 (R11), Z22
+	VMOVDQU64 (R8), Z23
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z24
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z24
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (DI), Z24
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z16, Z24, Z25
+	VXORPD         Z20, Z25, Z20
+	VGF2P8AFFINEQB $0x00, Z17, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z18, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z19, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 4 outputs
+	VMOVDQU64 Z20, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z21, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z22, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z23, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 49 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R9
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R9
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_5x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (R9), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
 	VMOVDQU (DX), Y7
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y7, Y8
 	VPAND   Y4, Y7, Y7
 	VPAND   Y4, Y8, Y8
-	VMOVDQU 1280(CX), Y5
-	VMOVDQU 1312(CX), Y6
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1344(CX), Y5
-	VMOVDQU 1376(CX), Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1408(CX), Y5
-	VMOVDQU 1440(CX), Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1472(CX), Y5
-	VMOVDQU 1504(CX), Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
 	// Store 4 outputs
-	VMOVDQU Y0, (R11)
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
 	ADDQ    $0x20, R11
-	VMOVDQU Y1, (R12)
+	VMOVDQU Y2, (R12)
 	ADDQ    $0x20, R12
-	VMOVDQU Y2, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y3, (R10)
-	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_6x4_loop
+	JNZ  mulAvxTwo_5x4Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_6x4_end:
+mulAvxTwo_5x4Xor_end:
 	RET
 
-// func mulAvxTwo_6x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x5(SB), NOSPLIT, $0-88
+// func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x5(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 70 YMM used
+	// Full registers estimated 60 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x5_end
+	JZ    mulAvxTwo_5x5_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  (R10), R11
-	MOVQ  24(R10), R12
-	MOVQ  48(R10), R13
-	MOVQ  72(R10), R14
-	MOVQ  96(R10), R10
-	MOVQ  start+72(FP), R15
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R9
+	MOVQ  start+72(FP), R14
 
 	// Add start offset to output
-	ADDQ R15, R11
-	ADDQ R15, R12
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, R10
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R9
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DI
-	ADDQ         R15, R8
-	ADDQ         R15, R9
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X5
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X5
 	VPBROADCASTB X5, Y5
 
-mulAvxTwo_6x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-
+mulAvxTwo_5x5_loop:
 	// Load and process 32 bytes from input 0 to 5 outputs
 	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
@@ -13130,32 +26745,27 @@ mulAvxTwo_6x5_loop:
 	VMOVDQU 32(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	VPXOR   Y6, Y7, Y0
 	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	VPXOR   Y6, Y7, Y1
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	VPXOR   Y6, Y7, Y2
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	VPXOR   Y6, Y7, Y3
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y6, Y7, Y4
 
 	// Load and process 32 bytes from input 1 to 5 outputs
 	VMOVDQU (SI), Y8
@@ -13167,32 +26777,27 @@ mulAvxTwo_6x5_loop:
 	VMOVDQU 352(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 384(CX), Y6
 	VMOVDQU 416(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 448(CX), Y6
 	VMOVDQU 480(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 512(CX), Y6
 	VMOVDQU 544(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 576(CX), Y6
 	VMOVDQU 608(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Load and process 32 bytes from input 2 to 5 outputs
 	VMOVDQU (DI), Y8
@@ -13204,32 +26809,27 @@ mulAvxTwo_6x5_loop:
 	VMOVDQU 672(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 704(CX), Y6
 	VMOVDQU 736(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 768(CX), Y6
 	VMOVDQU 800(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 832(CX), Y6
 	VMOVDQU 864(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 896(CX), Y6
 	VMOVDQU 928(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Load and process 32 bytes from input 3 to 5 outputs
 	VMOVDQU (R8), Y8
@@ -13241,36 +26841,31 @@ mulAvxTwo_6x5_loop:
 	VMOVDQU 992(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 1024(CX), Y6
 	VMOVDQU 1056(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 1088(CX), Y6
 	VMOVDQU 1120(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 1152(CX), Y6
 	VMOVDQU 1184(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 1216(CX), Y6
 	VMOVDQU 1248(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Load and process 32 bytes from input 4 to 5 outputs
-	VMOVDQU (R9), Y8
-	ADDQ    $0x20, R9
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y8, Y9
 	VPAND   Y5, Y8, Y8
 	VPAND   Y5, Y9, Y9
@@ -13278,145 +26873,635 @@ mulAvxTwo_6x5_loop:
 	VMOVDQU 1312(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	XOR3WAY( $0x00, Y6, Y7, Y0)
 	VMOVDQU 1344(CX), Y6
 	VMOVDQU 1376(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	XOR3WAY( $0x00, Y6, Y7, Y1)
 	VMOVDQU 1408(CX), Y6
 	VMOVDQU 1440(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	XOR3WAY( $0x00, Y6, Y7, Y2)
 	VMOVDQU 1472(CX), Y6
 	VMOVDQU 1504(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	XOR3WAY( $0x00, Y6, Y7, Y3)
 	VMOVDQU 1536(CX), Y6
 	VMOVDQU 1568(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
-
-	// Load and process 32 bytes from input 5 to 5 outputs
-	VMOVDQU (DX), Y8
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1600(CX), Y6
-	VMOVDQU 1632(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1664(CX), Y6
-	VMOVDQU 1696(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1728(CX), Y6
-	VMOVDQU 1760(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1792(CX), Y6
-	VMOVDQU 1824(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1856(CX), Y6
-	VMOVDQU 1888(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
 	// Store 5 outputs
-	VMOVDQU Y0, (R11)
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
 	ADDQ    $0x20, R11
-	VMOVDQU Y1, (R12)
+	VMOVDQU Y2, (R12)
 	ADDQ    $0x20, R12
-	VMOVDQU Y2, (R13)
+	VMOVDQU Y3, (R13)
 	ADDQ    $0x20, R13
-	VMOVDQU Y3, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y4, (R10)
-	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_6x5_loop
+	JNZ  mulAvxTwo_5x5_loop
 	VZEROUPPER
 
-mulAvxTwo_6x5_end:
+mulAvxTwo_5x5_end:
 	RET
 
-// func mulAvxTwo_6x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x6(SB), NOSPLIT, $8-88
-	// Loading no tables to registers
+// func mulGFNI_5x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x5_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 83 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_6x6_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  (R10), R11
-	MOVQ  24(R10), R12
-	MOVQ  48(R10), R13
-	MOVQ  72(R10), R14
-	MOVQ  96(R10), R15
-	MOVQ  120(R10), R10
-	MOVQ  start+72(FP), BP
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R8
+	MOVQ            start+72(FP), R13
 
 	// Add start offset to output
-	ADDQ BP, R11
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R10
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R8
 
 	// Add start offset to input
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, R9
-	ADDQ         BP, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X6
-	VPBROADCASTB X6, Y6
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, CX
+
+mulGFNI_5x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
 
-mulAvxTwo_6x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z26, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z27, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z28, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x5_64_loop
+	VZEROUPPER
+
+mulGFNI_5x5_64_end:
+	RET
+
+// func mulGFNI_5x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x5_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), CX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R8
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R8
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, CX
+
+mulGFNI_5x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (R9), Z25
+	VMOVDQU64 (R10), Z26
+	VMOVDQU64 (R11), Z27
+	VMOVDQU64 (R12), Z28
+	VMOVDQU64 (R8), Z29
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z26, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z27, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z28, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 60 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R9
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R9
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_5x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R13), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (R9), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x5Xor_end:
+	RET
+
+// func mulAvxTwo_5x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x6(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 71 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x6_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R14
+	MOVQ  120(R9), R9
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R9
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X6
+	VPBROADCASTB X6, Y6
 
+mulAvxTwo_5x6_loop:
 	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
@@ -13427,38 +27512,32 @@ mulAvxTwo_6x6_loop:
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPXOR   Y7, Y8, Y0
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPXOR   Y7, Y8, Y2
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y7, Y8, Y3
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPXOR   Y7, Y8, Y4
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPXOR   Y7, Y8, Y5
 
 	// Load and process 32 bytes from input 1 to 6 outputs
 	VMOVDQU (SI), Y9
@@ -13470,38 +27549,32 @@ mulAvxTwo_6x6_loop:
 	VMOVDQU 416(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 576(CX), Y7
 	VMOVDQU 608(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Load and process 32 bytes from input 2 to 6 outputs
 	VMOVDQU (DI), Y9
@@ -13513,38 +27586,32 @@ mulAvxTwo_6x6_loop:
 	VMOVDQU 800(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 832(CX), Y7
 	VMOVDQU 864(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 896(CX), Y7
 	VMOVDQU 928(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 960(CX), Y7
 	VMOVDQU 992(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 1024(CX), Y7
 	VMOVDQU 1056(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 1088(CX), Y7
 	VMOVDQU 1120(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Load and process 32 bytes from input 3 to 6 outputs
 	VMOVDQU (R8), Y9
@@ -13556,42 +27623,36 @@ mulAvxTwo_6x6_loop:
 	VMOVDQU 1184(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 1216(CX), Y7
 	VMOVDQU 1248(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 1280(CX), Y7
 	VMOVDQU 1312(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 1344(CX), Y7
 	VMOVDQU 1376(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 1408(CX), Y7
 	VMOVDQU 1440(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 1472(CX), Y7
 	VMOVDQU 1504(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Load and process 32 bytes from input 4 to 6 outputs
-	VMOVDQU (R9), Y9
-	ADDQ    $0x20, R9
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPAND   Y6, Y9, Y9
 	VPAND   Y6, Y10, Y10
@@ -13599,265 +27660,787 @@ mulAvxTwo_6x6_loop:
 	VMOVDQU 1568(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	XOR3WAY( $0x00, Y7, Y8, Y0)
 	VMOVDQU 1600(CX), Y7
 	VMOVDQU 1632(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 1664(CX), Y7
 	VMOVDQU 1696(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	XOR3WAY( $0x00, Y7, Y8, Y2)
 	VMOVDQU 1728(CX), Y7
 	VMOVDQU 1760(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 	VMOVDQU 1792(CX), Y7
 	VMOVDQU 1824(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	XOR3WAY( $0x00, Y7, Y8, Y4)
 	VMOVDQU 1856(CX), Y7
 	VMOVDQU 1888(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 5 to 6 outputs
-	VMOVDQU (DX), Y9
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1920(CX), Y7
-	VMOVDQU 1952(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1984(CX), Y7
-	VMOVDQU 2016(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2048(CX), Y7
-	VMOVDQU 2080(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2112(CX), Y7
-	VMOVDQU 2144(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2176(CX), Y7
-	VMOVDQU 2208(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2240(CX), Y7
-	VMOVDQU 2272(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
 	// Store 6 outputs
-	VMOVDQU Y0, (R11)
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
 	ADDQ    $0x20, R11
-	VMOVDQU Y1, (R12)
+	VMOVDQU Y2, (R12)
 	ADDQ    $0x20, R12
-	VMOVDQU Y2, (R13)
+	VMOVDQU Y3, (R13)
 	ADDQ    $0x20, R13
-	VMOVDQU Y3, (R14)
+	VMOVDQU Y4, (R14)
 	ADDQ    $0x20, R14
-	VMOVDQU Y4, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y5, (R10)
-	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_6x6_loop
+	JNZ  mulAvxTwo_5x6_loop
 	VZEROUPPER
 
-mulAvxTwo_6x6_end:
+mulAvxTwo_5x6_end:
 	RET
 
-// func mulAvxTwo_6x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x7(SB), NOSPLIT, $8-88
+// func mulGFNI_5x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x6_64(SB), $0-88
+	// Loading 24 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R13
+	MOVQ            96(R9), R14
+	MOVQ            120(R9), R9
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R9
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, DX
+
+mulGFNI_5x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x6_64_loop
+	VZEROUPPER
+
+mulGFNI_5x6_64_end:
+	RET
+
+// func mulGFNI_5x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x6_64Xor(SB), $0-88
+	// Loading 24 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R13
+	MOVQ            96(R9), R14
+	MOVQ            120(R9), R9
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R9
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, DX
+
+mulGFNI_5x6_64Xor_loop:
+	// Load 6 outputs
+	VMOVDQU64 (R10), Z24
+	VMOVDQU64 (R11), Z25
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (R13), Z27
+	VMOVDQU64 (R14), Z28
+	VMOVDQU64 (R9), Z29
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x6_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x6Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 96 YMM used
+	// Full registers estimated 71 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x7_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), AX
+	JZ    mulAvxTwo_5x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
 	MOVQ  (R9), R10
 	MOVQ  24(R9), R11
 	MOVQ  48(R9), R12
 	MOVQ  72(R9), R13
 	MOVQ  96(R9), R14
-	MOVQ  120(R9), R15
-	MOVQ  144(R9), R9
-	MOVQ  start+72(FP), BP
+	MOVQ  120(R9), R9
+	MOVQ  start+72(FP), R15
 
 	// Add start offset to output
-	ADDQ BP, R10
-	ADDQ BP, R11
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R9
 
 	// Add start offset to input
-	ADDQ         BP, DX
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, AX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X7
-	VPBROADCASTB X7, Y7
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_6x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X6
+	VPBROADCASTB X6, Y6
 
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (DX), Y10
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU (CX), Y8
-	VMOVDQU 32(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 64(CX), Y8
-	VMOVDQU 96(CX), Y9
+mulAvxTwo_5x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 128(CX), Y8
-	VMOVDQU 160(CX), Y9
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 192(CX), Y8
-	VMOVDQU 224(CX), Y9
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU (R13), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 320(CX), Y8
-	VMOVDQU 352(CX), Y9
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU (R14), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 384(CX), Y8
-	VMOVDQU 416(CX), Y9
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (R9), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 1 to 7 outputs
-	VMOVDQU (BX), Y10
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 448(CX), Y8
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y5, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x6Xor_end:
+	RET
+
+// func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x7(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 82 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R14
+	MOVQ  120(R9), R15
+	MOVQ  144(R9), R9
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_5x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y6
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
 	VMOVDQU 480(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 512(CX), Y8
 	VMOVDQU 544(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 576(CX), Y8
 	VMOVDQU 608(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 640(CX), Y8
 	VMOVDQU 672(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 704(CX), Y8
 	VMOVDQU 736(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 768(CX), Y8
 	VMOVDQU 800(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 832(CX), Y8
 	VMOVDQU 864(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Load and process 32 bytes from input 2 to 7 outputs
-	VMOVDQU (SI), Y10
-	ADDQ    $0x20, SI
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
 	VPSRLQ  $0x04, Y10, Y11
 	VPAND   Y7, Y10, Y10
 	VPAND   Y7, Y11, Y11
@@ -13865,48 +28448,41 @@ mulAvxTwo_6x7_loop:
 	VMOVDQU 928(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 960(CX), Y8
 	VMOVDQU 992(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 1024(CX), Y8
 	VMOVDQU 1056(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 1088(CX), Y8
 	VMOVDQU 1120(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 1152(CX), Y8
 	VMOVDQU 1184(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 1216(CX), Y8
 	VMOVDQU 1248(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 1280(CX), Y8
 	VMOVDQU 1312(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Load and process 32 bytes from input 3 to 7 outputs
-	VMOVDQU (DI), Y10
-	ADDQ    $0x20, DI
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
 	VPSRLQ  $0x04, Y10, Y11
 	VPAND   Y7, Y10, Y10
 	VPAND   Y7, Y11, Y11
@@ -13914,48 +28490,41 @@ mulAvxTwo_6x7_loop:
 	VMOVDQU 1376(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 1408(CX), Y8
 	VMOVDQU 1440(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 1472(CX), Y8
 	VMOVDQU 1504(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 1536(CX), Y8
 	VMOVDQU 1568(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 1600(CX), Y8
 	VMOVDQU 1632(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 1664(CX), Y8
 	VMOVDQU 1696(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 1728(CX), Y8
 	VMOVDQU 1760(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Load and process 32 bytes from input 4 to 7 outputs
-	VMOVDQU (R8), Y10
-	ADDQ    $0x20, R8
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y10, Y11
 	VPAND   Y7, Y10, Y10
 	VPAND   Y7, Y11, Y11
@@ -13963,93 +28532,37 @@ mulAvxTwo_6x7_loop:
 	VMOVDQU 1824(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	XOR3WAY( $0x00, Y8, Y9, Y0)
 	VMOVDQU 1856(CX), Y8
 	VMOVDQU 1888(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	XOR3WAY( $0x00, Y8, Y9, Y1)
 	VMOVDQU 1920(CX), Y8
 	VMOVDQU 1952(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	XOR3WAY( $0x00, Y8, Y9, Y2)
 	VMOVDQU 1984(CX), Y8
 	VMOVDQU 2016(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	XOR3WAY( $0x00, Y8, Y9, Y3)
 	VMOVDQU 2048(CX), Y8
 	VMOVDQU 2080(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	XOR3WAY( $0x00, Y8, Y9, Y4)
 	VMOVDQU 2112(CX), Y8
 	VMOVDQU 2144(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	XOR3WAY( $0x00, Y8, Y9, Y5)
 	VMOVDQU 2176(CX), Y8
 	VMOVDQU 2208(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
-	// Load and process 32 bytes from input 5 to 7 outputs
-	VMOVDQU (AX), Y10
-	ADDQ    $0x20, AX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2240(CX), Y8
-	VMOVDQU 2272(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2304(CX), Y8
-	VMOVDQU 2336(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2368(CX), Y8
-	VMOVDQU 2400(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2432(CX), Y8
-	VMOVDQU 2464(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2496(CX), Y8
-	VMOVDQU 2528(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2560(CX), Y8
-	VMOVDQU 2592(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2624(CX), Y8
-	VMOVDQU 2656(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
 	// Store 7 outputs
 	VMOVDQU Y0, (R10)
@@ -14068,224 +28581,862 @@ mulAvxTwo_6x7_loop:
 	ADDQ    $0x20, R9
 
 	// Prepare for next loop
-	DECQ BP
-	JNZ  mulAvxTwo_6x7_loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x7_loop
 	VZEROUPPER
 
-mulAvxTwo_6x7_end:
+mulAvxTwo_5x7_end:
 	RET
 
-// func mulAvxTwo_6x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_6x8(SB), NOSPLIT, $0-88
+// func mulGFNI_5x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x7_64(SB), $8-88
+	// Loading 23 of 35 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 44 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R13
+	MOVQ            96(R9), R14
+	MOVQ            120(R9), R15
+	MOVQ            144(R9), R9
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, DX
+
+mulGFNI_5x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x7_64_loop
+	VZEROUPPER
+
+mulGFNI_5x7_64_end:
+	RET
+
+// func mulGFNI_5x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x7_64Xor(SB), $8-88
+	// Loading 23 of 35 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 44 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R13
+	MOVQ            96(R9), R14
+	MOVQ            120(R9), R15
+	MOVQ            144(R9), R9
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, DX
+
+mulGFNI_5x7_64Xor_loop:
+	// Load 7 outputs
+	VMOVDQU64 (R10), Z23
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R9), Z29
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_5x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x7_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x7Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 109 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 82 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x8_end
+	JZ    mulAvxTwo_5x7Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  start+72(FP), R11
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R14
+	MOVQ  120(R9), R15
+	MOVQ  144(R9), R9
+	MOVQ  start+72(FP), BP
 
-	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, R8
-	ADDQ         R11, R9
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X8
-	VPBROADCASTB X8, Y8
+	// Add start offset to output
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
 
-mulAvxTwo_6x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_5x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU (R13), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU (R14), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU (R15), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU (R9), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (SI), Y11
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y9
-	VMOVDQU 672(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 704(CX), Y9
-	VMOVDQU 736(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 768(CX), Y9
-	VMOVDQU 800(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 832(CX), Y9
-	VMOVDQU 864(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 896(CX), Y9
-	VMOVDQU 928(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 960(CX), Y9
-	VMOVDQU 992(CX), Y10
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 2 to 8 outputs
-	VMOVDQU (DI), Y11
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1024(CX), Y9
-	VMOVDQU 1056(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1088(CX), Y9
-	VMOVDQU 1120(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y5, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y6, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x7Xor_end:
+	RET
+
+// func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x8(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 93 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x8_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), AX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R15
+	MOVQ  168(R8), R8
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_5x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1152(CX), Y9
 	VMOVDQU 1184(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 1216(CX), Y9
 	VMOVDQU 1248(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1280(CX), Y9
 	VMOVDQU 1312(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 1344(CX), Y9
 	VMOVDQU 1376(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 1408(CX), Y9
 	VMOVDQU 1440(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 1472(CX), Y9
 	VMOVDQU 1504(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Load and process 32 bytes from input 3 to 8 outputs
-	VMOVDQU (R8), Y11
-	ADDQ    $0x20, R8
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y8, Y11, Y11
 	VPAND   Y8, Y12, Y12
@@ -14293,54 +29444,46 @@ mulAvxTwo_6x8_loop:
 	VMOVDQU 1568(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
 	VMOVDQU 1600(CX), Y9
 	VMOVDQU 1632(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1664(CX), Y9
 	VMOVDQU 1696(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 1728(CX), Y9
 	VMOVDQU 1760(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1792(CX), Y9
 	VMOVDQU 1824(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 1856(CX), Y9
 	VMOVDQU 1888(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 1920(CX), Y9
 	VMOVDQU 1952(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 1984(CX), Y9
 	VMOVDQU 2016(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
 	// Load and process 32 bytes from input 4 to 8 outputs
-	VMOVDQU (R9), Y11
-	ADDQ    $0x20, R9
+	VMOVDQU (AX), Y11
+	ADDQ    $0x20, AX
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y8, Y11, Y11
 	VPAND   Y8, Y12, Y12
@@ -14348,237 +29491,871 @@ mulAvxTwo_6x8_loop:
 	VMOVDQU 2080(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	XOR3WAY( $0x00, Y9, Y10, Y0)
 	VMOVDQU 2112(CX), Y9
 	VMOVDQU 2144(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 2176(CX), Y9
 	VMOVDQU 2208(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	XOR3WAY( $0x00, Y9, Y10, Y2)
 	VMOVDQU 2240(CX), Y9
 	VMOVDQU 2272(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 2304(CX), Y9
 	VMOVDQU 2336(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	XOR3WAY( $0x00, Y9, Y10, Y4)
 	VMOVDQU 2368(CX), Y9
 	VMOVDQU 2400(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 	VMOVDQU 2432(CX), Y9
 	VMOVDQU 2464(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	XOR3WAY( $0x00, Y9, Y10, Y6)
 	VMOVDQU 2496(CX), Y9
 	VMOVDQU 2528(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 5 to 8 outputs
+	// Store 8 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y6, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y7, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_5x8_loop
+	VZEROUPPER
+
+mulAvxTwo_5x8_end:
+	RET
+
+// func mulGFNI_5x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x8_64(SB), $8-88
+	// Loading 22 of 40 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), AX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R13
+	MOVQ            120(R8), R14
+	MOVQ            144(R8), R15
+	MOVQ            168(R8), R8
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_5x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (SI), Z30
+	ADDQ                $0x40, SI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_5x8_64_loop
+	VZEROUPPER
+
+mulGFNI_5x8_64_end:
+	RET
+
+// func mulGFNI_5x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_5x8_64Xor(SB), $8-88
+	// Loading 22 of 40 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), AX
+	MOVQ            out_base+48(FP), R8
+	MOVQ            out_base+48(FP), R8
+	MOVQ            (R8), R9
+	MOVQ            24(R8), R10
+	MOVQ            48(R8), R11
+	MOVQ            72(R8), R12
+	MOVQ            96(R8), R13
+	MOVQ            120(R8), R14
+	MOVQ            144(R8), R15
+	MOVQ            168(R8), R8
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_5x8_64Xor_loop:
+	// Load 8 outputs
+	VMOVDQU64 (R9), Z22
+	VMOVDQU64 (R10), Z23
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R8), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (SI), Z30
+	ADDQ                $0x40, SI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	VMOVDQU64 Z22, (R9)
+	ADDQ      $0x40, R9
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R8)
+	ADDQ      $0x40, R8
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_5x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x8Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 93 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x8Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), AX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R15
+	MOVQ  168(R8), R8
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_5x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
 	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y8, Y11, Y11
 	VPAND   Y8, Y12, Y12
-	VMOVDQU 2560(CX), Y9
-	VMOVDQU 2592(CX), Y10
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2624(CX), Y9
-	VMOVDQU 2656(CX), Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2688(CX), Y9
-	VMOVDQU 2720(CX), Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2752(CX), Y9
-	VMOVDQU 2784(CX), Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2816(CX), Y9
-	VMOVDQU 2848(CX), Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2880(CX), Y9
-	VMOVDQU 2912(CX), Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU (R14), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2944(CX), Y9
-	VMOVDQU 2976(CX), Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU (R15), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3008(CX), Y9
-	VMOVDQU 3040(CX), Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU (R8), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
-	// Store 8 outputs
-	MOVQ    (R10), R12
-	VMOVDQU Y0, (R12)(R11*1)
-	MOVQ    24(R10), R12
-	VMOVDQU Y1, (R12)(R11*1)
-	MOVQ    48(R10), R12
-	VMOVDQU Y2, (R12)(R11*1)
-	MOVQ    72(R10), R12
-	VMOVDQU Y3, (R12)(R11*1)
-	MOVQ    96(R10), R12
-	VMOVDQU Y4, (R12)(R11*1)
-	MOVQ    120(R10), R12
-	VMOVDQU Y5, (R12)(R11*1)
-	MOVQ    144(R10), R12
-	VMOVDQU Y6, (R12)(R11*1)
-	MOVQ    168(R10), R12
-	VMOVDQU Y7, (R12)(R11*1)
-
-	// Prepare for next loop
-	ADDQ $0x20, R11
-	DECQ AX
-	JNZ  mulAvxTwo_6x8_loop
-	VZEROUPPER
-
-mulAvxTwo_6x8_end:
-	RET
-
-// func mulAvxTwo_6x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_6x9(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 122 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_6x9_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  start+72(FP), R11
-
-	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, R8
-	ADDQ         R11, R9
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X9
-	VPBROADCASTB X9, Y9
-
-mulAvxTwo_6x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (AX), Y11
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y6, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y7, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_5x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x8Xor_end:
+	RET
+
+// func mulAvxTwo_5x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_5x9(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 104 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_5x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
 	VMOVDQU 128(CX), Y10
 	VMOVDQU 160(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	VPXOR   Y10, Y11, Y2
 	VMOVDQU 192(CX), Y10
 	VMOVDQU 224(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	VPXOR   Y10, Y11, Y3
 	VMOVDQU 256(CX), Y10
 	VMOVDQU 288(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	VPXOR   Y10, Y11, Y4
 	VMOVDQU 320(CX), Y10
 	VMOVDQU 352(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	VPXOR   Y10, Y11, Y5
 	VMOVDQU 384(CX), Y10
 	VMOVDQU 416(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	VPXOR   Y10, Y11, Y6
 	VMOVDQU 448(CX), Y10
 	VMOVDQU 480(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	VPXOR   Y10, Y11, Y7
 	VMOVDQU 512(CX), Y10
 	VMOVDQU 544(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y10, Y11, Y8
 
 	// Load and process 32 bytes from input 1 to 9 outputs
 	VMOVDQU (SI), Y12
@@ -14590,56 +30367,47 @@ mulAvxTwo_6x9_loop:
 	VMOVDQU 608(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 640(CX), Y10
 	VMOVDQU 672(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 704(CX), Y10
 	VMOVDQU 736(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 768(CX), Y10
 	VMOVDQU 800(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 832(CX), Y10
 	VMOVDQU 864(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 896(CX), Y10
 	VMOVDQU 928(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 960(CX), Y10
 	VMOVDQU 992(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 1024(CX), Y10
 	VMOVDQU 1056(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 1088(CX), Y10
 	VMOVDQU 1120(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 2 to 9 outputs
 	VMOVDQU (DI), Y12
@@ -14651,56 +30419,47 @@ mulAvxTwo_6x9_loop:
 	VMOVDQU 1184(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 1216(CX), Y10
 	VMOVDQU 1248(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 1280(CX), Y10
 	VMOVDQU 1312(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 1344(CX), Y10
 	VMOVDQU 1376(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 1408(CX), Y10
 	VMOVDQU 1440(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 1472(CX), Y10
 	VMOVDQU 1504(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 1536(CX), Y10
 	VMOVDQU 1568(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 1600(CX), Y10
 	VMOVDQU 1632(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 1664(CX), Y10
 	VMOVDQU 1696(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 3 to 9 outputs
 	VMOVDQU (R8), Y12
@@ -14712,60 +30471,51 @@ mulAvxTwo_6x9_loop:
 	VMOVDQU 1760(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 1792(CX), Y10
 	VMOVDQU 1824(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 1856(CX), Y10
 	VMOVDQU 1888(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 1920(CX), Y10
 	VMOVDQU 1952(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 1984(CX), Y10
 	VMOVDQU 2016(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 2048(CX), Y10
 	VMOVDQU 2080(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 2112(CX), Y10
 	VMOVDQU 2144(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 2176(CX), Y10
 	VMOVDQU 2208(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 2240(CX), Y10
 	VMOVDQU 2272(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
 	// Load and process 32 bytes from input 4 to 9 outputs
-	VMOVDQU (R9), Y12
-	ADDQ    $0x20, R9
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y12, Y13
 	VPAND   Y9, Y12, Y12
 	VPAND   Y9, Y13, Y13
@@ -14773,325 +30523,948 @@ mulAvxTwo_6x9_loop:
 	VMOVDQU 2336(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	XOR3WAY( $0x00, Y10, Y11, Y0)
 	VMOVDQU 2368(CX), Y10
 	VMOVDQU 2400(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	XOR3WAY( $0x00, Y10, Y11, Y1)
 	VMOVDQU 2432(CX), Y10
 	VMOVDQU 2464(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	XOR3WAY( $0x00, Y10, Y11, Y2)
 	VMOVDQU 2496(CX), Y10
 	VMOVDQU 2528(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	XOR3WAY( $0x00, Y10, Y11, Y3)
 	VMOVDQU 2560(CX), Y10
 	VMOVDQU 2592(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	XOR3WAY( $0x00, Y10, Y11, Y4)
 	VMOVDQU 2624(CX), Y10
 	VMOVDQU 2656(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	XOR3WAY( $0x00, Y10, Y11, Y5)
 	VMOVDQU 2688(CX), Y10
 	VMOVDQU 2720(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	XOR3WAY( $0x00, Y10, Y11, Y6)
 	VMOVDQU 2752(CX), Y10
 	VMOVDQU 2784(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	XOR3WAY( $0x00, Y10, Y11, Y7)
 	VMOVDQU 2816(CX), Y10
 	VMOVDQU 2848(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 5 to 9 outputs
-	VMOVDQU (DX), Y12
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2880(CX), Y10
-	VMOVDQU 2912(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2944(CX), Y10
-	VMOVDQU 2976(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3008(CX), Y10
-	VMOVDQU 3040(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3072(CX), Y10
-	VMOVDQU 3104(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3136(CX), Y10
-	VMOVDQU 3168(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3200(CX), Y10
-	VMOVDQU 3232(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3264(CX), Y10
-	VMOVDQU 3296(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3328(CX), Y10
-	VMOVDQU 3360(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3392(CX), Y10
-	VMOVDQU 3424(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	// Store 9 outputs
+	MOVQ    (R9), R11
+	VMOVDQU Y0, (R11)(R10*1)
+	MOVQ    24(R9), R11
+	VMOVDQU Y1, (R11)(R10*1)
+	MOVQ    48(R9), R11
+	VMOVDQU Y2, (R11)(R10*1)
+	MOVQ    72(R9), R11
+	VMOVDQU Y3, (R11)(R10*1)
+	MOVQ    96(R9), R11
+	VMOVDQU Y4, (R11)(R10*1)
+	MOVQ    120(R9), R11
+	VMOVDQU Y5, (R11)(R10*1)
+	MOVQ    144(R9), R11
+	VMOVDQU Y6, (R11)(R10*1)
+	MOVQ    168(R9), R11
+	VMOVDQU Y7, (R11)(R10*1)
+	MOVQ    192(R9), R11
+	VMOVDQU Y8, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R10
+	DECQ AX
+	JNZ  mulAvxTwo_5x9_loop
+	VZEROUPPER
+
+mulAvxTwo_5x9_end:
+	RET
+
+// func mulGFNI_5x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x9_64(SB), $0-88
+	// Loading 21 of 45 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 56 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, DX
+
+mulGFNI_5x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
 	// Store 9 outputs
-	MOVQ    (R10), R12
-	VMOVDQU Y0, (R12)(R11*1)
-	MOVQ    24(R10), R12
-	VMOVDQU Y1, (R12)(R11*1)
-	MOVQ    48(R10), R12
-	VMOVDQU Y2, (R12)(R11*1)
-	MOVQ    72(R10), R12
-	VMOVDQU Y3, (R12)(R11*1)
-	MOVQ    96(R10), R12
-	VMOVDQU Y4, (R12)(R11*1)
-	MOVQ    120(R10), R12
-	VMOVDQU Y5, (R12)(R11*1)
-	MOVQ    144(R10), R12
-	VMOVDQU Y6, (R12)(R11*1)
-	MOVQ    168(R10), R12
-	VMOVDQU Y7, (R12)(R11*1)
-	MOVQ    192(R10), R12
-	VMOVDQU Y8, (R12)(R11*1)
+	MOVQ      (R9), R11
+	VMOVDQU64 Z21, (R11)(R10*1)
+	MOVQ      24(R9), R11
+	VMOVDQU64 Z22, (R11)(R10*1)
+	MOVQ      48(R9), R11
+	VMOVDQU64 Z23, (R11)(R10*1)
+	MOVQ      72(R9), R11
+	VMOVDQU64 Z24, (R11)(R10*1)
+	MOVQ      96(R9), R11
+	VMOVDQU64 Z25, (R11)(R10*1)
+	MOVQ      120(R9), R11
+	VMOVDQU64 Z26, (R11)(R10*1)
+	MOVQ      144(R9), R11
+	VMOVDQU64 Z27, (R11)(R10*1)
+	MOVQ      168(R9), R11
+	VMOVDQU64 Z28, (R11)(R10*1)
+	MOVQ      192(R9), R11
+	VMOVDQU64 Z29, (R11)(R10*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R11
+	ADDQ $0x40, R10
 	DECQ AX
-	JNZ  mulAvxTwo_6x9_loop
+	JNZ  mulGFNI_5x9_64_loop
 	VZEROUPPER
 
-mulAvxTwo_6x9_end:
+mulGFNI_5x9_64_end:
 	RET
 
-// func mulAvxTwo_6x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_6x10(SB), NOSPLIT, $0-88
+// func mulGFNI_5x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x9_64Xor(SB), $0-88
+	// Loading 21 of 45 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 56 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, DX
+
+mulGFNI_5x9_64Xor_loop:
+	// Load 9 outputs
+	MOVQ      (R9), R11
+	VMOVDQU64 (R11)(R10*1), Z21
+	MOVQ      24(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z22
+	MOVQ      48(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z23
+	MOVQ      72(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z24
+	MOVQ      96(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z25
+	MOVQ      120(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z26
+	MOVQ      144(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z27
+	MOVQ      168(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z28
+	MOVQ      192(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R9), R11
+	VMOVDQU64 Z21, (R11)(R10*1)
+	MOVQ      24(R9), R11
+	VMOVDQU64 Z22, (R11)(R10*1)
+	MOVQ      48(R9), R11
+	VMOVDQU64 Z23, (R11)(R10*1)
+	MOVQ      72(R9), R11
+	VMOVDQU64 Z24, (R11)(R10*1)
+	MOVQ      96(R9), R11
+	VMOVDQU64 Z25, (R11)(R10*1)
+	MOVQ      120(R9), R11
+	VMOVDQU64 Z26, (R11)(R10*1)
+	MOVQ      144(R9), R11
+	VMOVDQU64 Z27, (R11)(R10*1)
+	MOVQ      168(R9), R11
+	VMOVDQU64 Z28, (R11)(R10*1)
+	MOVQ      192(R9), R11
+	VMOVDQU64 Z29, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R10
+	DECQ AX
+	JNZ  mulGFNI_5x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x9Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 135 YMM used
+	// Full registers estimated 104 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x10_end
+	JZ    mulAvxTwo_5x9Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  start+72(FP), R11
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, R8
-	ADDQ         R11, R9
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X10
-	VPBROADCASTB X10, Y10
-
-mulAvxTwo_6x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X9
+	VPBROADCASTB X9, Y9
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
+mulAvxTwo_5x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	MOVQ    (R9), R11
+	VMOVDQU (R11)(R10*1), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	MOVQ    24(R9), R11
+	VMOVDQU (R11)(R10*1), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	MOVQ    48(R9), R11
+	VMOVDQU (R11)(R10*1), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	MOVQ    72(R9), R11
+	VMOVDQU (R11)(R10*1), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	MOVQ    96(R9), R11
+	VMOVDQU (R11)(R10*1), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	MOVQ    120(R9), R11
+	VMOVDQU (R11)(R10*1), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	MOVQ    144(R9), R11
+	VMOVDQU (R11)(R10*1), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	MOVQ    168(R9), R11
+	VMOVDQU (R11)(R10*1), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	MOVQ    192(R9), R11
+	VMOVDQU (R11)(R10*1), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 832(CX), Y11
-	VMOVDQU 864(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 896(CX), Y11
-	VMOVDQU 928(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 960(CX), Y11
-	VMOVDQU 992(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1024(CX), Y11
-	VMOVDQU 1056(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1088(CX), Y11
-	VMOVDQU 1120(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1152(CX), Y11
-	VMOVDQU 1184(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1216(CX), Y11
-	VMOVDQU 1248(CX), Y12
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	MOVQ    (R9), R11
+	VMOVDQU Y0, (R11)(R10*1)
+	MOVQ    24(R9), R11
+	VMOVDQU Y1, (R11)(R10*1)
+	MOVQ    48(R9), R11
+	VMOVDQU Y2, (R11)(R10*1)
+	MOVQ    72(R9), R11
+	VMOVDQU Y3, (R11)(R10*1)
+	MOVQ    96(R9), R11
+	VMOVDQU Y4, (R11)(R10*1)
+	MOVQ    120(R9), R11
+	VMOVDQU Y5, (R11)(R10*1)
+	MOVQ    144(R9), R11
+	VMOVDQU Y6, (R11)(R10*1)
+	MOVQ    168(R9), R11
+	VMOVDQU Y7, (R11)(R10*1)
+	MOVQ    192(R9), R11
+	VMOVDQU Y8, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R10
+	DECQ AX
+	JNZ  mulAvxTwo_5x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x9Xor_end:
+	RET
+
+// func mulAvxTwo_5x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 115 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_5x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 2 to 10 outputs
 	VMOVDQU (DI), Y13
@@ -15103,62 +31476,52 @@ mulAvxTwo_6x10_loop:
 	VMOVDQU 1312(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 1344(CX), Y11
 	VMOVDQU 1376(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 1408(CX), Y11
 	VMOVDQU 1440(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 1472(CX), Y11
 	VMOVDQU 1504(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 1536(CX), Y11
 	VMOVDQU 1568(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 1600(CX), Y11
 	VMOVDQU 1632(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 1664(CX), Y11
 	VMOVDQU 1696(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 1728(CX), Y11
 	VMOVDQU 1760(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 1792(CX), Y11
 	VMOVDQU 1824(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 1856(CX), Y11
 	VMOVDQU 1888(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 3 to 10 outputs
 	VMOVDQU (R8), Y13
@@ -15170,66 +31533,56 @@ mulAvxTwo_6x10_loop:
 	VMOVDQU 1952(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 1984(CX), Y11
 	VMOVDQU 2016(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 2048(CX), Y11
 	VMOVDQU 2080(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 2112(CX), Y11
 	VMOVDQU 2144(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 2176(CX), Y11
 	VMOVDQU 2208(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 2240(CX), Y11
 	VMOVDQU 2272(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 2304(CX), Y11
 	VMOVDQU 2336(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 2368(CX), Y11
 	VMOVDQU 2400(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 2432(CX), Y11
 	VMOVDQU 2464(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 2496(CX), Y11
 	VMOVDQU 2528(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 4 to 10 outputs
-	VMOVDQU (R9), Y13
-	ADDQ    $0x20, R9
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y10, Y13, Y13
 	VPAND   Y10, Y14, Y14
@@ -15237,352 +31590,1026 @@ mulAvxTwo_6x10_loop:
 	VMOVDQU 2592(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 2624(CX), Y11
 	VMOVDQU 2656(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 2688(CX), Y11
 	VMOVDQU 2720(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 2752(CX), Y11
 	VMOVDQU 2784(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 2816(CX), Y11
 	VMOVDQU 2848(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 2880(CX), Y11
 	VMOVDQU 2912(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 2944(CX), Y11
 	VMOVDQU 2976(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 3008(CX), Y11
 	VMOVDQU 3040(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 3072(CX), Y11
 	VMOVDQU 3104(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 3136(CX), Y11
 	VMOVDQU 3168(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 5 to 10 outputs
-	VMOVDQU (DX), Y13
-	ADDQ    $0x20, DX
+	// Store 10 outputs
+	MOVQ    (R9), R11
+	VMOVDQU Y0, (R11)(R10*1)
+	MOVQ    24(R9), R11
+	VMOVDQU Y1, (R11)(R10*1)
+	MOVQ    48(R9), R11
+	VMOVDQU Y2, (R11)(R10*1)
+	MOVQ    72(R9), R11
+	VMOVDQU Y3, (R11)(R10*1)
+	MOVQ    96(R9), R11
+	VMOVDQU Y4, (R11)(R10*1)
+	MOVQ    120(R9), R11
+	VMOVDQU Y5, (R11)(R10*1)
+	MOVQ    144(R9), R11
+	VMOVDQU Y6, (R11)(R10*1)
+	MOVQ    168(R9), R11
+	VMOVDQU Y7, (R11)(R10*1)
+	MOVQ    192(R9), R11
+	VMOVDQU Y8, (R11)(R10*1)
+	MOVQ    216(R9), R11
+	VMOVDQU Y9, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R10
+	DECQ AX
+	JNZ  mulAvxTwo_5x10_loop
+	VZEROUPPER
+
+mulAvxTwo_5x10_end:
+	RET
+
+// func mulGFNI_5x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x10_64(SB), $0-88
+	// Loading 20 of 50 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 62 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, DX
+
+mulGFNI_5x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R9), R11
+	VMOVDQU64 Z20, (R11)(R10*1)
+	MOVQ      24(R9), R11
+	VMOVDQU64 Z21, (R11)(R10*1)
+	MOVQ      48(R9), R11
+	VMOVDQU64 Z22, (R11)(R10*1)
+	MOVQ      72(R9), R11
+	VMOVDQU64 Z23, (R11)(R10*1)
+	MOVQ      96(R9), R11
+	VMOVDQU64 Z24, (R11)(R10*1)
+	MOVQ      120(R9), R11
+	VMOVDQU64 Z25, (R11)(R10*1)
+	MOVQ      144(R9), R11
+	VMOVDQU64 Z26, (R11)(R10*1)
+	MOVQ      168(R9), R11
+	VMOVDQU64 Z27, (R11)(R10*1)
+	MOVQ      192(R9), R11
+	VMOVDQU64 Z28, (R11)(R10*1)
+	MOVQ      216(R9), R11
+	VMOVDQU64 Z29, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R10
+	DECQ AX
+	JNZ  mulGFNI_5x10_64_loop
+	VZEROUPPER
+
+mulGFNI_5x10_64_end:
+	RET
+
+// func mulGFNI_5x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_5x10_64Xor(SB), $0-88
+	// Loading 20 of 50 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 62 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_5x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), DX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, DX
+
+mulGFNI_5x10_64Xor_loop:
+	// Load 10 outputs
+	MOVQ      (R9), R11
+	VMOVDQU64 (R11)(R10*1), Z20
+	MOVQ      24(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z21
+	MOVQ      48(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z22
+	MOVQ      72(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z23
+	MOVQ      96(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z24
+	MOVQ      120(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z25
+	MOVQ      144(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z26
+	MOVQ      168(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z27
+	MOVQ      192(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z28
+	MOVQ      216(R9), R11
+	VMOVDQU64 (R11)(R10*1), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R9), R11
+	VMOVDQU64 Z20, (R11)(R10*1)
+	MOVQ      24(R9), R11
+	VMOVDQU64 Z21, (R11)(R10*1)
+	MOVQ      48(R9), R11
+	VMOVDQU64 Z22, (R11)(R10*1)
+	MOVQ      72(R9), R11
+	VMOVDQU64 Z23, (R11)(R10*1)
+	MOVQ      96(R9), R11
+	VMOVDQU64 Z24, (R11)(R10*1)
+	MOVQ      120(R9), R11
+	VMOVDQU64 Z25, (R11)(R10*1)
+	MOVQ      144(R9), R11
+	VMOVDQU64 Z26, (R11)(R10*1)
+	MOVQ      168(R9), R11
+	VMOVDQU64 Z27, (R11)(R10*1)
+	MOVQ      192(R9), R11
+	VMOVDQU64 Z28, (R11)(R10*1)
+	MOVQ      216(R9), R11
+	VMOVDQU64 Z29, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R10
+	DECQ AX
+	JNZ  mulGFNI_5x10_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_5x10_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_5x10Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 115 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_5x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y10, Y13, Y13
 	VPAND   Y10, Y14, Y14
-	VMOVDQU 3200(CX), Y11
-	VMOVDQU 3232(CX), Y12
+	MOVQ    (R9), R11
+	VMOVDQU (R11)(R10*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 3264(CX), Y11
-	VMOVDQU 3296(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	MOVQ    24(R9), R11
+	VMOVDQU (R11)(R10*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 3328(CX), Y11
-	VMOVDQU 3360(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	MOVQ    48(R9), R11
+	VMOVDQU (R11)(R10*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 3392(CX), Y11
-	VMOVDQU 3424(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	MOVQ    72(R9), R11
+	VMOVDQU (R11)(R10*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 3456(CX), Y11
-	VMOVDQU 3488(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	MOVQ    96(R9), R11
+	VMOVDQU (R11)(R10*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 3520(CX), Y11
-	VMOVDQU 3552(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	MOVQ    120(R9), R11
+	VMOVDQU (R11)(R10*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 3584(CX), Y11
-	VMOVDQU 3616(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	MOVQ    144(R9), R11
+	VMOVDQU (R11)(R10*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3648(CX), Y11
-	VMOVDQU 3680(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	MOVQ    168(R9), R11
+	VMOVDQU (R11)(R10*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3712(CX), Y11
-	VMOVDQU 3744(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	MOVQ    192(R9), R11
+	VMOVDQU (R11)(R10*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3776(CX), Y11
-	VMOVDQU 3808(CX), Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	MOVQ    216(R9), R11
+	VMOVDQU (R11)(R10*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
-
-	// Store 10 outputs
-	MOVQ    (R10), R12
-	VMOVDQU Y0, (R12)(R11*1)
-	MOVQ    24(R10), R12
-	VMOVDQU Y1, (R12)(R11*1)
-	MOVQ    48(R10), R12
-	VMOVDQU Y2, (R12)(R11*1)
-	MOVQ    72(R10), R12
-	VMOVDQU Y3, (R12)(R11*1)
-	MOVQ    96(R10), R12
-	VMOVDQU Y4, (R12)(R11*1)
-	MOVQ    120(R10), R12
-	VMOVDQU Y5, (R12)(R11*1)
-	MOVQ    144(R10), R12
-	VMOVDQU Y6, (R12)(R11*1)
-	MOVQ    168(R10), R12
-	VMOVDQU Y7, (R12)(R11*1)
-	MOVQ    192(R10), R12
-	VMOVDQU Y8, (R12)(R11*1)
-	MOVQ    216(R10), R12
-	VMOVDQU Y9, (R12)(R11*1)
-
-	// Prepare for next loop
-	ADDQ $0x20, R11
-	DECQ AX
-	JNZ  mulAvxTwo_6x10_loop
-	VZEROUPPER
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-mulAvxTwo_6x10_end:
-	RET
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-// func mulAvxTwo_7x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_7x1(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 18 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_7x1_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  (R11), R11
-	MOVQ  start+72(FP), R12
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	MOVQ    (R9), R11
+	VMOVDQU Y0, (R11)(R10*1)
+	MOVQ    24(R9), R11
+	VMOVDQU Y1, (R11)(R10*1)
+	MOVQ    48(R9), R11
+	VMOVDQU Y2, (R11)(R10*1)
+	MOVQ    72(R9), R11
+	VMOVDQU Y3, (R11)(R10*1)
+	MOVQ    96(R9), R11
+	VMOVDQU Y4, (R11)(R10*1)
+	MOVQ    120(R9), R11
+	VMOVDQU Y5, (R11)(R10*1)
+	MOVQ    144(R9), R11
+	VMOVDQU Y6, (R11)(R10*1)
+	MOVQ    168(R9), R11
+	VMOVDQU Y7, (R11)(R10*1)
+	MOVQ    192(R9), R11
+	VMOVDQU Y8, (R11)(R10*1)
+	MOVQ    216(R9), R11
+	VMOVDQU Y9, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R10
+	DECQ AX
+	JNZ  mulAvxTwo_5x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x10Xor_end:
+	RET
+
+// func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x1(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 16 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_6x1_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), R8
+	MOVQ    120(CX), CX
+	MOVQ    out_base+48(FP), R9
+	MOVQ    (R9), R9
+	MOVQ    start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R12, R11
+	ADDQ R10, R9
 
 	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X1
-	VPBROADCASTB X1, Y1
-
-mulAvxTwo_7x1_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
+	ADDQ         R10, DX
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, CX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X13
+	VPBROADCASTB X13, Y13
 
+mulAvxTwo_6x1_loop:
 	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (BX), Y4
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU (CX), Y2
-	VMOVDQU 32(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VMOVDQU (DX), Y14
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y0, Y14
+	VPSHUFB Y15, Y1, Y15
+	VPXOR   Y14, Y15, Y12
 
 	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (SI), Y4
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VMOVDQU (BX), Y14
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y2, Y14
+	VPSHUFB Y15, Y3, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
 
 	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (DI), Y4
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 128(CX), Y2
-	VMOVDQU 160(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VMOVDQU (SI), Y14
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y4, Y14
+	VPSHUFB Y15, Y5, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
 
 	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (R8), Y4
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 192(CX), Y2
-	VMOVDQU 224(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VMOVDQU (DI), Y14
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y6, Y14
+	VPSHUFB Y15, Y7, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
 
 	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (R9), Y4
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 256(CX), Y2
-	VMOVDQU 288(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VMOVDQU (R8), Y14
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y8, Y14
+	VPSHUFB Y15, Y9, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
 
 	// Load and process 32 bytes from input 5 to 1 outputs
-	VMOVDQU (R10), Y4
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 320(CX), Y2
-	VMOVDQU 352(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
-
-	// Load and process 32 bytes from input 6 to 1 outputs
-	VMOVDQU (DX), Y4
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 384(CX), Y2
-	VMOVDQU 416(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VMOVDQU (CX), Y14
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y10, Y14
+	VPSHUFB Y15, Y11, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
 
 	// Store 1 outputs
-	VMOVDQU Y0, (R11)
-	ADDQ    $0x20, R11
+	VMOVDQU Y12, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_7x1_loop
+	JNZ  mulAvxTwo_6x1_loop
 	VZEROUPPER
 
-mulAvxTwo_7x1_end:
+mulAvxTwo_6x1_end:
 	RET
 
-// func mulAvxTwo_7x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_7x1_64(SB), $0-88
+// func mulAvxTwo_6x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x1_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 18 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), AX
+	JZ    mulAvxTwo_6x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
 	MOVQ  out_base+48(FP), R10
 	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R10
 	MOVQ  start+72(FP), R11
 
+	// Add start offset to output
+	ADDQ R11, R10
+
 	// Add start offset to input
-	ADDQ         R11, DX
 	ADDQ         R11, BX
 	ADDQ         R11, SI
 	ADDQ         R11, DI
 	ADDQ         R11, R8
 	ADDQ         R11, R9
-	ADDQ         R11, AX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X2
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X2
 	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R12
-	SHRQ         $0x06, R12
-
-mulAvxTwo_7x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
 
+mulAvxTwo_6x1_64_loop:
 	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -15595,15 +32622,13 @@ mulAvxTwo_7x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
 
 	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -15616,15 +32641,13 @@ mulAvxTwo_7x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -15637,15 +32660,13 @@ mulAvxTwo_7x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -15658,15 +32679,13 @@ mulAvxTwo_7x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y6
-	VMOVDQU 32(R8), Y5
-	ADDQ    $0x40, R8
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -15679,15 +32698,13 @@ mulAvxTwo_7x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Load and process 64 bytes from input 5 to 1 outputs
-	VMOVDQU (R9), Y6
-	VMOVDQU 32(R9), Y5
-	ADDQ    $0x40, R9
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -15700,109 +32717,548 @@ mulAvxTwo_7x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 6 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
 	// Store 1 outputs
-	MOVQ    (R10), R13
-	VMOVDQU Y0, (R13)(R11*1)
-	VMOVDQU Y1, 32(R13)(R11*1)
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
 
 	// Prepare for next loop
-	ADDQ $0x40, R11
-	DECQ R12
-	JNZ  mulAvxTwo_7x1_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x1_64_loop
 	VZEROUPPER
 
-mulAvxTwo_7x1_64_end:
+mulAvxTwo_6x1_64_end:
 	RET
 
-// func mulAvxTwo_7x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_7x2(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_6x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x1_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 35 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_7x2_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  (R11), R12
-	MOVQ  24(R11), R11
-	MOVQ  start+72(FP), R13
+	// Full registers estimated 9 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R9
+	MOVQ            start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R13, R12
-	ADDQ R13, R11
+	ADDQ R10, R9
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X2
-	VPBROADCASTB X2, Y2
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, CX
 
-mulAvxTwo_7x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulGFNI_6x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z7
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z7, Z6
 
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z7
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z7
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z7
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z7
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (CX), Z7
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z5, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Store 1 outputs
+	VMOVDQU64 Z6, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x1_64_loop
+	VZEROUPPER
+
+mulGFNI_6x1_64_end:
+	RET
+
+// func mulGFNI_6x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 9 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R9
+	MOVQ            start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+
+	// Add start offset to input
+	ADDQ R10, DX
+	ADDQ R10, BX
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, CX
+
+mulGFNI_6x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (R9), Z6
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z7
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z7
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z7
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z7
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z7
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (CX), Z7
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z5, Z7, Z7
+	VXORPD         Z6, Z7, Z6
+
+	// Store 1 outputs
+	VMOVDQU64 Z6, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 16 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_6x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), R8
+	MOVQ    120(CX), CX
+	MOVQ    out_base+48(FP), R9
+	MOVQ    (R9), R9
+	MOVQ    start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+
+	// Add start offset to input
+	ADDQ         R10, DX
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, CX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X13
+	VPBROADCASTB X13, Y13
+
+mulAvxTwo_6x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y14
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VMOVDQU (R9), Y12
+	VPSHUFB Y14, Y0, Y14
+	VPSHUFB Y15, Y1, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y14
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y2, Y14
+	VPSHUFB Y15, Y3, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y14
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y4, Y14
+	VPSHUFB Y15, Y5, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (DI), Y14
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y6, Y14
+	VPSHUFB Y15, Y7, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R8), Y14
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y8, Y14
+	VPSHUFB Y15, Y9, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (CX), Y14
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y10, Y14
+	VPSHUFB Y15, Y11, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
+
+	// Store 1 outputs
+	VMOVDQU Y12, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_6x1Xor_end:
+	RET
+
+// func mulAvxTwo_6x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_6x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R10
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_6x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R10), Y0
+	VMOVDQU 32(R10), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_6x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x2(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 31 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_6x2_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R10
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+	ADDQ R12, R10
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_6x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
 
 	// Load and process 32 bytes from input 1 to 2 outputs
 	VMOVDQU (SI), Y5
@@ -15814,14 +33270,12 @@ mulAvxTwo_7x2_loop:
 	VMOVDQU 160(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 192(CX), Y3
 	VMOVDQU 224(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 2 to 2 outputs
 	VMOVDQU (DI), Y5
@@ -15833,14 +33287,12 @@ mulAvxTwo_7x2_loop:
 	VMOVDQU 288(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 320(CX), Y3
 	VMOVDQU 352(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 3 to 2 outputs
 	VMOVDQU (R8), Y5
@@ -15852,14 +33304,12 @@ mulAvxTwo_7x2_loop:
 	VMOVDQU 416(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 448(CX), Y3
 	VMOVDQU 480(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 4 to 2 outputs
 	VMOVDQU (R9), Y5
@@ -15871,18 +33321,16 @@ mulAvxTwo_7x2_loop:
 	VMOVDQU 544(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 576(CX), Y3
 	VMOVDQU 608(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 5 to 2 outputs
-	VMOVDQU (R10), Y5
-	ADDQ    $0x20, R10
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y5, Y6
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y6, Y6
@@ -15890,96 +33338,71 @@ mulAvxTwo_7x2_loop:
 	VMOVDQU 672(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 704(CX), Y3
 	VMOVDQU 736(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 6 to 2 outputs
-	VMOVDQU (DX), Y5
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 768(CX), Y3
-	VMOVDQU 800(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 832(CX), Y3
-	VMOVDQU 864(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Store 2 outputs
-	VMOVDQU Y0, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y1, (R11)
+	VMOVDQU Y0, (R11)
 	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_7x2_loop
+	JNZ  mulAvxTwo_6x2_loop
 	VZEROUPPER
 
-mulAvxTwo_7x2_end:
+mulAvxTwo_6x2_end:
 	RET
 
-// func mulAvxTwo_7x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_7x2_64(SB), $0-88
+// func mulAvxTwo_6x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 35 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 57 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), AX
+	JZ    mulAvxTwo_6x2_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
 	MOVQ  out_base+48(FP), R10
 	MOVQ  out_base+48(FP), R10
-	MOVQ  start+72(FP), R11
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R10
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+	ADDQ R12, R10
 
 	// Add start offset to input
-	ADDQ         R11, DX
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, R8
-	ADDQ         R11, R9
-	ADDQ         R11, AX
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, DX
 	MOVQ         $0x0000000f, R12
 	MOVQ         R12, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R12
-	SHRQ         $0x06, R12
-
-mulAvxTwo_7x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
 
+mulAvxTwo_6x2_64_loop:
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -15992,25 +33415,21 @@ mulAvxTwo_7x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -16023,25 +33442,21 @@ mulAvxTwo_7x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -16054,25 +33469,21 @@ mulAvxTwo_7x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 320(CX), Y5
 	VMOVDQU 352(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (DI), Y9
-	VMOVDQU 32(DI), Y11
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -16085,25 +33496,21 @@ mulAvxTwo_7x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 448(CX), Y5
 	VMOVDQU 480(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 4 to 2 outputs
-	VMOVDQU (R8), Y9
-	VMOVDQU 32(R8), Y11
-	ADDQ    $0x40, R8
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -16116,25 +33523,21 @@ mulAvxTwo_7x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 576(CX), Y5
 	VMOVDQU 608(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 5 to 2 outputs
-	VMOVDQU (R9), Y9
-	VMOVDQU 32(R9), Y11
-	ADDQ    $0x40, R9
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -16147,117 +33550,679 @@ mulAvxTwo_7x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 704(CX), Y5
 	VMOVDQU 736(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
-
-	// Load and process 64 bytes from input 6 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Store 2 outputs
-	MOVQ    (R10), R13
-	VMOVDQU Y0, (R13)(R11*1)
-	VMOVDQU Y1, 32(R13)(R11*1)
-	MOVQ    24(R10), R13
-	VMOVDQU Y2, (R13)(R11*1)
-	VMOVDQU Y3, 32(R13)(R11*1)
+	VMOVDQU Y0, (R11)
+	VMOVDQU Y1, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y2, (R10)
+	VMOVDQU Y3, 32(R10)
+	ADDQ    $0x40, R10
 
 	// Prepare for next loop
-	ADDQ $0x40, R11
-	DECQ R12
-	JNZ  mulAvxTwo_7x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x2_64_loop
 	VZEROUPPER
 
-mulAvxTwo_7x2_64_end:
+mulAvxTwo_6x2_64_end:
 	RET
 
-// func mulAvxTwo_7x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_7x3(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_6x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x2_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 50 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	// Full registers estimated 16 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R9
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+	ADDQ R11, R9
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, CX
+
+mulGFNI_6x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z14
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z14, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z14, Z13
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z14
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z3, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z14
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z5, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z14
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z7, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z14
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z9, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (CX), Z14
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z10, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z11, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Store 2 outputs
+	VMOVDQU64 Z12, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x2_64_loop
+	VZEROUPPER
+
+mulGFNI_6x2_64_end:
+	RET
+
+// func mulGFNI_6x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 16 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R9
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+	ADDQ R11, R9
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, CX
+
+mulGFNI_6x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (R10), Z12
+	VMOVDQU64 (R9), Z13
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z14
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z1, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z14
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z3, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z14
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z5, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z14
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z7, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z14
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z9, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (CX), Z14
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z10, Z14, Z15
+	VXORPD         Z12, Z15, Z12
+	VGF2P8AFFINEQB $0x00, Z11, Z14, Z15
+	VXORPD         Z13, Z15, Z13
+
+	// Store 2 outputs
+	VMOVDQU64 Z12, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z13, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x2Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 31 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x3_end
+	JZ    mulAvxTwo_6x2Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  (R11), R12
-	MOVQ  24(R11), R13
-	MOVQ  48(R11), R11
-	MOVQ  start+72(FP), R14
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R10
+	MOVQ  start+72(FP), R12
 
 	// Add start offset to output
-	ADDQ R14, R12
-	ADDQ R14, R13
-	ADDQ R14, R11
+	ADDQ R12, R11
+	ADDQ R12, R10
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X3
-	VPBROADCASTB X3, Y3
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X2
+	VPBROADCASTB X2, Y2
 
-mulAvxTwo_7x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
+mulAvxTwo_6x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R11), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_6x2Xor_end:
+	RET
+
+// func mulAvxTwo_6x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 57 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_6x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R10
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+	ADDQ R12, R10
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_6x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R11), Y0
+	VMOVDQU 32(R11), Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 32(R10), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R11)
+	VMOVDQU Y1, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y2, (R10)
+	VMOVDQU Y3, 32(R10)
+	ADDQ    $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_6x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x3(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 44 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_6x3_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R10
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R10
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X3
+	VPBROADCASTB X3, Y3
 
+mulAvxTwo_6x3_loop:
 	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
@@ -16268,20 +34233,17 @@ mulAvxTwo_7x3_loop:
 	VMOVDQU 32(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	VPXOR   Y4, Y5, Y0
 	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	VPXOR   Y4, Y5, Y1
 	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y4, Y5, Y2
 
 	// Load and process 32 bytes from input 1 to 3 outputs
 	VMOVDQU (SI), Y6
@@ -16293,20 +34255,17 @@ mulAvxTwo_7x3_loop:
 	VMOVDQU 224(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 256(CX), Y4
 	VMOVDQU 288(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 320(CX), Y4
 	VMOVDQU 352(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 2 to 3 outputs
 	VMOVDQU (DI), Y6
@@ -16318,20 +34277,17 @@ mulAvxTwo_7x3_loop:
 	VMOVDQU 416(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 448(CX), Y4
 	VMOVDQU 480(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 512(CX), Y4
 	VMOVDQU 544(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 3 to 3 outputs
 	VMOVDQU (R8), Y6
@@ -16343,20 +34299,17 @@ mulAvxTwo_7x3_loop:
 	VMOVDQU 608(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 640(CX), Y4
 	VMOVDQU 672(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 704(CX), Y4
 	VMOVDQU 736(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 4 to 3 outputs
 	VMOVDQU (R9), Y6
@@ -16368,24 +34321,21 @@ mulAvxTwo_7x3_loop:
 	VMOVDQU 800(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 832(CX), Y4
 	VMOVDQU 864(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 896(CX), Y4
 	VMOVDQU 928(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 5 to 3 outputs
-	VMOVDQU (R10), Y6
-	ADDQ    $0x20, R10
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -16393,112 +34343,80 @@ mulAvxTwo_7x3_loop:
 	VMOVDQU 992(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 1024(CX), Y4
 	VMOVDQU 1056(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 1088(CX), Y4
 	VMOVDQU 1120(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
-
-	// Load and process 32 bytes from input 6 to 3 outputs
-	VMOVDQU (DX), Y6
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 1152(CX), Y4
-	VMOVDQU 1184(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1216(CX), Y4
-	VMOVDQU 1248(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1280(CX), Y4
-	VMOVDQU 1312(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Store 3 outputs
-	VMOVDQU Y0, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y1, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y2, (R11)
+	VMOVDQU Y0, (R11)
 	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_7x3_loop
+	JNZ  mulAvxTwo_6x3_loop
 	VZEROUPPER
 
-mulAvxTwo_7x3_end:
+mulAvxTwo_6x3_end:
 	RET
 
-// func mulAvxTwo_7x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_7x3_64(SB), $0-88
+// func mulAvxTwo_6x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 50 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 82 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), AX
+	JZ    mulAvxTwo_6x3_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
 	MOVQ  out_base+48(FP), R10
 	MOVQ  out_base+48(FP), R10
-	MOVQ  start+72(FP), R11
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R10
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R10
 
 	// Add start offset to input
-	ADDQ         R11, DX
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, R8
-	ADDQ         R11, R9
-	ADDQ         R11, AX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X6
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R12
-	SHRQ         $0x06, R12
-
-mulAvxTwo_7x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
 
+mulAvxTwo_6x3_64_loop:
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -16511,35 +34429,29 @@ mulAvxTwo_7x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -16552,35 +34464,29 @@ mulAvxTwo_7x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (SI), Y11
-	VMOVDQU 32(SI), Y13
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -16593,35 +34499,29 @@ mulAvxTwo_7x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 3 to 3 outputs
-	VMOVDQU (DI), Y11
-	VMOVDQU 32(DI), Y13
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -16634,35 +34534,29 @@ mulAvxTwo_7x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 4 to 3 outputs
-	VMOVDQU (R8), Y11
-	VMOVDQU 32(R8), Y13
-	ADDQ    $0x40, R8
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -16675,35 +34569,29 @@ mulAvxTwo_7x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 832(CX), Y7
 	VMOVDQU 864(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 896(CX), Y7
 	VMOVDQU 928(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 5 to 3 outputs
-	VMOVDQU (R9), Y11
-	VMOVDQU 32(R9), Y13
-	ADDQ    $0x40, R9
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -16716,7241 +34604,47101 @@ mulAvxTwo_7x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1024(CX), Y7
 	VMOVDQU 1056(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1088(CX), Y7
 	VMOVDQU 1120(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 64 bytes from input 6 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 1152(CX), Y7
-	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1216(CX), Y7
-	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y7
-	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Store 3 outputs
+	VMOVDQU Y0, (R11)
+	VMOVDQU Y1, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y2, (R12)
+	VMOVDQU Y3, 32(R12)
+	ADDQ    $0x40, R12
+	VMOVDQU Y4, (R10)
+	VMOVDQU Y5, 32(R10)
+	ADDQ    $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x3_64_loop
+	VZEROUPPER
+
+mulAvxTwo_6x3_64_end:
+	RET
+
+// func mulGFNI_6x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x3_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R9
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, CX
+
+mulGFNI_6x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z21
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z21, Z19
+	VGF2P8AFFINEQB $0x00, Z2, Z21, Z20
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z21
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z4, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z5, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z21
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z8, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z21
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z10, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z11, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z21
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z13, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z14, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (CX), Z21
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z15, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z16, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z17, Z21, Z22
+	VXORPD         Z20, Z22, Z20
 
 	// Store 3 outputs
-	MOVQ    (R10), R13
-	VMOVDQU Y0, (R13)(R11*1)
-	VMOVDQU Y1, 32(R13)(R11*1)
-	MOVQ    24(R10), R13
-	VMOVDQU Y2, (R13)(R11*1)
-	VMOVDQU Y3, 32(R13)(R11*1)
-	MOVQ    48(R10), R13
-	VMOVDQU Y4, (R13)(R11*1)
-	VMOVDQU Y5, 32(R13)(R11*1)
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z20, (R9)
+	ADDQ      $0x40, R9
 
 	// Prepare for next loop
-	ADDQ $0x40, R11
-	DECQ R12
-	JNZ  mulAvxTwo_7x3_64_loop
+	DECQ AX
+	JNZ  mulGFNI_6x3_64_loop
 	VZEROUPPER
 
-mulAvxTwo_7x3_64_end:
+mulGFNI_6x3_64_end:
 	RET
 
-// func mulAvxTwo_7x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_7x4(SB), NOSPLIT, $0-88
+// func mulGFNI_6x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R9
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, CX
+
+mulGFNI_6x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (R10), Z18
+	VMOVDQU64 (R11), Z19
+	VMOVDQU64 (R9), Z20
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z21
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z2, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z21
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z4, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z5, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z21
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z8, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z21
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z10, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z11, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z21
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z13, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z14, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (CX), Z21
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z15, Z21, Z22
+	VXORPD         Z18, Z22, Z18
+	VGF2P8AFFINEQB $0x00, Z16, Z21, Z22
+	VXORPD         Z19, Z22, Z19
+	VGF2P8AFFINEQB $0x00, Z17, Z21, Z22
+	VXORPD         Z20, Z22, Z20
+
+	// Store 3 outputs
+	VMOVDQU64 Z18, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z19, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z20, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x3Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 65 YMM used
+	// Full registers estimated 44 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x4_end
+	JZ    mulAvxTwo_6x3Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  (R11), R12
-	MOVQ  24(R11), R13
-	MOVQ  48(R11), R14
-	MOVQ  72(R11), R11
-	MOVQ  start+72(FP), R15
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R10
+	MOVQ  start+72(FP), R13
 
 	// Add start offset to output
-	ADDQ R15, R12
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, R11
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R10
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DI
-	ADDQ         R15, R8
-	ADDQ         R15, R9
-	ADDQ         R15, R10
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_7x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_6x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R11), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R12), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (SI), Y7
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (DI), Y7
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 3 to 4 outputs
-	VMOVDQU (R8), Y7
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 896(CX), Y5
-	VMOVDQU 928(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 960(CX), Y5
-	VMOVDQU 992(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 4 to 4 outputs
-	VMOVDQU (R9), Y7
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y6
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1024(CX), Y5
-	VMOVDQU 1056(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1088(CX), Y5
-	VMOVDQU 1120(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1152(CX), Y5
-	VMOVDQU 1184(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1216(CX), Y5
-	VMOVDQU 1248(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 5 to 4 outputs
-	VMOVDQU (R10), Y7
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1280(CX), Y5
-	VMOVDQU 1312(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1344(CX), Y5
-	VMOVDQU 1376(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1408(CX), Y5
-	VMOVDQU 1440(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1472(CX), Y5
-	VMOVDQU 1504(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 6 to 4 outputs
-	VMOVDQU (DX), Y7
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1536(CX), Y5
-	VMOVDQU 1568(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1600(CX), Y5
-	VMOVDQU 1632(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1664(CX), Y5
-	VMOVDQU 1696(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1728(CX), Y5
-	VMOVDQU 1760(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Store 4 outputs
-	VMOVDQU Y0, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y1, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y2, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y3, (R11)
+	// Store 3 outputs
+	VMOVDQU Y0, (R11)
 	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_7x4_loop
+	JNZ  mulAvxTwo_6x3Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_7x4_end:
+mulAvxTwo_6x3Xor_end:
 	RET
 
-// func mulAvxTwo_7x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_7x5(SB), NOSPLIT, $8-88
+// func mulAvxTwo_6x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x3_64Xor(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 80 YMM used
+	// Full registers estimated 82 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x5_end
+	JZ    mulAvxTwo_6x3_64Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  (R11), R12
-	MOVQ  24(R11), R13
-	MOVQ  48(R11), R14
-	MOVQ  72(R11), R15
-	MOVQ  96(R11), R11
-	MOVQ  start+72(FP), BP
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R10
+	MOVQ  start+72(FP), R13
 
 	// Add start offset to output
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R11
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R10
 
 	// Add start offset to input
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, R9
-	ADDQ         BP, R10
-	ADDQ         BP, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X5
-	VPBROADCASTB X5, Y5
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X6
+	VPBROADCASTB X6, Y6
 
-mulAvxTwo_7x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+mulAvxTwo_6x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R11), Y0
+	VMOVDQU 32(R11), Y1
+	VMOVDQU (R12), Y2
+	VMOVDQU 32(R12), Y3
+	VMOVDQU (R10), Y4
+	VMOVDQU 32(R10), Y5
 
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (BX), Y8
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
-	VMOVDQU 96(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 128(CX), Y6
-	VMOVDQU 160(CX), Y7
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R11)
+	VMOVDQU Y1, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y2, (R12)
+	VMOVDQU Y3, 32(R12)
+	ADDQ    $0x40, R12
+	VMOVDQU Y4, (R10)
+	VMOVDQU Y5, 32(R10)
+	ADDQ    $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_6x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 57 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_6x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R13
+	MOVQ  72(R10), R10
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R10
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_6x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 256(CX), Y6
-	VMOVDQU 288(CX), Y7
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
 
-	// Load and process 32 bytes from input 1 to 5 outputs
-	VMOVDQU (SI), Y8
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 320(CX), Y6
-	VMOVDQU 352(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 384(CX), Y6
-	VMOVDQU 416(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 448(CX), Y6
-	VMOVDQU 480(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 512(CX), Y6
-	VMOVDQU 544(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 576(CX), Y6
-	VMOVDQU 608(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 2 to 5 outputs
-	VMOVDQU (DI), Y8
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 640(CX), Y6
-	VMOVDQU 672(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 704(CX), Y6
-	VMOVDQU 736(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 768(CX), Y6
-	VMOVDQU 800(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 832(CX), Y6
-	VMOVDQU 864(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 896(CX), Y6
-	VMOVDQU 928(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 3 to 5 outputs
-	VMOVDQU (R8), Y8
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 960(CX), Y6
-	VMOVDQU 992(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1024(CX), Y6
-	VMOVDQU 1056(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1088(CX), Y6
-	VMOVDQU 1120(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1152(CX), Y6
-	VMOVDQU 1184(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1216(CX), Y6
-	VMOVDQU 1248(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 4 to 5 outputs
-	VMOVDQU (R9), Y8
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1280(CX), Y6
-	VMOVDQU 1312(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1344(CX), Y6
-	VMOVDQU 1376(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1408(CX), Y6
-	VMOVDQU 1440(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1472(CX), Y6
-	VMOVDQU 1504(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1536(CX), Y6
-	VMOVDQU 1568(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
-
-	// Load and process 32 bytes from input 5 to 5 outputs
-	VMOVDQU (R10), Y8
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1600(CX), Y6
-	VMOVDQU 1632(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1664(CX), Y6
-	VMOVDQU 1696(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1728(CX), Y6
-	VMOVDQU 1760(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1792(CX), Y6
-	VMOVDQU 1824(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1856(CX), Y6
-	VMOVDQU 1888(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 6 to 5 outputs
-	VMOVDQU (DX), Y8
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (DX), Y7
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1920(CX), Y6
-	VMOVDQU 1952(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1984(CX), Y6
-	VMOVDQU 2016(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2048(CX), Y6
-	VMOVDQU 2080(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2112(CX), Y6
-	VMOVDQU 2144(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2176(CX), Y6
-	VMOVDQU 2208(CX), Y7
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Store 5 outputs
-	VMOVDQU Y0, (R12)
+	// Store 4 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
 	ADDQ    $0x20, R12
-	VMOVDQU Y1, (R13)
+	VMOVDQU Y2, (R13)
 	ADDQ    $0x20, R13
-	VMOVDQU Y2, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y3, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y4, (R11)
-	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_7x5_loop
+	JNZ  mulAvxTwo_6x4_loop
 	VZEROUPPER
 
-mulAvxTwo_7x5_end:
+mulAvxTwo_6x4_end:
 	RET
 
-// func mulAvxTwo_7x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_7x6(SB), NOSPLIT, $8-88
-	// Loading no tables to registers
+// func mulGFNI_6x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x4_64(SB), $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 95 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_7x6_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), AX
+	// Full registers estimated 30 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R9
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R9
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, CX
+
+mulGFNI_6x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z28
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z28, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z28, Z27
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z28
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z28
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (DI), Z28
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R8), Z28
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z16, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z17, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z18, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z19, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (CX), Z28
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z20, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z21, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z22, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z23, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Store 4 outputs
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x4_64_loop
+	VZEROUPPER
+
+mulGFNI_6x4_64_end:
+	RET
+
+// func mulGFNI_6x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x4_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), CX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R9
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R9
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, CX
+
+mulGFNI_6x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (R10), Z24
+	VMOVDQU64 (R11), Z25
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (R9), Z27
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z28
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z28
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z28
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (DI), Z28
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R8), Z28
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z16, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z17, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z18, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z19, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (CX), Z28
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z20, Z28, Z29
+	VXORPD         Z24, Z29, Z24
+	VGF2P8AFFINEQB $0x00, Z21, Z28, Z29
+	VXORPD         Z25, Z29, Z25
+	VGF2P8AFFINEQB $0x00, Z22, Z28, Z29
+	VXORPD         Z26, Z29, Z26
+	VGF2P8AFFINEQB $0x00, Z23, Z28, Z29
+	VXORPD         Z27, Z29, Z27
+
+	// Store 4 outputs
+	VMOVDQU64 Z24, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R9)
+	ADDQ      $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 57 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_6x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
 	MOVQ  out_base+48(FP), R10
 	MOVQ  (R10), R11
 	MOVQ  24(R10), R12
 	MOVQ  48(R10), R13
-	MOVQ  72(R10), R14
-	MOVQ  96(R10), R15
-	MOVQ  120(R10), R10
-	MOVQ  start+72(FP), BP
+	MOVQ  72(R10), R10
+	MOVQ  start+72(FP), R14
 
 	// Add start offset to output
-	ADDQ BP, R11
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R10
 
 	// Add start offset to input
-	ADDQ         BP, DX
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, R9
-	ADDQ         BP, AX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X6
-	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_7x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
-	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (DX), Y9
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X4
+	VPBROADCASTB X4, Y4
 
-	// Load and process 32 bytes from input 1 to 6 outputs
-	VMOVDQU (BX), Y9
+mulAvxTwo_6x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 384(CX), Y7
-	VMOVDQU 416(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 448(CX), Y7
-	VMOVDQU 480(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 512(CX), Y7
-	VMOVDQU 544(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 576(CX), Y7
-	VMOVDQU 608(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 640(CX), Y7
-	VMOVDQU 672(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 704(CX), Y7
-	VMOVDQU 736(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R11), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R12), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R13), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 2 to 6 outputs
-	VMOVDQU (SI), Y9
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 768(CX), Y7
-	VMOVDQU 800(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 832(CX), Y7
-	VMOVDQU 864(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 896(CX), Y7
-	VMOVDQU 928(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 960(CX), Y7
-	VMOVDQU 992(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1024(CX), Y7
-	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1088(CX), Y7
-	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 3 to 6 outputs
-	VMOVDQU (DI), Y9
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1152(CX), Y7
-	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1216(CX), Y7
-	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1280(CX), Y7
-	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1344(CX), Y7
-	VMOVDQU 1376(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1408(CX), Y7
-	VMOVDQU 1440(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1472(CX), Y7
-	VMOVDQU 1504(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 4 to 6 outputs
-	VMOVDQU (R8), Y9
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1536(CX), Y7
-	VMOVDQU 1568(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1600(CX), Y7
-	VMOVDQU 1632(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1664(CX), Y7
-	VMOVDQU 1696(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1728(CX), Y7
-	VMOVDQU 1760(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1792(CX), Y7
-	VMOVDQU 1824(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1856(CX), Y7
-	VMOVDQU 1888(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 5 to 6 outputs
-	VMOVDQU (R9), Y9
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1920(CX), Y7
-	VMOVDQU 1952(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1984(CX), Y7
-	VMOVDQU 2016(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2048(CX), Y7
-	VMOVDQU 2080(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2112(CX), Y7
-	VMOVDQU 2144(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2176(CX), Y7
-	VMOVDQU 2208(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2240(CX), Y7
-	VMOVDQU 2272(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 6 to 6 outputs
-	VMOVDQU (AX), Y9
-	ADDQ    $0x20, AX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 2304(CX), Y7
-	VMOVDQU 2336(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 2368(CX), Y7
-	VMOVDQU 2400(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2432(CX), Y7
-	VMOVDQU 2464(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2496(CX), Y7
-	VMOVDQU 2528(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2560(CX), Y7
-	VMOVDQU 2592(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2624(CX), Y7
-	VMOVDQU 2656(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Store 6 outputs
+	// Store 4 outputs
 	VMOVDQU Y0, (R11)
 	ADDQ    $0x20, R11
 	VMOVDQU Y1, (R12)
 	ADDQ    $0x20, R12
 	VMOVDQU Y2, (R13)
 	ADDQ    $0x20, R13
-	VMOVDQU Y3, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y4, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y5, (R10)
+	VMOVDQU Y3, (R10)
 	ADDQ    $0x20, R10
 
 	// Prepare for next loop
-	DECQ BP
-	JNZ  mulAvxTwo_7x6_loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x4Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_7x6_end:
+mulAvxTwo_6x4Xor_end:
 	RET
 
-// func mulAvxTwo_7x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_7x7(SB), NOSPLIT, $0-88
+// func mulAvxTwo_6x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x5(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 110 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 70 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x7_end
+	JZ    mulAvxTwo_6x5_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  start+72(FP), R12
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R13
+	MOVQ  72(R10), R14
+	MOVQ  96(R10), R10
+	MOVQ  start+72(FP), R15
 
-	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X7
-	VPBROADCASTB X7, Y7
+	// Add start offset to output
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R10
 
-mulAvxTwo_7x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X5
+	VPBROADCASTB X5, Y5
 
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (BX), Y10
+mulAvxTwo_6x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU (CX), Y8
-	VMOVDQU 32(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 64(CX), Y8
-	VMOVDQU 96(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 128(CX), Y8
-	VMOVDQU 160(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 192(CX), Y8
-	VMOVDQU 224(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 320(CX), Y8
-	VMOVDQU 352(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 384(CX), Y8
-	VMOVDQU 416(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
 
-	// Load and process 32 bytes from input 1 to 7 outputs
-	VMOVDQU (SI), Y10
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 448(CX), Y8
-	VMOVDQU 480(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 512(CX), Y8
-	VMOVDQU 544(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 576(CX), Y8
-	VMOVDQU 608(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 640(CX), Y8
-	VMOVDQU 672(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 704(CX), Y8
-	VMOVDQU 736(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 768(CX), Y8
-	VMOVDQU 800(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 832(CX), Y8
-	VMOVDQU 864(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 2 to 7 outputs
-	VMOVDQU (DI), Y10
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 896(CX), Y8
-	VMOVDQU 928(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 960(CX), Y8
-	VMOVDQU 992(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1024(CX), Y8
-	VMOVDQU 1056(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1088(CX), Y8
-	VMOVDQU 1120(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1152(CX), Y8
-	VMOVDQU 1184(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1216(CX), Y8
-	VMOVDQU 1248(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1280(CX), Y8
-	VMOVDQU 1312(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 3 to 7 outputs
-	VMOVDQU (R8), Y10
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1344(CX), Y8
-	VMOVDQU 1376(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1408(CX), Y8
-	VMOVDQU 1440(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1472(CX), Y8
-	VMOVDQU 1504(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1536(CX), Y8
-	VMOVDQU 1568(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1600(CX), Y8
-	VMOVDQU 1632(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1664(CX), Y8
-	VMOVDQU 1696(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1728(CX), Y8
-	VMOVDQU 1760(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 4 to 7 outputs
-	VMOVDQU (R9), Y10
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1792(CX), Y8
-	VMOVDQU 1824(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1856(CX), Y8
-	VMOVDQU 1888(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1920(CX), Y8
-	VMOVDQU 1952(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1984(CX), Y8
-	VMOVDQU 2016(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2048(CX), Y8
-	VMOVDQU 2080(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2112(CX), Y8
-	VMOVDQU 2144(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2176(CX), Y8
-	VMOVDQU 2208(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 5 to 7 outputs
-	VMOVDQU (R10), Y10
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y3, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y4, (R10)
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2240(CX), Y8
-	VMOVDQU 2272(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2304(CX), Y8
-	VMOVDQU 2336(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2368(CX), Y8
-	VMOVDQU 2400(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2432(CX), Y8
-	VMOVDQU 2464(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2496(CX), Y8
-	VMOVDQU 2528(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2560(CX), Y8
-	VMOVDQU 2592(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2624(CX), Y8
-	VMOVDQU 2656(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
 
-	// Load and process 32 bytes from input 6 to 7 outputs
-	VMOVDQU (DX), Y10
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2688(CX), Y8
-	VMOVDQU 2720(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2752(CX), Y8
-	VMOVDQU 2784(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2816(CX), Y8
-	VMOVDQU 2848(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2880(CX), Y8
-	VMOVDQU 2912(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2944(CX), Y8
-	VMOVDQU 2976(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3008(CX), Y8
-	VMOVDQU 3040(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3072(CX), Y8
-	VMOVDQU 3104(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x5_loop
+	VZEROUPPER
 
-	// Store 7 outputs
-	MOVQ    (R11), R13
-	VMOVDQU Y0, (R13)(R12*1)
-	MOVQ    24(R11), R13
-	VMOVDQU Y1, (R13)(R12*1)
-	MOVQ    48(R11), R13
-	VMOVDQU Y2, (R13)(R12*1)
-	MOVQ    72(R11), R13
-	VMOVDQU Y3, (R13)(R12*1)
-	MOVQ    96(R11), R13
-	VMOVDQU Y4, (R13)(R12*1)
-	MOVQ    120(R11), R13
-	VMOVDQU Y5, (R13)(R12*1)
-	MOVQ    144(R11), R13
-	VMOVDQU Y6, (R13)(R12*1)
+mulAvxTwo_6x5_end:
+	RET
+
+// func mulGFNI_6x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x5_64(SB), $0-88
+	// Loading 25 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 37 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R13
+	MOVQ            72(R10), R14
+	MOVQ            96(R10), R10
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R10
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, DX
+
+mulGFNI_6x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R10)
+	ADDQ      $0x40, R10
 
 	// Prepare for next loop
-	ADDQ $0x20, R12
 	DECQ AX
-	JNZ  mulAvxTwo_7x7_loop
+	JNZ  mulGFNI_6x5_64_loop
 	VZEROUPPER
 
-mulAvxTwo_7x7_end:
+mulGFNI_6x5_64_end:
 	RET
 
-// func mulAvxTwo_7x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_7x8(SB), NOSPLIT, $0-88
+// func mulGFNI_6x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x5_64Xor(SB), $0-88
+	// Loading 25 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 37 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R13
+	MOVQ            72(R10), R14
+	MOVQ            96(R10), R10
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R10
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, DX
+
+mulGFNI_6x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (R11), Z25
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (R13), Z27
+	VMOVDQU64 (R14), Z28
+	VMOVDQU64 (R10), Z29
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_6x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x5Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 125 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 70 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x8_end
+	JZ    mulAvxTwo_6x5Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  start+72(FP), R12
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R13
+	MOVQ  72(R10), R14
+	MOVQ  96(R10), R10
+	MOVQ  start+72(FP), R15
 
-	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X8
-	VPBROADCASTB X8, Y8
+	// Add start offset to output
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R10
 
-mulAvxTwo_7x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X5
+	VPBROADCASTB X5, Y5
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_6x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R11), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (R12), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R13), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R14), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (R10), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (SI), Y11
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y9
-	VMOVDQU 672(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 704(CX), Y9
-	VMOVDQU 736(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 768(CX), Y9
-	VMOVDQU 800(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 832(CX), Y9
-	VMOVDQU 864(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 896(CX), Y9
-	VMOVDQU 928(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 960(CX), Y9
-	VMOVDQU 992(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 2 to 8 outputs
-	VMOVDQU (DI), Y11
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1024(CX), Y9
-	VMOVDQU 1056(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1088(CX), Y9
-	VMOVDQU 1120(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1152(CX), Y9
-	VMOVDQU 1184(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1216(CX), Y9
-	VMOVDQU 1248(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y9
-	VMOVDQU 1312(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1344(CX), Y9
-	VMOVDQU 1376(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1408(CX), Y9
-	VMOVDQU 1440(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1472(CX), Y9
-	VMOVDQU 1504(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 3 to 8 outputs
-	VMOVDQU (R8), Y11
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1536(CX), Y9
-	VMOVDQU 1568(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1600(CX), Y9
-	VMOVDQU 1632(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1664(CX), Y9
-	VMOVDQU 1696(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1728(CX), Y9
-	VMOVDQU 1760(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1792(CX), Y9
-	VMOVDQU 1824(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1856(CX), Y9
-	VMOVDQU 1888(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1920(CX), Y9
-	VMOVDQU 1952(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1984(CX), Y9
-	VMOVDQU 2016(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 4 to 8 outputs
-	VMOVDQU (R9), Y11
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2048(CX), Y9
-	VMOVDQU 2080(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2112(CX), Y9
-	VMOVDQU 2144(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2176(CX), Y9
-	VMOVDQU 2208(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2240(CX), Y9
-	VMOVDQU 2272(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2304(CX), Y9
-	VMOVDQU 2336(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2368(CX), Y9
-	VMOVDQU 2400(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2432(CX), Y9
-	VMOVDQU 2464(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 2496(CX), Y9
-	VMOVDQU 2528(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
-	// Load and process 32 bytes from input 5 to 8 outputs
-	VMOVDQU (R10), Y11
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2560(CX), Y9
-	VMOVDQU 2592(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2624(CX), Y9
-	VMOVDQU 2656(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2688(CX), Y9
-	VMOVDQU 2720(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2752(CX), Y9
-	VMOVDQU 2784(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2816(CX), Y9
-	VMOVDQU 2848(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2880(CX), Y9
-	VMOVDQU 2912(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2944(CX), Y9
-	VMOVDQU 2976(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3008(CX), Y9
-	VMOVDQU 3040(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 6 to 8 outputs
-	VMOVDQU (DX), Y11
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (DX), Y8
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 3072(CX), Y9
-	VMOVDQU 3104(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 3136(CX), Y9
-	VMOVDQU 3168(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 3200(CX), Y9
-	VMOVDQU 3232(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 3264(CX), Y9
-	VMOVDQU 3296(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 3328(CX), Y9
-	VMOVDQU 3360(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 3392(CX), Y9
-	VMOVDQU 3424(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 3456(CX), Y9
-	VMOVDQU 3488(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3520(CX), Y9
-	VMOVDQU 3552(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Store 8 outputs
-	MOVQ    (R11), R13
-	VMOVDQU Y0, (R13)(R12*1)
-	MOVQ    24(R11), R13
-	VMOVDQU Y1, (R13)(R12*1)
-	MOVQ    48(R11), R13
-	VMOVDQU Y2, (R13)(R12*1)
-	MOVQ    72(R11), R13
-	VMOVDQU Y3, (R13)(R12*1)
-	MOVQ    96(R11), R13
-	VMOVDQU Y4, (R13)(R12*1)
-	MOVQ    120(R11), R13
-	VMOVDQU Y5, (R13)(R12*1)
-	MOVQ    144(R11), R13
-	VMOVDQU Y6, (R13)(R12*1)
-	MOVQ    168(R11), R13
-	VMOVDQU Y7, (R13)(R12*1)
+	// Store 5 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y3, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
 
 	// Prepare for next loop
-	ADDQ $0x20, R12
 	DECQ AX
-	JNZ  mulAvxTwo_7x8_loop
+	JNZ  mulAvxTwo_6x5Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_7x8_end:
+mulAvxTwo_6x5Xor_end:
 	RET
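Note on the accumulate pattern changed throughout these hunks: the removed code folded each pair of table lookups into an output register with two VPXOR instructions, while the added code uses the XOR3WAY macro, and the Requires: lines now also list AVX512F and AVX512VL, which suggests the three-input XOR is emitted as a single instruction (VPTERNLOGD with truth table 0x96 computes a ^ b ^ c). A minimal scalar sketch of one such step, with an illustrative function name that is not part of the package:

	// xor3way is the scalar view of one XOR3WAY step; acc, lo and hi stand
	// for the Y-register contents.
	func xor3way(acc, lo, hi []byte) {
		// old: tmp := lo ^ hi (VPXOR), then acc ^= tmp (VPXOR)
		// new: acc ^= lo ^ hi in one step
		for i := range acc {
			acc[i] ^= lo[i] ^ hi[i]
		}
	}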
 
-// func mulAvxTwo_7x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_7x9(SB), NOSPLIT, $0-88
+// func mulAvxTwo_6x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x6(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 140 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 83 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_7x9_end
+	JZ    mulAvxTwo_6x6_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  start+72(FP), R12
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R13
+	MOVQ  72(R10), R14
+	MOVQ  96(R10), R15
+	MOVQ  120(R10), R10
+	MOVQ  start+72(FP), BP
 
-	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X9
-	VPBROADCASTB X9, Y9
+	// Add start offset to output
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
 
-mulAvxTwo_7x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
+mulAvxTwo_6x6_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y5
 
-	// Load and process 32 bytes from input 1 to 9 outputs
-	VMOVDQU (SI), Y12
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 576(CX), Y10
-	VMOVDQU 608(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 640(CX), Y10
-	VMOVDQU 672(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 704(CX), Y10
-	VMOVDQU 736(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 768(CX), Y10
-	VMOVDQU 800(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 832(CX), Y10
-	VMOVDQU 864(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 896(CX), Y10
-	VMOVDQU 928(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 960(CX), Y10
-	VMOVDQU 992(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1024(CX), Y10
-	VMOVDQU 1056(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1088(CX), Y10
-	VMOVDQU 1120(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 2 to 9 outputs
-	VMOVDQU (DI), Y12
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1152(CX), Y10
-	VMOVDQU 1184(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1216(CX), Y10
-	VMOVDQU 1248(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1280(CX), Y10
-	VMOVDQU 1312(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1344(CX), Y10
-	VMOVDQU 1376(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1408(CX), Y10
-	VMOVDQU 1440(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 1472(CX), Y10
-	VMOVDQU 1504(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 1536(CX), Y10
-	VMOVDQU 1568(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1600(CX), Y10
-	VMOVDQU 1632(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1664(CX), Y10
-	VMOVDQU 1696(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 3 to 9 outputs
-	VMOVDQU (R8), Y12
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1728(CX), Y10
-	VMOVDQU 1760(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1792(CX), Y10
-	VMOVDQU 1824(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1856(CX), Y10
-	VMOVDQU 1888(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1920(CX), Y10
-	VMOVDQU 1952(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1984(CX), Y10
-	VMOVDQU 2016(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2048(CX), Y10
-	VMOVDQU 2080(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2112(CX), Y10
-	VMOVDQU 2144(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2176(CX), Y10
-	VMOVDQU 2208(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2240(CX), Y10
-	VMOVDQU 2272(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 4 to 9 outputs
-	VMOVDQU (R9), Y12
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R9), Y9
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2304(CX), Y10
-	VMOVDQU 2336(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2368(CX), Y10
-	VMOVDQU 2400(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 2432(CX), Y10
-	VMOVDQU 2464(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 2496(CX), Y10
-	VMOVDQU 2528(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 2560(CX), Y10
-	VMOVDQU 2592(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2624(CX), Y10
-	VMOVDQU 2656(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2688(CX), Y10
-	VMOVDQU 2720(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2752(CX), Y10
-	VMOVDQU 2784(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2816(CX), Y10
-	VMOVDQU 2848(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Load and process 32 bytes from input 5 to 9 outputs
-	VMOVDQU (R10), Y12
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2880(CX), Y10
-	VMOVDQU 2912(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2944(CX), Y10
-	VMOVDQU 2976(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3008(CX), Y10
-	VMOVDQU 3040(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3072(CX), Y10
-	VMOVDQU 3104(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3136(CX), Y10
-	VMOVDQU 3168(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3200(CX), Y10
-	VMOVDQU 3232(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3264(CX), Y10
-	VMOVDQU 3296(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3328(CX), Y10
-	VMOVDQU 3360(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3392(CX), Y10
-	VMOVDQU 3424(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 6 to 9 outputs
-	VMOVDQU (DX), Y12
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (DX), Y9
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 3456(CX), Y10
-	VMOVDQU 3488(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 3520(CX), Y10
-	VMOVDQU 3552(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3584(CX), Y10
-	VMOVDQU 3616(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3648(CX), Y10
-	VMOVDQU 3680(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3712(CX), Y10
-	VMOVDQU 3744(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3776(CX), Y10
-	VMOVDQU 3808(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3840(CX), Y10
-	VMOVDQU 3872(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3904(CX), Y10
-	VMOVDQU 3936(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3968(CX), Y10
-	VMOVDQU 4000(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Store 9 outputs
-	MOVQ    (R11), R13
-	VMOVDQU Y0, (R13)(R12*1)
-	MOVQ    24(R11), R13
-	VMOVDQU Y1, (R13)(R12*1)
-	MOVQ    48(R11), R13
-	VMOVDQU Y2, (R13)(R12*1)
-	MOVQ    72(R11), R13
-	VMOVDQU Y3, (R13)(R12*1)
-	MOVQ    96(R11), R13
-	VMOVDQU Y4, (R13)(R12*1)
-	MOVQ    120(R11), R13
-	VMOVDQU Y5, (R13)(R12*1)
-	MOVQ    144(R11), R13
-	VMOVDQU Y6, (R13)(R12*1)
-	MOVQ    168(R11), R13
-	VMOVDQU Y7, (R13)(R12*1)
-	MOVQ    192(R11), R13
-	VMOVDQU Y8, (R13)(R12*1)
+	// Store 6 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y3, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y4, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y5, (R10)
+	ADDQ    $0x20, R10
 
 	// Prepare for next loop
-	ADDQ $0x20, R12
 	DECQ AX
-	JNZ  mulAvxTwo_7x9_loop
+	JNZ  mulAvxTwo_6x6_loop
 	VZEROUPPER
 
-mulAvxTwo_7x9_end:
+mulAvxTwo_6x6_end:
 	RET
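For readers following the generated code: each "Load and process 32 bytes from input i to 6 outputs" block in mulAvxTwo_6x6 above is the classic split-nibble table lookup for GF(2^8) multiplication. VPAND with the broadcast 0x0f mask and VPSRLQ $0x04 split every byte into its low and high nibble, the two VMOVDQU loads from CX fetch the 16-entry product tables for one matrix coefficient, VPSHUFB performs 32 table lookups in parallel, and XOR3WAY folds both halves into the output register. A scalar sketch of the same computation follows; it assumes the 0x11D field polynomial, gfMul and mulAdd are illustrative names rather than the package API, and the real code reads precomputed tables from the flattened matrix argument instead of rebuilding them:

	// gfMul multiplies two GF(2^8) elements by shift-and-reduce.
	func gfMul(a, b byte) byte {
		var p byte
		for b != 0 {
			if b&1 != 0 {
				p ^= a
			}
			carry := a & 0x80
			a <<= 1
			if carry != 0 {
				a ^= 0x1d // reduce modulo x^8+x^4+x^3+x^2+1
			}
			b >>= 1
		}
		return p
	}

	// mulAdd XORs c*in into out, the work done by one coefficient block.
	func mulAdd(c byte, in, out []byte) {
		var lo, hi [16]byte
		for n := 0; n < 16; n++ {
			lo[n] = gfMul(c, byte(n))    // table indexed by the low nibble
			hi[n] = gfMul(c, byte(n)<<4) // table indexed by the high nibble
		}
		for i, v := range in {
			out[i] ^= lo[v&0x0f] ^ hi[v>>4] // VPAND/VPSRLQ split, VPSHUFB lookups, XOR3WAY fold
		}
	}

Running mulAdd once per (input, output, coefficient) triple reproduces what a single pass of the loop above computes for one 32-byte chunk.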
 
-// func mulAvxTwo_7x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_7x10(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 155 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_7x10_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), DX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  start+72(FP), R12
+// func mulGFNI_6x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x6_64(SB), $8-88
+	// Loading 24 of 36 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 44 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R13
+	MOVQ            72(R10), R14
+	MOVQ            96(R10), R15
+	MOVQ            120(R10), R10
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
 
 	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X10
-	VPBROADCASTB X10, Y10
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, DX
+
+mulGFNI_6x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-mulAvxTwo_7x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
-
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
-
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 832(CX), Y11
-	VMOVDQU 864(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 896(CX), Y11
-	VMOVDQU 928(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 960(CX), Y11
-	VMOVDQU 992(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1024(CX), Y11
-	VMOVDQU 1056(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1088(CX), Y11
-	VMOVDQU 1120(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1152(CX), Y11
-	VMOVDQU 1184(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1216(CX), Y11
-	VMOVDQU 1248(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R10)
+	ADDQ      $0x40, R10
 
-	// Load and process 32 bytes from input 2 to 10 outputs
-	VMOVDQU (DI), Y13
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1280(CX), Y11
-	VMOVDQU 1312(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1344(CX), Y11
-	VMOVDQU 1376(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 1408(CX), Y11
-	VMOVDQU 1440(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 1472(CX), Y11
-	VMOVDQU 1504(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 1536(CX), Y11
-	VMOVDQU 1568(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 1600(CX), Y11
-	VMOVDQU 1632(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1664(CX), Y11
-	VMOVDQU 1696(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1728(CX), Y11
-	VMOVDQU 1760(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1792(CX), Y11
-	VMOVDQU 1824(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1856(CX), Y11
-	VMOVDQU 1888(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_6x6_64_loop
+	VZEROUPPER
 
-	// Load and process 32 bytes from input 3 to 10 outputs
-	VMOVDQU (R8), Y13
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1920(CX), Y11
-	VMOVDQU 1952(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1984(CX), Y11
-	VMOVDQU 2016(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2048(CX), Y11
-	VMOVDQU 2080(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2112(CX), Y11
-	VMOVDQU 2144(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2176(CX), Y11
-	VMOVDQU 2208(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2240(CX), Y11
-	VMOVDQU 2272(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2304(CX), Y11
-	VMOVDQU 2336(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 2368(CX), Y11
-	VMOVDQU 2400(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 2432(CX), Y11
-	VMOVDQU 2464(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 2496(CX), Y11
-	VMOVDQU 2528(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+mulGFNI_6x6_64_end:
+	RET
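The mulGFNI_6x6_64 routine above takes a different route: it widens to 64-byte ZMM registers and replaces the table lookups with VGF2P8AFFINEQB. Multiplication by a fixed GF(2^8) constant is linear over GF(2), so each of the 36 coefficients can be pre-encoded as an 8x8 bit matrix packed into one uint64 (broadcast by VBROADCASTF32X2); the instruction applies that matrix to all 64 input bytes at once and VXORPD accumulates the products. Only 24 of the 36 matrices fit in Z0-Z23, so inputs 4 and 5 use the .BCST memory-operand form instead. A scalar sketch of the linearity argument, reusing gfMul from the sketch further up; the exact bit packing of the real matrix operand is not reproduced here and the function name is illustrative:

	// mulByConstViaBasis shows why an 8x8 bit matrix is enough: the product is
	// the XOR of the images of the set bits of x under "multiply by c".
	func mulByConstViaBasis(c, x byte) byte {
		var out byte
		for bit := 0; bit < 8; bit++ {
			if x&(byte(1)<<bit) != 0 {
				out ^= gfMul(c, byte(1)<<bit) // column "bit" of the matrix
			}
		}
		return out
	}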
 
-	// Load and process 32 bytes from input 4 to 10 outputs
-	VMOVDQU (R9), Y13
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 2560(CX), Y11
-	VMOVDQU 2592(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 2624(CX), Y11
-	VMOVDQU 2656(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2688(CX), Y11
-	VMOVDQU 2720(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2752(CX), Y11
-	VMOVDQU 2784(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2816(CX), Y11
-	VMOVDQU 2848(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2880(CX), Y11
-	VMOVDQU 2912(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2944(CX), Y11
-	VMOVDQU 2976(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3008(CX), Y11
-	VMOVDQU 3040(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3072(CX), Y11
-	VMOVDQU 3104(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3136(CX), Y11
-	VMOVDQU 3168(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+// func mulGFNI_6x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x6_64Xor(SB), $8-88
+	// Loading 24 of 36 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 44 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R13
+	MOVQ            72(R10), R14
+	MOVQ            96(R10), R15
+	MOVQ            120(R10), R10
+	MOVQ            start+72(FP), BP
 
-	// Load and process 32 bytes from input 5 to 10 outputs
-	VMOVDQU (R10), Y13
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 3200(CX), Y11
-	VMOVDQU 3232(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 3264(CX), Y11
-	VMOVDQU 3296(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 3328(CX), Y11
-	VMOVDQU 3360(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 3392(CX), Y11
-	VMOVDQU 3424(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 3456(CX), Y11
-	VMOVDQU 3488(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 3520(CX), Y11
-	VMOVDQU 3552(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 3584(CX), Y11
-	VMOVDQU 3616(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3648(CX), Y11
-	VMOVDQU 3680(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3712(CX), Y11
-	VMOVDQU 3744(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3776(CX), Y11
-	VMOVDQU 3808(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	// Add start offset to output
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
 
-	// Load and process 32 bytes from input 6 to 10 outputs
-	VMOVDQU (DX), Y13
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 3840(CX), Y11
-	VMOVDQU 3872(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 3904(CX), Y11
-	VMOVDQU 3936(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 3968(CX), Y11
-	VMOVDQU 4000(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 4032(CX), Y11
-	VMOVDQU 4064(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 4096(CX), Y11
-	VMOVDQU 4128(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 4160(CX), Y11
-	VMOVDQU 4192(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 4224(CX), Y11
-	VMOVDQU 4256(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 4288(CX), Y11
-	VMOVDQU 4320(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 4352(CX), Y11
-	VMOVDQU 4384(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 4416(CX), Y11
-	VMOVDQU 4448(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, DX
+
+mulGFNI_6x6_64Xor_loop:
+	// Load 6 outputs
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R10), Z29
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 10 outputs
-	MOVQ    (R11), R13
-	VMOVDQU Y0, (R13)(R12*1)
-	MOVQ    24(R11), R13
-	VMOVDQU Y1, (R13)(R12*1)
-	MOVQ    48(R11), R13
-	VMOVDQU Y2, (R13)(R12*1)
-	MOVQ    72(R11), R13
-	VMOVDQU Y3, (R13)(R12*1)
-	MOVQ    96(R11), R13
-	VMOVDQU Y4, (R13)(R12*1)
-	MOVQ    120(R11), R13
-	VMOVDQU Y5, (R13)(R12*1)
-	MOVQ    144(R11), R13
-	VMOVDQU Y6, (R13)(R12*1)
-	MOVQ    168(R11), R13
-	VMOVDQU Y7, (R13)(R12*1)
-	MOVQ    192(R11), R13
-	VMOVDQU Y8, (R13)(R12*1)
-	MOVQ    216(R11), R13
-	VMOVDQU Y9, (R13)(R12*1)
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R10)
+	ADDQ      $0x40, R10
 
 	// Prepare for next loop
-	ADDQ $0x20, R12
 	DECQ AX
-	JNZ  mulAvxTwo_7x10_loop
+	JNZ  mulGFNI_6x6_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_7x10_end:
+mulGFNI_6x6_64Xor_end:
 	RET
 
-// func mulAvxTwo_8x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_8x1(SB), NOSPLIT, $0-88
+// func mulAvxTwo_6x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x6Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 20 YMM used
+	// Full registers estimated 83 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x1_end
+	JZ    mulAvxTwo_6x6Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  (R12), R12
-	MOVQ  start+72(FP), R13
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R13
+	MOVQ  72(R10), R14
+	MOVQ  96(R10), R15
+	MOVQ  120(R10), R10
+	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ R13, R12
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X1
-	VPBROADCASTB X1, Y1
-
-mulAvxTwo_8x1_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
 
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (BX), Y4
+mulAvxTwo_6x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU (CX), Y2
-	VMOVDQU 32(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (R11), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU (R12), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU (R13), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU (R14), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU (R15), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (R10), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (SI), Y4
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (DI), Y4
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 128(CX), Y2
-	VMOVDQU 160(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
-
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (R8), Y4
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 192(CX), Y2
-	VMOVDQU 224(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
-
-	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (R9), Y4
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 256(CX), Y2
-	VMOVDQU 288(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 5 to 1 outputs
-	VMOVDQU (R10), Y4
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 320(CX), Y2
-	VMOVDQU 352(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 6 to 1 outputs
-	VMOVDQU (R11), Y4
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 384(CX), Y2
-	VMOVDQU 416(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R9), Y9
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 7 to 1 outputs
-	VMOVDQU (DX), Y4
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (DX), Y9
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 448(CX), Y2
-	VMOVDQU 480(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Store 1 outputs
-	VMOVDQU Y0, (R12)
+	// Store 6 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
 	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y3, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y4, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y5, (R10)
+	ADDQ    $0x20, R10
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_8x1_loop
+	JNZ  mulAvxTwo_6x6Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_8x1_end:
+mulAvxTwo_6x6Xor_end:
 	RET
 
-// func mulAvxTwo_8x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_8x1_64(SB), $0-88
+// func mulAvxTwo_6x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x7(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 20 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 96 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x1_64_end
+	JZ    mulAvxTwo_6x7_end
 	MOVQ  in_base+24(FP), AX
 	MOVQ  (AX), DX
 	MOVQ  24(AX), BX
 	MOVQ  48(AX), SI
 	MOVQ  72(AX), DI
 	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), AX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  out_base+48(FP), R11
-	MOVQ  start+72(FP), R12
-
-	// Add start offset to input
-	ADDQ         R12, DX
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, AX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R13
-	SHRQ         $0x06, R13
-
-mulAvxTwo_8x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y6
-	VMOVDQU 32(R8), Y5
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 5 to 1 outputs
-	VMOVDQU (R9), Y6
-	VMOVDQU 32(R9), Y5
-	ADDQ    $0x40, R9
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 6 to 1 outputs
-	VMOVDQU (R10), Y6
-	VMOVDQU 32(R10), Y5
-	ADDQ    $0x40, R10
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 7 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Store 1 outputs
-	MOVQ    (R11), R14
-	VMOVDQU Y0, (R14)(R12*1)
-	VMOVDQU Y1, 32(R14)(R12*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, R12
-	DECQ R13
-	JNZ  mulAvxTwo_8x1_64_loop
-	VZEROUPPER
-
-mulAvxTwo_8x1_64_end:
-	RET
-
-// func mulAvxTwo_8x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_8x2(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 39 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_8x2_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  (R12), R13
-	MOVQ  24(R12), R12
-	MOVQ  start+72(FP), R14
+	MOVQ  120(AX), AX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R14
+	MOVQ  120(R9), R15
+	MOVQ  144(R9), R9
+	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ R14, R13
-	ADDQ R14, R12
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X2
-	VPBROADCASTB X2, Y2
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
 
-mulAvxTwo_8x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulAvxTwo_6x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y6
 
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 1 to 2 outputs
-	VMOVDQU (SI), Y5
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 2 to 2 outputs
-	VMOVDQU (DI), Y5
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (DI), Y10
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 3 to 2 outputs
-	VMOVDQU (R8), Y5
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R8), Y10
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 4 to 2 outputs
-	VMOVDQU (R9), Y5
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 512(CX), Y3
-	VMOVDQU 544(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 576(CX), Y3
-	VMOVDQU 608(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (AX), Y10
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 5 to 2 outputs
-	VMOVDQU (R10), Y5
+	// Store 7 outputs
+	VMOVDQU Y0, (R10)
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 640(CX), Y3
-	VMOVDQU 672(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 704(CX), Y3
-	VMOVDQU 736(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 6 to 2 outputs
-	VMOVDQU (R11), Y5
+	VMOVDQU Y1, (R11)
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 768(CX), Y3
-	VMOVDQU 800(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 832(CX), Y3
-	VMOVDQU 864(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 7 to 2 outputs
-	VMOVDQU (DX), Y5
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 896(CX), Y3
-	VMOVDQU 928(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 960(CX), Y3
-	VMOVDQU 992(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Store 2 outputs
-	VMOVDQU Y0, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y1, (R12)
+	VMOVDQU Y2, (R12)
 	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y5, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y6, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
-	DECQ AX
-	JNZ  mulAvxTwo_8x2_loop
+	DECQ BP
+	JNZ  mulAvxTwo_6x7_loop
 	VZEROUPPER
 
-mulAvxTwo_8x2_end:
+mulAvxTwo_6x7_end:
 	RET
 
-// func mulAvxTwo_8x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_8x2_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 39 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_8x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), AX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  out_base+48(FP), R11
-	MOVQ  start+72(FP), R12
-
-	// Add start offset to input
-	ADDQ         R12, DX
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, AX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X4
-	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R13
-	SHRQ         $0x06, R13
-
-mulAvxTwo_8x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+// func mulGFNI_6x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x7_64(SB), $8-88
+	// Loading 23 of 42 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 51 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), AX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R13
+	MOVQ            96(R9), R14
+	MOVQ            120(R9), R15
+	MOVQ            144(R9), R9
+	MOVQ            start+72(FP), BP
 
-	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Add start offset to output
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
 
-	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_6x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Store 7 outputs
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R9)
+	ADDQ      $0x40, R9
 
-	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (DI), Y9
-	VMOVDQU 32(DI), Y11
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_6x7_64_loop
+	VZEROUPPER
 
-	// Load and process 64 bytes from input 4 to 2 outputs
-	VMOVDQU (R8), Y9
-	VMOVDQU 32(R8), Y11
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+mulGFNI_6x7_64_end:
+	RET
 
-	// Load and process 64 bytes from input 5 to 2 outputs
-	VMOVDQU (R9), Y9
-	VMOVDQU 32(R9), Y11
-	ADDQ    $0x40, R9
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+// func mulGFNI_6x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x7_64Xor(SB), $8-88
+	// Loading 23 of 42 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 51 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), AX
+	MOVQ            out_base+48(FP), R9
+	MOVQ            out_base+48(FP), R9
+	MOVQ            (R9), R10
+	MOVQ            24(R9), R11
+	MOVQ            48(R9), R12
+	MOVQ            72(R9), R13
+	MOVQ            96(R9), R14
+	MOVQ            120(R9), R15
+	MOVQ            144(R9), R9
+	MOVQ            start+72(FP), BP
 
-	// Load and process 64 bytes from input 6 to 2 outputs
-	VMOVDQU (R10), Y9
-	VMOVDQU 32(R10), Y11
-	ADDQ    $0x40, R10
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Add start offset to output
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
 
-	// Load and process 64 bytes from input 7 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 896(CX), Y5
-	VMOVDQU 928(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 960(CX), Y5
-	VMOVDQU 992(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_6x7_64Xor_loop:
+	// Load 7 outputs
+	VMOVDQU64 (R10), Z23
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R9), Z29
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 2 outputs
-	MOVQ    (R11), R14
-	VMOVDQU Y0, (R14)(R12*1)
-	VMOVDQU Y1, 32(R14)(R12*1)
-	MOVQ    24(R11), R14
-	VMOVDQU Y2, (R14)(R12*1)
-	VMOVDQU Y3, 32(R14)(R12*1)
+	// Store 7 outputs
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R9)
+	ADDQ      $0x40, R9
 
 	// Prepare for next loop
-	ADDQ $0x40, R12
-	DECQ R13
-	JNZ  mulAvxTwo_8x2_64_loop
+	DECQ BP
+	JNZ  mulGFNI_6x7_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_8x2_64_end:
+mulGFNI_6x7_64Xor_end:
 	RET
 
-// func mulAvxTwo_8x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_8x3(SB), NOSPLIT, $0-88
+// func mulAvxTwo_6x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x7Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 56 YMM used
+	// Full registers estimated 96 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x3_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  (R12), R13
-	MOVQ  24(R12), R14
-	MOVQ  48(R12), R12
-	MOVQ  start+72(FP), R15
-
+	JZ    mulAvxTwo_6x7Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), AX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R14
+	MOVQ  120(R9), R15
+	MOVQ  144(R9), R9
+	MOVQ  start+72(FP), BP
+
 	// Add start offset to output
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, R12
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DI
-	ADDQ         R15, R8
-	ADDQ         R15, R9
-	ADDQ         R15, R10
-	ADDQ         R15, R11
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X3
-	VPBROADCASTB X3, Y3
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
 
-mulAvxTwo_8x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
+mulAvxTwo_6x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU (R13), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU (R14), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU (R15), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU (R9), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 0 to 3 outputs
-	VMOVDQU (BX), Y6
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU (CX), Y4
-	VMOVDQU 32(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 64(CX), Y4
-	VMOVDQU 96(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 1 to 3 outputs
-	VMOVDQU (SI), Y6
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 192(CX), Y4
-	VMOVDQU 224(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 256(CX), Y4
-	VMOVDQU 288(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 320(CX), Y4
-	VMOVDQU 352(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 2 to 3 outputs
-	VMOVDQU (DI), Y6
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (DI), Y10
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 384(CX), Y4
-	VMOVDQU 416(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 448(CX), Y4
-	VMOVDQU 480(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 512(CX), Y4
-	VMOVDQU 544(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 3 to 3 outputs
-	VMOVDQU (R8), Y6
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R8), Y10
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 576(CX), Y4
-	VMOVDQU 608(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 640(CX), Y4
-	VMOVDQU 672(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 704(CX), Y4
-	VMOVDQU 736(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 4 to 3 outputs
-	VMOVDQU (R9), Y6
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 768(CX), Y4
-	VMOVDQU 800(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 832(CX), Y4
-	VMOVDQU 864(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 896(CX), Y4
-	VMOVDQU 928(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (AX), Y10
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 5 to 3 outputs
-	VMOVDQU (R10), Y6
+	// Store 7 outputs
+	VMOVDQU Y0, (R10)
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 960(CX), Y4
-	VMOVDQU 992(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1024(CX), Y4
-	VMOVDQU 1056(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1088(CX), Y4
-	VMOVDQU 1120(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
-
-	// Load and process 32 bytes from input 6 to 3 outputs
-	VMOVDQU (R11), Y6
+	VMOVDQU Y1, (R11)
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 1152(CX), Y4
-	VMOVDQU 1184(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1216(CX), Y4
-	VMOVDQU 1248(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1280(CX), Y4
-	VMOVDQU 1312(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
-
-	// Load and process 32 bytes from input 7 to 3 outputs
-	VMOVDQU (DX), Y6
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 1344(CX), Y4
-	VMOVDQU 1376(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1408(CX), Y4
-	VMOVDQU 1440(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1472(CX), Y4
-	VMOVDQU 1504(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
-
-	// Store 3 outputs
-	VMOVDQU Y0, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y1, (R14)
-	ADDQ    $0x20, R14
 	VMOVDQU Y2, (R12)
 	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y5, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y6, (R9)
+	ADDQ    $0x20, R9
 
 	// Prepare for next loop
-	DECQ AX
-	JNZ  mulAvxTwo_8x3_loop
+	DECQ BP
+	JNZ  mulAvxTwo_6x7Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_8x3_end:
+mulAvxTwo_6x7Xor_end:
 	RET
 
-// func mulAvxTwo_8x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_8x3_64(SB), $0-88
+// func mulAvxTwo_6x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x8(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 56 YMM used
+	// Full registers estimated 109 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), AX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  out_base+48(FP), R11
-	MOVQ  start+72(FP), R12
+	JZ    mulAvxTwo_6x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to input
-	ADDQ         R12, DX
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, R10
-	ADDQ         R12, AX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X6
-	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R13
-	SHRQ         $0x06, R13
-
-mulAvxTwo_8x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
-	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X8
+	VPBROADCASTB X8, Y8
 
-	// Load and process 64 bytes from input 1 to 3 outputs
+mulAvxTwo_6x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
 	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
+	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
 
-	// Load and process 64 bytes from input 2 to 3 outputs
+	// Load and process 32 bytes from input 1 to 8 outputs
 	VMOVDQU (SI), Y11
-	VMOVDQU 32(SI), Y13
-	ADDQ    $0x40, SI
+	ADDQ    $0x20, SI
 	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 384(CX), Y7
-	VMOVDQU 416(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 448(CX), Y7
-	VMOVDQU 480(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 512(CX), Y7
-	VMOVDQU 544(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 64 bytes from input 3 to 3 outputs
+	// Load and process 32 bytes from input 2 to 8 outputs
 	VMOVDQU (DI), Y11
-	VMOVDQU 32(DI), Y13
-	ADDQ    $0x40, DI
+	ADDQ    $0x20, DI
 	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 576(CX), Y7
-	VMOVDQU 608(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y7
-	VMOVDQU 672(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 704(CX), Y7
-	VMOVDQU 736(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 64 bytes from input 4 to 3 outputs
+	// Load and process 32 bytes from input 3 to 8 outputs
 	VMOVDQU (R8), Y11
-	VMOVDQU 32(R8), Y13
-	ADDQ    $0x40, R8
+	ADDQ    $0x20, R8
 	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 768(CX), Y7
-	VMOVDQU 800(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 832(CX), Y7
-	VMOVDQU 864(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 896(CX), Y7
-	VMOVDQU 928(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 64 bytes from input 5 to 3 outputs
+	// Load and process 32 bytes from input 4 to 8 outputs
 	VMOVDQU (R9), Y11
-	VMOVDQU 32(R9), Y13
-	ADDQ    $0x40, R9
+	ADDQ    $0x20, R9
 	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 960(CX), Y7
-	VMOVDQU 992(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1024(CX), Y7
-	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1088(CX), Y7
-	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 64 bytes from input 6 to 3 outputs
-	VMOVDQU (R10), Y11
-	VMOVDQU 32(R10), Y13
-	ADDQ    $0x40, R10
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 1152(CX), Y7
-	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1216(CX), Y7
-	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y7
-	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 64 bytes from input 7 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 1344(CX), Y7
-	VMOVDQU 1376(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1408(CX), Y7
-	VMOVDQU 1440(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1472(CX), Y7
-	VMOVDQU 1504(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Store 8 outputs
+	MOVQ    (R10), R12
+	VMOVDQU Y0, (R12)(R11*1)
+	MOVQ    24(R10), R12
+	VMOVDQU Y1, (R12)(R11*1)
+	MOVQ    48(R10), R12
+	VMOVDQU Y2, (R12)(R11*1)
+	MOVQ    72(R10), R12
+	VMOVDQU Y3, (R12)(R11*1)
+	MOVQ    96(R10), R12
+	VMOVDQU Y4, (R12)(R11*1)
+	MOVQ    120(R10), R12
+	VMOVDQU Y5, (R12)(R11*1)
+	MOVQ    144(R10), R12
+	VMOVDQU Y6, (R12)(R11*1)
+	MOVQ    168(R10), R12
+	VMOVDQU Y7, (R12)(R11*1)
 
-	// Store 3 outputs
-	MOVQ    (R11), R14
-	VMOVDQU Y0, (R14)(R12*1)
-	VMOVDQU Y1, 32(R14)(R12*1)
-	MOVQ    24(R11), R14
-	VMOVDQU Y2, (R14)(R12*1)
-	VMOVDQU Y3, 32(R14)(R12*1)
-	MOVQ    48(R11), R14
-	VMOVDQU Y4, (R14)(R12*1)
-	VMOVDQU Y5, 32(R14)(R12*1)
+	// Prepare for next loop
+	ADDQ $0x20, R11
+	DECQ AX
+	JNZ  mulAvxTwo_6x8_loop
+	VZEROUPPER
+
+mulAvxTwo_6x8_end:
+	RET
+
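The AVX2 kernel above (mulAvxTwo_6x8) multiplies each 32-byte block of an input shard by its matrix coefficients with the split-nibble VPSHUFB technique: every coefficient is stored as two 16-entry lookup tables, one indexed by the low nibble and one by the high nibble of each input byte, and the two lookups are XORed into the running output. The XOR3WAY macro appears to fold that accumulation into a single three-way XOR (VPTERNLOGD) where AVX512VL is available, falling back to two VPXORs otherwise. A minimal scalar sketch of the same idea, using hypothetical helper and table names rather than anything exported by the package:

// mulAddSlice computes out[i] ^= c*in[i] in GF(2^8) for one fixed matrix
// coefficient c, mirroring what one VPSHUFB column of the kernel does.
// mulLow[n] holds c*n and mulHigh[n] holds c*(n<<4) in GF(2^8).
func mulAddSlice(out, in []byte, mulLow, mulHigh *[16]byte) {
	for i, b := range in {
		// Addition in GF(2^8) is XOR and multiplication distributes over it,
		// so c*b = c*(b & 0x0f) XOR c*((b >> 4) << 4).
		out[i] ^= mulLow[b&0x0f] ^ mulHigh[b>>4]
	}
}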
+// func mulGFNI_6x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x8_64(SB), $0-88
+	// Loading 22 of 48 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 58 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to input
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, DX
+
+mulGFNI_6x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 Z22, (R12)(R11*1)
+	MOVQ      24(R10), R12
+	VMOVDQU64 Z23, (R12)(R11*1)
+	MOVQ      48(R10), R12
+	VMOVDQU64 Z24, (R12)(R11*1)
+	MOVQ      72(R10), R12
+	VMOVDQU64 Z25, (R12)(R11*1)
+	MOVQ      96(R10), R12
+	VMOVDQU64 Z26, (R12)(R11*1)
+	MOVQ      120(R10), R12
+	VMOVDQU64 Z27, (R12)(R11*1)
+	MOVQ      144(R10), R12
+	VMOVDQU64 Z28, (R12)(R11*1)
+	MOVQ      168(R10), R12
+	VMOVDQU64 Z29, (R12)(R11*1)
 
 	// Prepare for next loop
-	ADDQ $0x40, R12
-	DECQ R13
-	JNZ  mulAvxTwo_8x3_64_loop
+	ADDQ $0x40, R11
+	DECQ AX
+	JNZ  mulGFNI_6x8_64_loop
 	VZEROUPPER
 
-mulAvxTwo_8x3_64_end:
+mulGFNI_6x8_64_end:
 	RET
 
-// func mulAvxTwo_8x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_8x4(SB), NOSPLIT, $8-88
+// func mulGFNI_6x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x8_64Xor(SB), $0-88
+	// Loading 22 of 48 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 58 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to input
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, DX
+
+mulGFNI_6x8_64Xor_loop:
+	// Load 8 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 (R12)(R11*1), Z22
+	MOVQ      24(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z23
+	MOVQ      48(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z24
+	MOVQ      72(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z25
+	MOVQ      96(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z26
+	MOVQ      120(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z27
+	MOVQ      144(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z28
+	MOVQ      168(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 Z22, (R12)(R11*1)
+	MOVQ      24(R10), R12
+	VMOVDQU64 Z23, (R12)(R11*1)
+	MOVQ      48(R10), R12
+	VMOVDQU64 Z24, (R12)(R11*1)
+	MOVQ      72(R10), R12
+	VMOVDQU64 Z25, (R12)(R11*1)
+	MOVQ      96(R10), R12
+	VMOVDQU64 Z26, (R12)(R11*1)
+	MOVQ      120(R10), R12
+	VMOVDQU64 Z27, (R12)(R11*1)
+	MOVQ      144(R10), R12
+	VMOVDQU64 Z28, (R12)(R11*1)
+	MOVQ      168(R10), R12
+	VMOVDQU64 Z29, (R12)(R11*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R11
+	DECQ AX
+	JNZ  mulGFNI_6x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x8_64Xor_end:
+	RET
+
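The two GFNI kernels above replace the nibble tables with VGF2P8AFFINEQB: each matrix coefficient is encoded as an 8x8 bit matrix (8 bytes), broadcast with VBROADCASTF32X2 while Z registers last and read through the .BCST memory form afterwards, and a single instruction then multiplies 64 input bytes by that constant in GF(2^8). The only difference between a kernel and its Xor twin is output initialization: the plain variant computes the output shards from scratch, while the Xor variant first loads the existing outputs and accumulates into them, which appears to be used when the destinations already hold partial results. A hedged scalar sketch of that split, with a caller-supplied multiply function standing in for one generated column (illustrative names only, not the package's internals):

// encodeRow illustrates the plain-vs-Xor behaviour of the generated kernels:
// each output byte is a GF(2^8) dot product of the inputs with one row of
// the coding matrix, either written fresh or XORed onto existing data.
func encodeRow(out []byte, inputs [][]byte, mulByCoeff []func(byte) byte, xorMode bool) {
	if !xorMode {
		for i := range out {
			out[i] = 0 // plain kernel: outputs start from zero
		}
	}
	for j, in := range inputs {
		mul := mulByCoeff[j]
		for i, b := range in {
			out[i] ^= mul(b) // Xor kernel: accumulate onto what is already there
		}
	}
}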
+// func mulAvxTwo_6x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x8Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 73 YMM used
+	// Destination kept on stack
+	// Full registers estimated 109 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x4_end
+	JZ    mulAvxTwo_6x8Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  (R12), R13
-	MOVQ  24(R12), R14
-	MOVQ  48(R12), R15
-	MOVQ  72(R12), R12
-	MOVQ  start+72(FP), BP
-
-	// Add start offset to output
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R12
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to input
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, R9
-	ADDQ         BP, R10
-	ADDQ         BP, R11
-	ADDQ         BP, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_8x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X8
+	VPBROADCASTB X8, Y8
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_6x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	MOVQ    (R10), R12
+	VMOVDQU (R12)(R11*1), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	MOVQ    24(R10), R12
+	VMOVDQU (R12)(R11*1), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	MOVQ    48(R10), R12
+	VMOVDQU (R12)(R11*1), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	MOVQ    72(R10), R12
+	VMOVDQU (R12)(R11*1), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	MOVQ    96(R10), R12
+	VMOVDQU (R12)(R11*1), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	MOVQ    120(R10), R12
+	VMOVDQU (R12)(R11*1), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	MOVQ    144(R10), R12
+	VMOVDQU (R12)(R11*1), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	MOVQ    168(R10), R12
+	VMOVDQU (R12)(R11*1), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (SI), Y7
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (DI), Y7
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 3 to 4 outputs
-	VMOVDQU (R8), Y7
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 896(CX), Y5
-	VMOVDQU 928(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 960(CX), Y5
-	VMOVDQU 992(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 4 to 4 outputs
-	VMOVDQU (R9), Y7
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1024(CX), Y5
-	VMOVDQU 1056(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1088(CX), Y5
-	VMOVDQU 1120(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1152(CX), Y5
-	VMOVDQU 1184(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1216(CX), Y5
-	VMOVDQU 1248(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 5 to 4 outputs
-	VMOVDQU (R10), Y7
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1280(CX), Y5
-	VMOVDQU 1312(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1344(CX), Y5
-	VMOVDQU 1376(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1408(CX), Y5
-	VMOVDQU 1440(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1472(CX), Y5
-	VMOVDQU 1504(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 6 to 4 outputs
-	VMOVDQU (R11), Y7
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1536(CX), Y5
-	VMOVDQU 1568(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1600(CX), Y5
-	VMOVDQU 1632(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1664(CX), Y5
-	VMOVDQU 1696(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1728(CX), Y5
-	VMOVDQU 1760(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 7 to 4 outputs
-	VMOVDQU (DX), Y7
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1792(CX), Y5
-	VMOVDQU 1824(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1856(CX), Y5
-	VMOVDQU 1888(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1920(CX), Y5
-	VMOVDQU 1952(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1984(CX), Y5
-	VMOVDQU 2016(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Store 4 outputs
-	VMOVDQU Y0, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y1, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y2, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y3, (R12)
-	ADDQ    $0x20, R12
+	// Store 8 outputs
+	MOVQ    (R10), R12
+	VMOVDQU Y0, (R12)(R11*1)
+	MOVQ    24(R10), R12
+	VMOVDQU Y1, (R12)(R11*1)
+	MOVQ    48(R10), R12
+	VMOVDQU Y2, (R12)(R11*1)
+	MOVQ    72(R10), R12
+	VMOVDQU Y3, (R12)(R11*1)
+	MOVQ    96(R10), R12
+	VMOVDQU Y4, (R12)(R11*1)
+	MOVQ    120(R10), R12
+	VMOVDQU Y5, (R12)(R11*1)
+	MOVQ    144(R10), R12
+	VMOVDQU Y6, (R12)(R11*1)
+	MOVQ    168(R10), R12
+	VMOVDQU Y7, (R12)(R11*1)
 
 	// Prepare for next loop
+	ADDQ $0x20, R11
 	DECQ AX
-	JNZ  mulAvxTwo_8x4_loop
+	JNZ  mulAvxTwo_6x8Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_8x4_end:
+mulAvxTwo_6x8Xor_end:
 	RET
 
-// func mulAvxTwo_8x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_8x5(SB), NOSPLIT, $8-88
+// func mulAvxTwo_6x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x9(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 90 YMM used
+	// Destination kept on stack
+	// Full registers estimated 122 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x5_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), AX
-	MOVQ  out_base+48(FP), R11
-	MOVQ  (R11), R12
-	MOVQ  24(R11), R13
-	MOVQ  48(R11), R14
-	MOVQ  72(R11), R15
-	MOVQ  96(R11), R11
-	MOVQ  start+72(FP), BP
-
-	// Add start offset to output
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R11
+	JZ    mulAvxTwo_6x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to input
-	ADDQ         BP, DX
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, R9
-	ADDQ         BP, R10
-	ADDQ         BP, AX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X5
-	VPBROADCASTB X5, Y5
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_8x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (DX), Y8
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
-	VMOVDQU 96(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 128(CX), Y6
-	VMOVDQU 160(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 256(CX), Y6
-	VMOVDQU 288(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X9
+	VPBROADCASTB X9, Y9
 
-	// Load and process 32 bytes from input 1 to 5 outputs
-	VMOVDQU (BX), Y8
+mulAvxTwo_6x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 320(CX), Y6
-	VMOVDQU 352(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 384(CX), Y6
-	VMOVDQU 416(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 448(CX), Y6
-	VMOVDQU 480(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 512(CX), Y6
-	VMOVDQU 544(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 576(CX), Y6
-	VMOVDQU 608(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
 
-	// Load and process 32 bytes from input 2 to 5 outputs
-	VMOVDQU (SI), Y8
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 640(CX), Y6
-	VMOVDQU 672(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 704(CX), Y6
-	VMOVDQU 736(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 768(CX), Y6
-	VMOVDQU 800(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 832(CX), Y6
-	VMOVDQU 864(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 896(CX), Y6
-	VMOVDQU 928(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 3 to 5 outputs
-	VMOVDQU (DI), Y8
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 960(CX), Y6
-	VMOVDQU 992(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1024(CX), Y6
-	VMOVDQU 1056(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1088(CX), Y6
-	VMOVDQU 1120(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1152(CX), Y6
-	VMOVDQU 1184(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1216(CX), Y6
-	VMOVDQU 1248(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 4 to 5 outputs
-	VMOVDQU (R8), Y8
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1280(CX), Y6
-	VMOVDQU 1312(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1344(CX), Y6
-	VMOVDQU 1376(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1408(CX), Y6
-	VMOVDQU 1440(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1472(CX), Y6
-	VMOVDQU 1504(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1536(CX), Y6
-	VMOVDQU 1568(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 5 to 5 outputs
-	VMOVDQU (R9), Y8
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1600(CX), Y6
-	VMOVDQU 1632(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1664(CX), Y6
-	VMOVDQU 1696(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1728(CX), Y6
-	VMOVDQU 1760(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1792(CX), Y6
-	VMOVDQU 1824(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1856(CX), Y6
-	VMOVDQU 1888(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 6 to 5 outputs
-	VMOVDQU (R10), Y8
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1920(CX), Y6
-	VMOVDQU 1952(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1984(CX), Y6
-	VMOVDQU 2016(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2048(CX), Y6
-	VMOVDQU 2080(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2112(CX), Y6
-	VMOVDQU 2144(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2176(CX), Y6
-	VMOVDQU 2208(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 7 to 5 outputs
-	VMOVDQU (AX), Y8
-	ADDQ    $0x20, AX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 2240(CX), Y6
-	VMOVDQU 2272(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 2304(CX), Y6
-	VMOVDQU 2336(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2368(CX), Y6
-	VMOVDQU 2400(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2432(CX), Y6
-	VMOVDQU 2464(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2496(CX), Y6
-	VMOVDQU 2528(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	// Store 9 outputs
+	MOVQ    (R10), R12
+	VMOVDQU Y0, (R12)(R11*1)
+	MOVQ    24(R10), R12
+	VMOVDQU Y1, (R12)(R11*1)
+	MOVQ    48(R10), R12
+	VMOVDQU Y2, (R12)(R11*1)
+	MOVQ    72(R10), R12
+	VMOVDQU Y3, (R12)(R11*1)
+	MOVQ    96(R10), R12
+	VMOVDQU Y4, (R12)(R11*1)
+	MOVQ    120(R10), R12
+	VMOVDQU Y5, (R12)(R11*1)
+	MOVQ    144(R10), R12
+	VMOVDQU Y6, (R12)(R11*1)
+	MOVQ    168(R10), R12
+	VMOVDQU Y7, (R12)(R11*1)
+	MOVQ    192(R10), R12
+	VMOVDQU Y8, (R12)(R11*1)
 
-	// Store 5 outputs
-	VMOVDQU Y0, (R12)
-	ADDQ    $0x20, R12
-	VMOVDQU Y1, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y2, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y3, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y4, (R11)
-	ADDQ    $0x20, R11
+	// Prepare for next loop
+	ADDQ $0x20, R11
+	DECQ AX
+	JNZ  mulAvxTwo_6x9_loop
+	VZEROUPPER
+
+mulAvxTwo_6x9_end:
+	RET
+
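For context, these generated routines sit behind the package's Encoder, which picks a kernel at runtime based on detected CPU features and the data/parity geometry. A minimal usage sketch against the public klauspost/reedsolomon API; the 6 data / 8 parity split is chosen only to mirror the 6x8 kernels above:

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 6 data shards, 8 parity shards.
	enc, err := reedsolomon.New(6, 8)
	if err != nil {
		log.Fatal(err)
	}
	data := bytes.Repeat([]byte("example payload "), 4096)
	// Split pads the data into data shards and also allocates the parity shards.
	shards, err := enc.Split(data)
	if err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}
	ok, err := enc.Verify(shards)
	if err != nil || !ok {
		log.Fatal("parity verification failed")
	}
	log.Printf("encoded %d shards of %d bytes each", len(shards), len(shards[0]))
}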
+// func mulGFNI_6x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x9_64(SB), $0-88
+	// Loading 21 of 54 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 65 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to input
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, DX
+
+mulGFNI_6x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 Z21, (R12)(R11*1)
+	MOVQ      24(R10), R12
+	VMOVDQU64 Z22, (R12)(R11*1)
+	MOVQ      48(R10), R12
+	VMOVDQU64 Z23, (R12)(R11*1)
+	MOVQ      72(R10), R12
+	VMOVDQU64 Z24, (R12)(R11*1)
+	MOVQ      96(R10), R12
+	VMOVDQU64 Z25, (R12)(R11*1)
+	MOVQ      120(R10), R12
+	VMOVDQU64 Z26, (R12)(R11*1)
+	MOVQ      144(R10), R12
+	VMOVDQU64 Z27, (R12)(R11*1)
+	MOVQ      168(R10), R12
+	VMOVDQU64 Z28, (R12)(R11*1)
+	MOVQ      192(R10), R12
+	VMOVDQU64 Z29, (R12)(R11*1)
 
 	// Prepare for next loop
-	DECQ BP
-	JNZ  mulAvxTwo_8x5_loop
+	ADDQ $0x40, R11
+	DECQ AX
+	JNZ  mulGFNI_6x9_64_loop
 	VZEROUPPER
 
-mulAvxTwo_8x5_end:
+mulGFNI_6x9_64_end:
 	RET
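The GFNI kernels replace the two nibble lookups with a single VGF2P8AFFINEQB per (input, output) pair: each GF(2^8) coefficient is pre-expanded into an 8x8 bit-matrix packed in one uint64, which is why this shape takes a matrix []uint64 with 6x9 = 54 tables, 21 of them kept in registers and the rest broadcast from memory with VGF2P8AFFINEQB.BCST. The sketch below models only the linear-algebra idea, assuming a 0x1D reduction polynomial; it does not reproduce the exact bit and byte packing the instruction expects.

package main

import "fmt"

// gfMul is a plain shift-and-reduce GF(2^8) multiply (reduction polynomial
// assumed to be x^8+x^4+x^3+x^2+1 for illustration).
func gfMul(a, b byte) byte {
	var p byte
	for b != 0 {
		if b&1 != 0 {
			p ^= a
		}
		hi := a & 0x80
		a <<= 1
		if hi != 0 {
			a ^= 0x1d
		}
		b >>= 1
	}
	return p
}

// bitMatrix captures multiplication by a constant c as a GF(2)-linear map:
// column j is c*2^j, so c*x is the XOR of the columns selected by x's bits.
func bitMatrix(c byte) (cols [8]byte) {
	for j := 0; j < 8; j++ {
		cols[j] = gfMul(c, 1<<j)
	}
	return
}

// apply multiplies x by the constant encoded in cols, one bit at a time.
func apply(cols [8]byte, x byte) byte {
	var r byte
	for j := 0; j < 8; j++ {
		if x&(1<<j) != 0 {
			r ^= cols[j]
		}
	}
	return r
}

func main() {
	c, x := byte(0x57), byte(0x83)
	cols := bitMatrix(c)
	fmt.Printf("direct=%#02x via-bit-matrix=%#02x\n", gfMul(c, x), apply(cols, x))
}

Multiplication by a constant is GF(2)-linear, so applying the bit-matrix and multiplying directly give the same byte.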
 
-// func mulAvxTwo_8x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_8x6(SB), NOSPLIT, $0-88
+// func mulGFNI_6x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x9_64Xor(SB), $0-88
+	// Loading 21 of 54 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 65 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to input
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, DX
+
+mulGFNI_6x9_64Xor_loop:
+	// Load 9 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 (R12)(R11*1), Z21
+	MOVQ      24(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z22
+	MOVQ      48(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z23
+	MOVQ      72(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z24
+	MOVQ      96(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z25
+	MOVQ      120(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z26
+	MOVQ      144(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z27
+	MOVQ      168(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z28
+	MOVQ      192(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 Z21, (R12)(R11*1)
+	MOVQ      24(R10), R12
+	VMOVDQU64 Z22, (R12)(R11*1)
+	MOVQ      48(R10), R12
+	VMOVDQU64 Z23, (R12)(R11*1)
+	MOVQ      72(R10), R12
+	VMOVDQU64 Z24, (R12)(R11*1)
+	MOVQ      96(R10), R12
+	VMOVDQU64 Z25, (R12)(R11*1)
+	MOVQ      120(R10), R12
+	VMOVDQU64 Z26, (R12)(R11*1)
+	MOVQ      144(R10), R12
+	VMOVDQU64 Z27, (R12)(R11*1)
+	MOVQ      168(R10), R12
+	VMOVDQU64 Z28, (R12)(R11*1)
+	MOVQ      192(R10), R12
+	VMOVDQU64 Z29, (R12)(R11*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R11
+	DECQ AX
+	JNZ  mulGFNI_6x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x9_64Xor_end:
+	RET
+
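The Xor variants differ from the plain kernels only in their destination contract: mulGFNI_6x9_64 overwrites the nine output blocks with the freshly computed products, while mulGFNI_6x9_64Xor first loads the existing output blocks and folds the new products into them, which is what a caller needs when contributions are accumulated over several passes. A scalar sketch of the two contracts follows, with the per-byte product abstracted behind a callback; the names and shapes are illustrative only.

package main

import "fmt"

// product stands in for the GF(2^8) matrix/shard product a kernel computes
// for output j at byte offset x.
type product func(j, x int) byte

// store writes the product into out, as the plain kernels do.
func store(out [][]byte, n int, p product) {
	for j := range out {
		for x := 0; x < n; x++ {
			out[j][x] = p(j, x)
		}
	}
}

// accumulate XORs the product into whatever out already holds,
// as the *Xor kernels do.
func accumulate(out [][]byte, n int, p product) {
	for j := range out {
		for x := 0; x < n; x++ {
			out[j][x] ^= p(j, x)
		}
	}
}

func main() {
	out := [][]byte{{0xf0, 0x0f}}
	p := product(func(j, x int) byte { return byte(0x11 * (x + 1)) })
	store(out, 2, p)
	fmt.Printf("%x\n", out[0]) // 1122: previous contents are discarded
	accumulate(out, 2, p)
	fmt.Printf("%x\n", out[0]) // 0000: folding the same product in twice cancels it
}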
+// func mulAvxTwo_6x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x9Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 107 YMM used
+	// Full registers estimated 122 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x6_end
+	JZ    mulAvxTwo_6x9Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X6
-	VPBROADCASTB X6, Y6
-
-mulAvxTwo_8x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X9
+	VPBROADCASTB X9, Y9
 
-	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (BX), Y9
+mulAvxTwo_6x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	MOVQ    (R10), R12
+	VMOVDQU (R12)(R11*1), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	MOVQ    24(R10), R12
+	VMOVDQU (R12)(R11*1), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	MOVQ    48(R10), R12
+	VMOVDQU (R12)(R11*1), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	MOVQ    72(R10), R12
+	VMOVDQU (R12)(R11*1), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	MOVQ    96(R10), R12
+	VMOVDQU (R12)(R11*1), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	MOVQ    120(R10), R12
+	VMOVDQU (R12)(R11*1), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	MOVQ    144(R10), R12
+	VMOVDQU (R12)(R11*1), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	MOVQ    168(R10), R12
+	VMOVDQU (R12)(R11*1), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	MOVQ    192(R10), R12
+	VMOVDQU (R12)(R11*1), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 1 to 6 outputs
-	VMOVDQU (SI), Y9
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 384(CX), Y7
-	VMOVDQU 416(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 448(CX), Y7
-	VMOVDQU 480(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 512(CX), Y7
-	VMOVDQU 544(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 576(CX), Y7
-	VMOVDQU 608(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 640(CX), Y7
-	VMOVDQU 672(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 704(CX), Y7
-	VMOVDQU 736(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 2 to 6 outputs
-	VMOVDQU (DI), Y9
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 768(CX), Y7
-	VMOVDQU 800(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 832(CX), Y7
-	VMOVDQU 864(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 896(CX), Y7
-	VMOVDQU 928(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 960(CX), Y7
-	VMOVDQU 992(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1024(CX), Y7
-	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1088(CX), Y7
-	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 3 to 6 outputs
-	VMOVDQU (R8), Y9
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1152(CX), Y7
-	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1216(CX), Y7
-	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1280(CX), Y7
-	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1344(CX), Y7
-	VMOVDQU 1376(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1408(CX), Y7
-	VMOVDQU 1440(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1472(CX), Y7
-	VMOVDQU 1504(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 4 to 6 outputs
-	VMOVDQU (R9), Y9
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1536(CX), Y7
-	VMOVDQU 1568(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1600(CX), Y7
-	VMOVDQU 1632(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1664(CX), Y7
-	VMOVDQU 1696(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1728(CX), Y7
-	VMOVDQU 1760(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1792(CX), Y7
-	VMOVDQU 1824(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1856(CX), Y7
-	VMOVDQU 1888(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 5 to 6 outputs
-	VMOVDQU (R10), Y9
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1920(CX), Y7
-	VMOVDQU 1952(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1984(CX), Y7
-	VMOVDQU 2016(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2048(CX), Y7
-	VMOVDQU 2080(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2112(CX), Y7
-	VMOVDQU 2144(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2176(CX), Y7
-	VMOVDQU 2208(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2240(CX), Y7
-	VMOVDQU 2272(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 6 to 6 outputs
-	VMOVDQU (R11), Y9
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 2304(CX), Y7
-	VMOVDQU 2336(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 2368(CX), Y7
-	VMOVDQU 2400(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2432(CX), Y7
-	VMOVDQU 2464(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2496(CX), Y7
-	VMOVDQU 2528(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2560(CX), Y7
-	VMOVDQU 2592(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2624(CX), Y7
-	VMOVDQU 2656(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 7 to 6 outputs
-	VMOVDQU (DX), Y9
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (DX), Y12
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 2688(CX), Y7
-	VMOVDQU 2720(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 2752(CX), Y7
-	VMOVDQU 2784(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2816(CX), Y7
-	VMOVDQU 2848(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2880(CX), Y7
-	VMOVDQU 2912(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2944(CX), Y7
-	VMOVDQU 2976(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 3008(CX), Y7
-	VMOVDQU 3040(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Store 6 outputs
-	MOVQ    (R12), R14
-	VMOVDQU Y0, (R14)(R13*1)
-	MOVQ    24(R12), R14
-	VMOVDQU Y1, (R14)(R13*1)
-	MOVQ    48(R12), R14
-	VMOVDQU Y2, (R14)(R13*1)
-	MOVQ    72(R12), R14
-	VMOVDQU Y3, (R14)(R13*1)
-	MOVQ    96(R12), R14
-	VMOVDQU Y4, (R14)(R13*1)
-	MOVQ    120(R12), R14
-	VMOVDQU Y5, (R14)(R13*1)
+	// Store 9 outputs
+	MOVQ    (R10), R12
+	VMOVDQU Y0, (R12)(R11*1)
+	MOVQ    24(R10), R12
+	VMOVDQU Y1, (R12)(R11*1)
+	MOVQ    48(R10), R12
+	VMOVDQU Y2, (R12)(R11*1)
+	MOVQ    72(R10), R12
+	VMOVDQU Y3, (R12)(R11*1)
+	MOVQ    96(R10), R12
+	VMOVDQU Y4, (R12)(R11*1)
+	MOVQ    120(R10), R12
+	VMOVDQU Y5, (R12)(R11*1)
+	MOVQ    144(R10), R12
+	VMOVDQU Y6, (R12)(R11*1)
+	MOVQ    168(R10), R12
+	VMOVDQU Y7, (R12)(R11*1)
+	MOVQ    192(R10), R12
+	VMOVDQU Y8, (R12)(R11*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R13
+	ADDQ $0x20, R11
 	DECQ AX
-	JNZ  mulAvxTwo_8x6_loop
+	JNZ  mulAvxTwo_6x9Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_8x6_end:
+mulAvxTwo_6x9Xor_end:
 	RET
 
-// func mulAvxTwo_8x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_8x7(SB), NOSPLIT, $0-88
+// func mulAvxTwo_6x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_6x10(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 124 YMM used
+	// Full registers estimated 135 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x7_end
+	JZ    mulAvxTwo_6x10_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X7
-	VPBROADCASTB X7, Y7
-
-mulAvxTwo_8x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X10
+	VPBROADCASTB X10, Y10
 
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (BX), Y10
+mulAvxTwo_6x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU (CX), Y8
-	VMOVDQU 32(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 64(CX), Y8
-	VMOVDQU 96(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 128(CX), Y8
-	VMOVDQU 160(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 192(CX), Y8
-	VMOVDQU 224(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 320(CX), Y8
-	VMOVDQU 352(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 384(CX), Y8
-	VMOVDQU 416(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
 
-	// Load and process 32 bytes from input 1 to 7 outputs
-	VMOVDQU (SI), Y10
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 448(CX), Y8
-	VMOVDQU 480(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 512(CX), Y8
-	VMOVDQU 544(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 576(CX), Y8
-	VMOVDQU 608(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 640(CX), Y8
-	VMOVDQU 672(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 704(CX), Y8
-	VMOVDQU 736(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 768(CX), Y8
-	VMOVDQU 800(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 832(CX), Y8
-	VMOVDQU 864(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 2 to 7 outputs
-	VMOVDQU (DI), Y10
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 896(CX), Y8
-	VMOVDQU 928(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 960(CX), Y8
-	VMOVDQU 992(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1024(CX), Y8
-	VMOVDQU 1056(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1088(CX), Y8
-	VMOVDQU 1120(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1152(CX), Y8
-	VMOVDQU 1184(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1216(CX), Y8
-	VMOVDQU 1248(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1280(CX), Y8
-	VMOVDQU 1312(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 3 to 7 outputs
-	VMOVDQU (R8), Y10
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1344(CX), Y8
-	VMOVDQU 1376(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1408(CX), Y8
-	VMOVDQU 1440(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1472(CX), Y8
-	VMOVDQU 1504(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1536(CX), Y8
-	VMOVDQU 1568(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1600(CX), Y8
-	VMOVDQU 1632(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1664(CX), Y8
-	VMOVDQU 1696(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1728(CX), Y8
-	VMOVDQU 1760(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 4 to 7 outputs
-	VMOVDQU (R9), Y10
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1792(CX), Y8
-	VMOVDQU 1824(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1856(CX), Y8
-	VMOVDQU 1888(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1920(CX), Y8
-	VMOVDQU 1952(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1984(CX), Y8
-	VMOVDQU 2016(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2048(CX), Y8
-	VMOVDQU 2080(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2112(CX), Y8
-	VMOVDQU 2144(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2176(CX), Y8
-	VMOVDQU 2208(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
-	// Load and process 32 bytes from input 5 to 7 outputs
-	VMOVDQU (R10), Y10
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2240(CX), Y8
-	VMOVDQU 2272(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2304(CX), Y8
-	VMOVDQU 2336(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2368(CX), Y8
-	VMOVDQU 2400(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2432(CX), Y8
-	VMOVDQU 2464(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2496(CX), Y8
-	VMOVDQU 2528(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2560(CX), Y8
-	VMOVDQU 2592(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2624(CX), Y8
-	VMOVDQU 2656(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 6 to 7 outputs
-	VMOVDQU (R11), Y10
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2688(CX), Y8
-	VMOVDQU 2720(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2752(CX), Y8
-	VMOVDQU 2784(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2816(CX), Y8
-	VMOVDQU 2848(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2880(CX), Y8
-	VMOVDQU 2912(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2944(CX), Y8
-	VMOVDQU 2976(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3008(CX), Y8
-	VMOVDQU 3040(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3072(CX), Y8
-	VMOVDQU 3104(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
-	// Load and process 32 bytes from input 7 to 7 outputs
-	VMOVDQU (DX), Y10
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (DX), Y13
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 3136(CX), Y8
-	VMOVDQU 3168(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 3200(CX), Y8
-	VMOVDQU 3232(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 3264(CX), Y8
-	VMOVDQU 3296(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 3328(CX), Y8
-	VMOVDQU 3360(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 3392(CX), Y8
-	VMOVDQU 3424(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3456(CX), Y8
-	VMOVDQU 3488(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3520(CX), Y8
-	VMOVDQU 3552(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Store 7 outputs
-	MOVQ    (R12), R14
-	VMOVDQU Y0, (R14)(R13*1)
-	MOVQ    24(R12), R14
-	VMOVDQU Y1, (R14)(R13*1)
-	MOVQ    48(R12), R14
-	VMOVDQU Y2, (R14)(R13*1)
-	MOVQ    72(R12), R14
-	VMOVDQU Y3, (R14)(R13*1)
-	MOVQ    96(R12), R14
-	VMOVDQU Y4, (R14)(R13*1)
-	MOVQ    120(R12), R14
-	VMOVDQU Y5, (R14)(R13*1)
-	MOVQ    144(R12), R14
-	VMOVDQU Y6, (R14)(R13*1)
+	// Store 10 outputs
+	MOVQ    (R10), R12
+	VMOVDQU Y0, (R12)(R11*1)
+	MOVQ    24(R10), R12
+	VMOVDQU Y1, (R12)(R11*1)
+	MOVQ    48(R10), R12
+	VMOVDQU Y2, (R12)(R11*1)
+	MOVQ    72(R10), R12
+	VMOVDQU Y3, (R12)(R11*1)
+	MOVQ    96(R10), R12
+	VMOVDQU Y4, (R12)(R11*1)
+	MOVQ    120(R10), R12
+	VMOVDQU Y5, (R12)(R11*1)
+	MOVQ    144(R10), R12
+	VMOVDQU Y6, (R12)(R11*1)
+	MOVQ    168(R10), R12
+	VMOVDQU Y7, (R12)(R11*1)
+	MOVQ    192(R10), R12
+	VMOVDQU Y8, (R12)(R11*1)
+	MOVQ    216(R10), R12
+	VMOVDQU Y9, (R12)(R11*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R13
+	ADDQ $0x20, R11
 	DECQ AX
-	JNZ  mulAvxTwo_8x7_loop
+	JNZ  mulAvxTwo_6x10_loop
 	VZEROUPPER
 
-mulAvxTwo_8x7_end:
+mulAvxTwo_6x10_end:
 	RET
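These kernels keep the destination on the stack: instead of pinning nine or ten output pointers in registers, every store reloads output j's base pointer with MOVQ 24*j(out_base) and then writes at the current offset. The 24-byte stride is the size of a Go slice header on amd64 (data pointer, length, capacity), which the snippet below confirms.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// A []byte header is {data uintptr, len int, cap int} = 24 bytes on amd64,
	// so in a [][]byte the j-th header, and hence its data pointer, sits at
	// offset 24*j from the slice base, exactly the offsets the MOVQs use.
	fmt.Println(unsafe.Sizeof([]byte(nil))) // 24
}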
 
-// func mulAvxTwo_8x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_8x8(SB), NOSPLIT, $0-88
+// func mulGFNI_6x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_6x10_64(SB), $0-88
+	// Loading 20 of 60 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 72 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to input
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, DX
+
+mulGFNI_6x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 Z20, (R12)(R11*1)
+	MOVQ      24(R10), R12
+	VMOVDQU64 Z21, (R12)(R11*1)
+	MOVQ      48(R10), R12
+	VMOVDQU64 Z22, (R12)(R11*1)
+	MOVQ      72(R10), R12
+	VMOVDQU64 Z23, (R12)(R11*1)
+	MOVQ      96(R10), R12
+	VMOVDQU64 Z24, (R12)(R11*1)
+	MOVQ      120(R10), R12
+	VMOVDQU64 Z25, (R12)(R11*1)
+	MOVQ      144(R10), R12
+	VMOVDQU64 Z26, (R12)(R11*1)
+	MOVQ      168(R10), R12
+	VMOVDQU64 Z27, (R12)(R11*1)
+	MOVQ      192(R10), R12
+	VMOVDQU64 Z28, (R12)(R11*1)
+	MOVQ      216(R10), R12
+	VMOVDQU64 Z29, (R12)(R11*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R11
+	DECQ AX
+	JNZ  mulGFNI_6x10_64_loop
+	VZEROUPPER
+
+mulGFNI_6x10_64_end:
+	RET
+
+// func mulGFNI_6x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_6x10_64Xor(SB), $0-88
+	// Loading 20 of 60 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 72 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_6x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), DX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to input
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, DX
+
+mulGFNI_6x10_64Xor_loop:
+	// Load 10 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 (R12)(R11*1), Z20
+	MOVQ      24(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z21
+	MOVQ      48(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z22
+	MOVQ      72(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z23
+	MOVQ      96(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z24
+	MOVQ      120(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z25
+	MOVQ      144(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z26
+	MOVQ      168(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z27
+	MOVQ      192(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z28
+	MOVQ      216(R10), R12
+	VMOVDQU64 (R12)(R11*1), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R10), R12
+	VMOVDQU64 Z20, (R12)(R11*1)
+	MOVQ      24(R10), R12
+	VMOVDQU64 Z21, (R12)(R11*1)
+	MOVQ      48(R10), R12
+	VMOVDQU64 Z22, (R12)(R11*1)
+	MOVQ      72(R10), R12
+	VMOVDQU64 Z23, (R12)(R11*1)
+	MOVQ      96(R10), R12
+	VMOVDQU64 Z24, (R12)(R11*1)
+	MOVQ      120(R10), R12
+	VMOVDQU64 Z25, (R12)(R11*1)
+	MOVQ      144(R10), R12
+	VMOVDQU64 Z26, (R12)(R11*1)
+	MOVQ      168(R10), R12
+	VMOVDQU64 Z27, (R12)(R11*1)
+	MOVQ      192(R10), R12
+	VMOVDQU64 Z28, (R12)(R11*1)
+	MOVQ      216(R10), R12
+	VMOVDQU64 Z29, (R12)(R11*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R11
+	DECQ AX
+	JNZ  mulGFNI_6x10_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_6x10_64Xor_end:
+	RET
+
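The mulGFNI_*_64 and mulGFNI_*_64Xor kernels above apply one 8x8 GF(2) bit matrix (a single uint64 from the matrix slice) per input/output pair to every byte, 64 bytes per iteration, via VGF2P8AFFINEQB, and the Xor variants accumulate into the existing output instead of overwriting it. A rough pure-Go model of the same computation is sketched below; the names are illustrative only and do not exist in the upstream package, and the scalar loop ignores the 64-byte blocking and alignment handling done by the assembly.

	package gfniref // illustrative sketch, not part of the upstream source

	import "math/bits"

	// gf2p8AffineByte models the per-byte effect of (V)GF2P8AFFINEQB with imm8 = 0:
	// bit i of the result is the GF(2) dot product (parity of the AND) of the input
	// byte with byte 7-i of the 64-bit matrix, following the documented pseudocode.
	func gf2p8AffineByte(matrix uint64, x byte) byte {
		var out byte
		for i := 0; i < 8; i++ {
			row := byte(matrix >> (8 * (7 - i)))
			if bits.OnesCount8(row&x)&1 == 1 {
				out |= 1 << i
			}
		}
		return out
	}

	// mulGFNIXorRef is an illustrative scalar equivalent of the *Xor kernels such as
	// mulGFNI_6x10_64Xor above: every output byte is XORed with the affine transform
	// of the corresponding byte of every input, using matrix[i*len(out)+j] as the
	// table for input i and output j (offset 160 = (2*10+0)*8 for input 2, output 0).
	func mulGFNIXorRef(matrix []uint64, in, out [][]byte, start, n int) {
		for k := start; k < start+n; k++ {
			for j := range out {
				acc := out[j][k]
				for i := range in {
					acc ^= gf2p8AffineByte(matrix[i*len(out)+j], in[i][k])
				}
				out[j][k] = acc
			}
		}
	}

The non-Xor variants differ only in that the first input initializes each output byte instead of XORing into a previously loaded value.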
+// func mulAvxTwo_6x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_6x10Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 141 YMM used
+	// Full registers estimated 135 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x8_end
+	JZ    mulAvxTwo_6x10Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), R8
 	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X8
-	VPBROADCASTB X8, Y8
-
-mulAvxTwo_8x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X10
+	VPBROADCASTB X10, Y10
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_6x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	MOVQ    (R10), R12
+	VMOVDQU (R12)(R11*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	MOVQ    24(R10), R12
+	VMOVDQU (R12)(R11*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	MOVQ    48(R10), R12
+	VMOVDQU (R12)(R11*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	MOVQ    72(R10), R12
+	VMOVDQU (R12)(R11*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	MOVQ    96(R10), R12
+	VMOVDQU (R12)(R11*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	MOVQ    120(R10), R12
+	VMOVDQU (R12)(R11*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	MOVQ    144(R10), R12
+	VMOVDQU (R12)(R11*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	MOVQ    168(R10), R12
+	VMOVDQU (R12)(R11*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	MOVQ    192(R10), R12
+	VMOVDQU (R12)(R11*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	MOVQ    216(R10), R12
+	VMOVDQU (R12)(R11*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (SI), Y11
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y9
-	VMOVDQU 672(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 704(CX), Y9
-	VMOVDQU 736(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 768(CX), Y9
-	VMOVDQU 800(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 832(CX), Y9
-	VMOVDQU 864(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 896(CX), Y9
-	VMOVDQU 928(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 960(CX), Y9
-	VMOVDQU 992(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 2 to 8 outputs
-	VMOVDQU (DI), Y11
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1024(CX), Y9
-	VMOVDQU 1056(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1088(CX), Y9
-	VMOVDQU 1120(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1152(CX), Y9
-	VMOVDQU 1184(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1216(CX), Y9
-	VMOVDQU 1248(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y9
-	VMOVDQU 1312(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1344(CX), Y9
-	VMOVDQU 1376(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1408(CX), Y9
-	VMOVDQU 1440(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1472(CX), Y9
-	VMOVDQU 1504(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 3 to 8 outputs
-	VMOVDQU (R8), Y11
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1536(CX), Y9
-	VMOVDQU 1568(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1600(CX), Y9
-	VMOVDQU 1632(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1664(CX), Y9
-	VMOVDQU 1696(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1728(CX), Y9
-	VMOVDQU 1760(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1792(CX), Y9
-	VMOVDQU 1824(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1856(CX), Y9
-	VMOVDQU 1888(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1920(CX), Y9
-	VMOVDQU 1952(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1984(CX), Y9
-	VMOVDQU 2016(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
-	// Load and process 32 bytes from input 4 to 8 outputs
-	VMOVDQU (R9), Y11
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2048(CX), Y9
-	VMOVDQU 2080(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2112(CX), Y9
-	VMOVDQU 2144(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2176(CX), Y9
-	VMOVDQU 2208(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2240(CX), Y9
-	VMOVDQU 2272(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2304(CX), Y9
-	VMOVDQU 2336(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2368(CX), Y9
-	VMOVDQU 2400(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2432(CX), Y9
-	VMOVDQU 2464(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 2496(CX), Y9
-	VMOVDQU 2528(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
-	// Load and process 32 bytes from input 5 to 8 outputs
-	VMOVDQU (R10), Y11
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2560(CX), Y9
-	VMOVDQU 2592(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2624(CX), Y9
-	VMOVDQU 2656(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2688(CX), Y9
-	VMOVDQU 2720(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2752(CX), Y9
-	VMOVDQU 2784(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2816(CX), Y9
-	VMOVDQU 2848(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2880(CX), Y9
-	VMOVDQU 2912(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2944(CX), Y9
-	VMOVDQU 2976(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3008(CX), Y9
-	VMOVDQU 3040(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
-	// Load and process 32 bytes from input 6 to 8 outputs
-	VMOVDQU (R11), Y11
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 3072(CX), Y9
-	VMOVDQU 3104(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 3136(CX), Y9
-	VMOVDQU 3168(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 3200(CX), Y9
-	VMOVDQU 3232(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 3264(CX), Y9
-	VMOVDQU 3296(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 3328(CX), Y9
-	VMOVDQU 3360(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 3392(CX), Y9
-	VMOVDQU 3424(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 3456(CX), Y9
-	VMOVDQU 3488(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3520(CX), Y9
-	VMOVDQU 3552(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
-	// Load and process 32 bytes from input 7 to 8 outputs
-	VMOVDQU (DX), Y11
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 3584(CX), Y9
-	VMOVDQU 3616(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 3648(CX), Y9
-	VMOVDQU 3680(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 3712(CX), Y9
-	VMOVDQU 3744(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 3776(CX), Y9
-	VMOVDQU 3808(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 3840(CX), Y9
-	VMOVDQU 3872(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 3904(CX), Y9
-	VMOVDQU 3936(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 3968(CX), Y9
-	VMOVDQU 4000(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 4032(CX), Y9
-	VMOVDQU 4064(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
-	// Store 8 outputs
-	MOVQ    (R12), R14
-	VMOVDQU Y0, (R14)(R13*1)
-	MOVQ    24(R12), R14
-	VMOVDQU Y1, (R14)(R13*1)
-	MOVQ    48(R12), R14
-	VMOVDQU Y2, (R14)(R13*1)
-	MOVQ    72(R12), R14
-	VMOVDQU Y3, (R14)(R13*1)
-	MOVQ    96(R12), R14
-	VMOVDQU Y4, (R14)(R13*1)
-	MOVQ    120(R12), R14
-	VMOVDQU Y5, (R14)(R13*1)
-	MOVQ    144(R12), R14
-	VMOVDQU Y6, (R14)(R13*1)
-	MOVQ    168(R12), R14
-	VMOVDQU Y7, (R14)(R13*1)
-
-	// Prepare for next loop
-	ADDQ $0x20, R13
-	DECQ AX
-	JNZ  mulAvxTwo_8x8_loop
-	VZEROUPPER
-
-mulAvxTwo_8x8_end:
-	RET
-
-// func mulAvxTwo_8x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT Β·mulAvxTwo_8x9(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 158 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_8x9_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
-
-	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X9
-	VPBROADCASTB X9, Y9
-
-mulAvxTwo_8x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Load and process 32 bytes from input 1 to 9 outputs
-	VMOVDQU (SI), Y12
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 576(CX), Y10
-	VMOVDQU 608(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 640(CX), Y10
-	VMOVDQU 672(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 704(CX), Y10
-	VMOVDQU 736(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 768(CX), Y10
-	VMOVDQU 800(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 832(CX), Y10
-	VMOVDQU 864(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 896(CX), Y10
-	VMOVDQU 928(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 960(CX), Y10
-	VMOVDQU 992(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1024(CX), Y10
-	VMOVDQU 1056(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1088(CX), Y10
-	VMOVDQU 1120(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 2 to 9 outputs
-	VMOVDQU (DI), Y12
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1152(CX), Y10
-	VMOVDQU 1184(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1216(CX), Y10
-	VMOVDQU 1248(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1280(CX), Y10
-	VMOVDQU 1312(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1344(CX), Y10
-	VMOVDQU 1376(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1408(CX), Y10
-	VMOVDQU 1440(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 1472(CX), Y10
-	VMOVDQU 1504(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 1536(CX), Y10
-	VMOVDQU 1568(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1600(CX), Y10
-	VMOVDQU 1632(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1664(CX), Y10
-	VMOVDQU 1696(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Load and process 32 bytes from input 3 to 9 outputs
-	VMOVDQU (R8), Y12
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1728(CX), Y10
-	VMOVDQU 1760(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1792(CX), Y10
-	VMOVDQU 1824(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1856(CX), Y10
-	VMOVDQU 1888(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1920(CX), Y10
-	VMOVDQU 1952(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1984(CX), Y10
-	VMOVDQU 2016(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2048(CX), Y10
-	VMOVDQU 2080(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2112(CX), Y10
-	VMOVDQU 2144(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2176(CX), Y10
-	VMOVDQU 2208(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2240(CX), Y10
-	VMOVDQU 2272(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 4 to 9 outputs
-	VMOVDQU (R9), Y12
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2304(CX), Y10
-	VMOVDQU 2336(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2368(CX), Y10
-	VMOVDQU 2400(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 2432(CX), Y10
-	VMOVDQU 2464(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 2496(CX), Y10
-	VMOVDQU 2528(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 2560(CX), Y10
-	VMOVDQU 2592(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2624(CX), Y10
-	VMOVDQU 2656(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2688(CX), Y10
-	VMOVDQU 2720(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2752(CX), Y10
-	VMOVDQU 2784(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2816(CX), Y10
-	VMOVDQU 2848(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 5 to 9 outputs
-	VMOVDQU (R10), Y12
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
+	// Store 10 outputs
+	MOVQ    (R10), R12
+	VMOVDQU Y0, (R12)(R11*1)
+	MOVQ    24(R10), R12
+	VMOVDQU Y1, (R12)(R11*1)
+	MOVQ    48(R10), R12
+	VMOVDQU Y2, (R12)(R11*1)
+	MOVQ    72(R10), R12
+	VMOVDQU Y3, (R12)(R11*1)
+	MOVQ    96(R10), R12
+	VMOVDQU Y4, (R12)(R11*1)
+	MOVQ    120(R10), R12
+	VMOVDQU Y5, (R12)(R11*1)
+	MOVQ    144(R10), R12
+	VMOVDQU Y6, (R12)(R11*1)
+	MOVQ    168(R10), R12
+	VMOVDQU Y7, (R12)(R11*1)
+	MOVQ    192(R10), R12
+	VMOVDQU Y8, (R12)(R11*1)
+	MOVQ    216(R10), R12
+	VMOVDQU Y9, (R12)(R11*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R11
+	DECQ AX
+	JNZ  mulAvxTwo_6x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_6x10Xor_end:
+	RET
+
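The mulAvxTwo_* kernels, by contrast, implement GF(2^8) multiplication with the classic split-nibble table lookup visible above: each input byte is split into its low and high nibble (the VPAND with 0x0f and the VPSRLQ $0x04), each nibble indexes a 16-entry table via VPSHUFB, and the two lookups are folded into the accumulator (XOR3WAY in the Xor variants). A minimal scalar model of one (input, output) pair is sketched below; the function name is hypothetical, and lo/hi are assumed to be the usual per-coefficient nibble product tables.

	// mulAvxTwoRef models one (input, output) pair of a mulAvxTwo_*Xor kernel:
	// out[k] ^= lo[in[k] & 0x0f] ^ hi[in[k] >> 4], which equals the GF(2^8) product
	// of the matrix coefficient with in[k] when lo and hi are its nibble tables.
	func mulAvxTwoRef(lo, hi *[16]byte, in, out []byte) {
		for k, b := range in {
			out[k] ^= lo[b&0x0f] ^ hi[b>>4]
		}
	}

In the non-Xor kernels the first input assigns the lookup result to the output accumulator and only later inputs XOR into it, mirroring the VPXOR of the first table pair versus XOR3WAY for the rest.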
+// func mulAvxTwo_7x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_7x1(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x1_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_7x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	VPXOR   Y2, Y3, Y0
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (DX), Y4
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x1_loop
+	VZEROUPPER
+
+mulAvxTwo_7x1_end:
+	RET
+
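The 32-byte table pairs read from (CX) at increasing offsets are these per-coefficient lookup tables, two 16-entry tables per (input, output) pair. In scalar terms they can be built once per coefficient; a hedged sketch follows, assuming some gfMul helper implementing the package's GF(2^8) multiplication (the helper name is an assumption, not the upstream API). The split works because multiplication by a constant is linear over GF(2), so c*b == c*(b & 0x0f) XOR c*(b & 0xf0).

	// buildNibbleTables builds the low/high nibble lookup tables for one matrix
	// coefficient c: lo[i] = c*i and hi[i] = c*(i<<4), so the product of c with any
	// byte b is lo[b&0x0f] ^ hi[b>>4]. gfMul is assumed to be a GF(2^8) multiply.
	func buildNibbleTables(c byte, gfMul func(a, b byte) byte) (lo, hi [16]byte) {
		for i := 0; i < 16; i++ {
			lo[i] = gfMul(c, byte(i))
			hi[i] = gfMul(c, byte(i)<<4)
		}
		return
	}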
+// func mulAvxTwo_7x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_7x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_7x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R11)
+	VMOVDQU Y1, 32(R11)
+	ADDQ    $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_7x1_64_end:
+	RET
+
+// func mulGFNI_7x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_7x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), CX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, CX
+
+mulGFNI_7x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z8
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z8, Z7
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z8
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z8
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z8
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z8
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z8
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (CX), Z8
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Store 1 outputs
+	VMOVDQU64 Z7, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x1_64_loop
+	VZEROUPPER
+
+mulGFNI_7x1_64_end:
+	RET
+
+// func mulGFNI_7x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_7x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), CX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R10
+	MOVQ            start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+
+	// Add start offset to input
+	ADDQ R11, DX
+	ADDQ R11, BX
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, CX
+
+mulGFNI_7x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (R10), Z7
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z8
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z8
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z8
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z8
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z8
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z8
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (CX), Z8
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z6, Z8, Z8
+	VXORPD         Z7, Z8, Z7
+
+	// Store 1 outputs
+	VMOVDQU64 Z7, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_7x1Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x1Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_7x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (R11), Y0
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (DX), Y4
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x1Xor_end:
+	RET
+
+// func mulAvxTwo_7x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_7x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_7x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R11), Y0
+	VMOVDQU 32(R11), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R11)
+	VMOVDQU Y1, 32(R11)
+	ADDQ    $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x2(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x2_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R11
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+	ADDQ R13, R11
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_7x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y5
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 6 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 768(CX), Y3
+	VMOVDQU 800(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 832(CX), Y3
+	VMOVDQU 864(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x2_loop
+	VZEROUPPER
+
+mulAvxTwo_7x2_end:
+	RET
+
+// func mulAvxTwo_7x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x2_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 65 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x2_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R11
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+	ADDQ R13, R11
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_7x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R12)
+	VMOVDQU Y1, 32(R12)
+	ADDQ    $0x40, R12
+	VMOVDQU Y2, (R11)
+	VMOVDQU Y3, 32(R11)
+	ADDQ    $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_7x2_64_end:
+	RET
+
+// func mulGFNI_7x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), CX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R10
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+	ADDQ R12, R10
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, CX
+
+mulGFNI_7x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z16
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z16, Z14
+	VGF2P8AFFINEQB $0x00, Z1, Z16, Z15
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z16
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z3, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z16
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z5, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z16
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z7, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z16
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z9, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z16
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z11, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (CX), Z16
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z13, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Store 2 outputs
+	VMOVDQU64 Z14, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z15, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x2_64_loop
+	VZEROUPPER
+
+mulGFNI_7x2_64_end:
+	RET
+
+// func mulGFNI_7x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), CX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R10
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+	ADDQ R12, R10
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, CX
+
+mulGFNI_7x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (R11), Z14
+	VMOVDQU64 (R10), Z15
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z16
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z1, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z16
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z3, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z16
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z5, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z16
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z7, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z16
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z9, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z16
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z11, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (CX), Z16
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z12, Z16, Z17
+	VXORPD         Z14, Z17, Z14
+	VGF2P8AFFINEQB $0x00, Z13, Z16, Z17
+	VXORPD         Z15, Z17, Z15
+
+	// Store 2 outputs
+	VMOVDQU64 Z14, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z15, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x2Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x2Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R11
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+	ADDQ R13, R11
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_7x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R12), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y5
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 6 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 768(CX), Y3
+	VMOVDQU 800(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 832(CX), Y3
+	VMOVDQU 864(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x2Xor_end:
+	RET
+
+// func mulAvxTwo_7x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 65 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R11
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+	ADDQ R13, R11
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_7x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R12), Y0
+	VMOVDQU 32(R12), Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 32(R11), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R12)
+	VMOVDQU Y1, 32(R12)
+	ADDQ    $0x40, R12
+	VMOVDQU Y2, (R11)
+	VMOVDQU Y3, 32(R11)
+	ADDQ    $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x3(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x3_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R11
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R11
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_7x3_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y6
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y6
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 6 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1152(CX), Y4
+	VMOVDQU 1184(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1216(CX), Y4
+	VMOVDQU 1248(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1280(CX), Y4
+	VMOVDQU 1312(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x3_loop
+	VZEROUPPER
+
+mulAvxTwo_7x3_end:
+	RET
+
+// func mulAvxTwo_7x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x3_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 94 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x3_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R11
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R11
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_7x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y11
+	VMOVDQU 32(R10), Y13
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R12)
+	VMOVDQU Y1, 32(R12)
+	ADDQ    $0x40, R12
+	VMOVDQU Y2, (R13)
+	VMOVDQU Y3, 32(R13)
+	ADDQ    $0x40, R13
+	VMOVDQU Y4, (R11)
+	VMOVDQU Y5, 32(R11)
+	ADDQ    $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x3_64_loop
+	VZEROUPPER
+
+mulAvxTwo_7x3_64_end:
+	RET
+
+// func mulGFNI_7x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x3_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), CX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R10
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R10
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, CX
+
+mulGFNI_7x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z23
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z24
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z24
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z24
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z24
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z24
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z16, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z17, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z18, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z19, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z20, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 3 outputs
+	VMOVDQU64 Z21, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z22, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x3_64_loop
+	VZEROUPPER
+
+mulGFNI_7x3_64_end:
+	RET
+
+// func mulGFNI_7x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), CX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R10
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R10
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, CX
+
+mulGFNI_7x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (R11), Z21
+	VMOVDQU64 (R12), Z22
+	VMOVDQU64 (R10), Z23
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z24
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z24
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z4, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z5, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z24
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z7, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z8, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z24
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z24
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z13, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z14, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z24
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z16, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z17, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (CX), Z24
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z18, Z24, Z25
+	VXORPD         Z21, Z25, Z21
+	VGF2P8AFFINEQB $0x00, Z19, Z24, Z25
+	VXORPD         Z22, Z25, Z22
+	VGF2P8AFFINEQB $0x00, Z20, Z24, Z25
+	VXORPD         Z23, Z25, Z23
+
+	// Store 3 outputs
+	VMOVDQU64 Z21, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z22, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z23, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x3Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x3Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R11
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R11
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_7x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R12), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R13), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y6
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y6
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 6 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1152(CX), Y4
+	VMOVDQU 1184(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1216(CX), Y4
+	VMOVDQU 1248(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1280(CX), Y4
+	VMOVDQU 1312(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x3Xor_end:
+	RET
+
+// func mulAvxTwo_7x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 94 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R11
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R11
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_7x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R12), Y0
+	VMOVDQU 32(R12), Y1
+	VMOVDQU (R13), Y2
+	VMOVDQU 32(R13), Y3
+	VMOVDQU (R11), Y4
+	VMOVDQU 32(R11), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y11
+	VMOVDQU 32(R10), Y13
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R12)
+	VMOVDQU Y1, 32(R12)
+	ADDQ    $0x40, R12
+	VMOVDQU Y2, (R13)
+	VMOVDQU Y3, 32(R13)
+	ADDQ    $0x40, R13
+	VMOVDQU Y4, (R11)
+	VMOVDQU Y5, 32(R11)
+	ADDQ    $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 65 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R14
+	MOVQ  72(R11), R11
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R11
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_7x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R10), Y7
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x4_loop
+	VZEROUPPER
+
+mulAvxTwo_7x4_end:
+	RET
+
+// func mulGFNI_7x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x4_64(SB), $0-88
+	// Loading 26 of 28 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R14
+	MOVQ            72(R11), R11
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R11
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, DX
+
+mulGFNI_7x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 4 outputs
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x4_64_loop
+	VZEROUPPER
+
+mulGFNI_7x4_64_end:
+	RET
+
+// func mulGFNI_7x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x4_64Xor(SB), $0-88
+	// Loading 26 of 28 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R14
+	MOVQ            72(R11), R11
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R11
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, DX
+
+mulGFNI_7x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (R12), Z26
+	VMOVDQU64 (R13), Z27
+	VMOVDQU64 (R14), Z28
+	VMOVDQU64 (R11), Z29
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 4 outputs
+	VMOVDQU64 Z26, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 65 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R14
+	MOVQ  72(R11), R11
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R11
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_7x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R12), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R13), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R14), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R10), Y7
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x4Xor_end:
+	RET
+
+// func mulAvxTwo_7x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x5(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 80 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x5_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R14
+	MOVQ  72(R11), R15
+	MOVQ  96(R11), R11
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_7x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R10), Y8
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y3, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x5_loop
+	VZEROUPPER
+
+mulAvxTwo_7x5_end:
+	RET
+
+// func mulGFNI_7x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x5_64(SB), $8-88
+	// Loading 25 of 35 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R14
+	MOVQ            72(R11), R15
+	MOVQ            96(R11), R11
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, DX
+
+mulGFNI_7x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x5_64_loop
+	VZEROUPPER
+
+mulGFNI_7x5_64_end:
+	RET
+
+// func mulGFNI_7x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x5_64Xor(SB), $8-88
+	// Loading 25 of 35 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R14
+	MOVQ            72(R11), R15
+	MOVQ            96(R11), R11
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, DX
+
+mulGFNI_7x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R11), Z29
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_7x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x5Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 80 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R14
+	MOVQ  72(R11), R15
+	MOVQ  96(R11), R11
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_7x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R12), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (R13), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R14), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R15), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R10), Y8
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y3, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_7x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x5Xor_end:
+	RET
+
+// func mulAvxTwo_7x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x6(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 95 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x6_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), AX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R13
+	MOVQ  72(R10), R14
+	MOVQ  96(R10), R15
+	MOVQ  120(R10), R10
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_7x6_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y5
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (R9), Y9
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 6 to 6 outputs
+	VMOVDQU (AX), Y9
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y3, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y4, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y5, (R10)
+	ADDQ    $0x20, R10
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_7x6_loop
+	VZEROUPPER
+
+mulAvxTwo_7x6_end:
+	RET
+
+// func mulGFNI_7x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x6_64(SB), $8-88
+	// Loading 24 of 42 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
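+	// GF(2^8) multiplication via GFNI: every matrix entry is an 8x8 bit
+	// matrix broadcast into a ZMM register; VGF2P8AFFINEQB applies it to 64
+	// input bytes at a time and the per-input results are XOR-accumulated
+	// into the six outputs. Matrices that do not fit in Z0-Z23 are broadcast
+	// directly from memory via the .BCST forms.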
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), AX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R13
+	MOVQ            72(R10), R14
+	MOVQ            96(R10), R15
+	MOVQ            120(R10), R10
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_7x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_7x6_64_loop
+	VZEROUPPER
+
+mulGFNI_7x6_64_end:
+	RET
+
+// func mulGFNI_7x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x6_64Xor(SB), $8-88
+	// Loading 24 of 42 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
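+	// Same scheme as mulGFNI_7x6_64, except the current contents of the six
+	// output slices are loaded at the top of each iteration and the new
+	// products are XORed on top of them.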
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), AX
+	MOVQ            out_base+48(FP), R10
+	MOVQ            out_base+48(FP), R10
+	MOVQ            (R10), R11
+	MOVQ            24(R10), R12
+	MOVQ            48(R10), R13
+	MOVQ            72(R10), R14
+	MOVQ            96(R10), R15
+	MOVQ            120(R10), R10
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_7x6_64Xor_loop:
+	// Load 6 outputs
+	VMOVDQU64 (R11), Z24
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R10), Z29
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	VMOVDQU64 Z24, (R11)
+	ADDQ      $0x40, R11
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R10)
+	ADDQ      $0x40, R10
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_7x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x6_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x6Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 95 YMM used
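+	// Same nibble/VPSHUFB scheme as mulAvxTwo_7x6, but the accumulators
+	// Y0-Y5 are seeded from the existing output bytes, so the products are
+	// XORed onto the destinations instead of overwriting them.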
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x6Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), AX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R11
+	MOVQ  24(R10), R12
+	MOVQ  48(R10), R13
+	MOVQ  72(R10), R14
+	MOVQ  96(R10), R15
+	MOVQ  120(R10), R10
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R10
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_7x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (R11), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU (R12), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU (R13), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU (R14), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU (R15), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU (R10), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (R9), Y9
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 6 to 6 outputs
+	VMOVDQU (AX), Y9
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y3, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y4, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y5, (R10)
+	ADDQ    $0x20, R10
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_7x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x6Xor_end:
+	RET
+
+// func mulAvxTwo_7x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x7(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 110 YMM used
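+	// With seven outputs there are not enough spare GP registers to pin every
+	// destination, so the output pointers are re-read from the out slice
+	// (R11) each iteration and addressed with the running offset in R12.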
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_7x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y6
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x7_loop
+	VZEROUPPER
+
+mulAvxTwo_7x7_end:
+	RET
+
+// func mulGFNI_7x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x7_64(SB), $0-88
+	// Loading 23 of 49 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 58 YMM used
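+	// GFNI variant for seven outputs: 23 of the 49 8x8 bit matrices stay
+	// resident in Z0-Z22 and the rest are broadcast from memory; 64 bytes are
+	// processed per input per iteration, with output pointers fetched through
+	// the out slice (R11) and offset by R12.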
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x7_64_loop
+	VZEROUPPER
+
+mulGFNI_7x7_64_end:
+	RET
+
+// func mulGFNI_7x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_7x7_64Xor(SB), $0-88
+	// Loading 23 of 49 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 58 YMM used
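+	// Same as mulGFNI_7x7_64, but the seven outputs are loaded at the start
+	// of every iteration so the products are XORed onto the existing data.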
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x7_64Xor_loop:
+	// Load 7 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 (R13)(R12*1), Z23
+	MOVQ      24(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z24
+	MOVQ      48(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z25
+	MOVQ      72(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z26
+	MOVQ      96(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z27
+	MOVQ      120(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z28
+	MOVQ      144(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z29
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x7_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 110 YMM used
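+	// Xor variant of mulAvxTwo_7x7: each accumulator is seeded from the
+	// current output bytes before the table lookups are XORed in.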
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_7x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	MOVQ    (R11), R13
+	VMOVDQU (R13)(R12*1), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	MOVQ    24(R11), R13
+	VMOVDQU (R13)(R12*1), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	MOVQ    48(R11), R13
+	VMOVDQU (R13)(R12*1), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	MOVQ    72(R11), R13
+	VMOVDQU (R13)(R12*1), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	MOVQ    96(R11), R13
+	VMOVDQU (R13)(R12*1), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	MOVQ    120(R11), R13
+	VMOVDQU (R13)(R12*1), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	MOVQ    144(R11), R13
+	VMOVDQU (R13)(R12*1), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x7Xor_end:
+	RET
+
+// func mulAvxTwo_7x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_7x8(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 125 YMM used
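+	// Eight accumulators (Y0-Y7) are built from each 32-byte block of every
+	// input; as with the 7x7 kernels, destinations are addressed through the
+	// out slice (R11) plus the offset in R12.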
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_7x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+	MOVQ    168(R11), R13
+	VMOVDQU Y7, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x8_loop
+	VZEROUPPER
+
+mulAvxTwo_7x8_end:
+	RET
+
+// func mulGFNI_7x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_7x8_64(SB), $0-88
+	// Loading 22 of 56 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 66 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z22, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      168(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x8_64_loop
+	VZEROUPPER
+
+mulGFNI_7x8_64_end:
+	RET
+
+// func mulGFNI_7x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_7x8_64Xor(SB), $0-88
+	// Loading 22 of 56 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 66 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x8_64Xor_loop:
+	// Load 8 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 (R13)(R12*1), Z22
+	MOVQ      24(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z23
+	MOVQ      48(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z24
+	MOVQ      72(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z25
+	MOVQ      96(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z26
+	MOVQ      120(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z27
+	MOVQ      144(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z28
+	MOVQ      168(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z22, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      168(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_7x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 125 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_7x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	MOVQ    (R11), R13
+	VMOVDQU (R13)(R12*1), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	MOVQ    24(R11), R13
+	VMOVDQU (R13)(R12*1), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	MOVQ    48(R11), R13
+	VMOVDQU (R13)(R12*1), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	MOVQ    72(R11), R13
+	VMOVDQU (R13)(R12*1), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	MOVQ    96(R11), R13
+	VMOVDQU (R13)(R12*1), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	MOVQ    120(R11), R13
+	VMOVDQU (R13)(R12*1), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	MOVQ    144(R11), R13
+	VMOVDQU (R13)(R12*1), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	MOVQ    168(R11), R13
+	VMOVDQU (R13)(R12*1), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+	MOVQ    168(R11), R13
+	VMOVDQU Y7, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x8Xor_end:
+	RET
+
+// func mulAvxTwo_7x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_7x9(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 140 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_7x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+	MOVQ    168(R11), R13
+	VMOVDQU Y7, (R13)(R12*1)
+	MOVQ    192(R11), R13
+	VMOVDQU Y8, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x9_loop
+	VZEROUPPER
+
+mulAvxTwo_7x9_end:
+	RET
+
+// func mulGFNI_7x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_7x9_64(SB), $0-88
+	// Loading 21 of 63 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 74 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z21, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z22, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      168(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      192(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x9_64_loop
+	VZEROUPPER
+
+mulGFNI_7x9_64_end:
+	RET
+
+// func mulGFNI_7x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_7x9_64Xor(SB), $0-88
+	// Loading 21 of 63 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 74 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x9_64Xor_loop:
+	// Load 9 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 (R13)(R12*1), Z21
+	MOVQ      24(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z22
+	MOVQ      48(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z23
+	MOVQ      72(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z24
+	MOVQ      96(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z25
+	MOVQ      120(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z26
+	MOVQ      144(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z27
+	MOVQ      168(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z28
+	MOVQ      192(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z21, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z22, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      168(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      192(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_7x9Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 140 YMM used
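+	// Note: Xor variant. The nine output vectors are reloaded from the output
+	// slice headers inside the loop (destination kept on stack) and the GF(2^8)
+	// products, built from the split-nibble VPSHUFB table lookups below, are
+	// folded into them with the XOR3WAY macro instead of overwriting them.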
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_7x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	MOVQ    (R11), R13
+	VMOVDQU (R13)(R12*1), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	MOVQ    24(R11), R13
+	VMOVDQU (R13)(R12*1), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	MOVQ    48(R11), R13
+	VMOVDQU (R13)(R12*1), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	MOVQ    72(R11), R13
+	VMOVDQU (R13)(R12*1), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	MOVQ    96(R11), R13
+	VMOVDQU (R13)(R12*1), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	MOVQ    120(R11), R13
+	VMOVDQU (R13)(R12*1), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	MOVQ    144(R11), R13
+	VMOVDQU (R13)(R12*1), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	MOVQ    168(R11), R13
+	VMOVDQU (R13)(R12*1), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	MOVQ    192(R11), R13
+	VMOVDQU (R13)(R12*1), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+	MOVQ    168(R11), R13
+	VMOVDQU Y7, (R13)(R12*1)
+	MOVQ    192(R11), R13
+	VMOVDQU Y8, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x9Xor_end:
+	RET
+
+// func mulAvxTwo_7x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_7x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 155 YMM used
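+	// Note: non-Xor variant. Input 0 initializes the ten accumulators Y0-Y9 with
+	// a plain VPXOR of the low/high nibble table lookups; inputs 1-6 are folded
+	// in with XOR3WAY, and the results are stored to the output slices at the end.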
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_7x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (R10), Y13
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 6 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3840(CX), Y11
+	VMOVDQU 3872(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3904(CX), Y11
+	VMOVDQU 3936(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3968(CX), Y11
+	VMOVDQU 4000(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4032(CX), Y11
+	VMOVDQU 4064(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4096(CX), Y11
+	VMOVDQU 4128(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4160(CX), Y11
+	VMOVDQU 4192(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4224(CX), Y11
+	VMOVDQU 4256(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4288(CX), Y11
+	VMOVDQU 4320(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4352(CX), Y11
+	VMOVDQU 4384(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 4416(CX), Y11
+	VMOVDQU 4448(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+	MOVQ    168(R11), R13
+	VMOVDQU Y7, (R13)(R12*1)
+	MOVQ    192(R11), R13
+	VMOVDQU Y8, (R13)(R12*1)
+	MOVQ    216(R11), R13
+	VMOVDQU Y9, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x10_loop
+	VZEROUPPER
+
+mulAvxTwo_7x10_end:
+	RET
+
+// func mulGFNI_7x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_7x10_64(SB), $0-88
+	// Loading 20 of 70 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 82 YMM used
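+	// Note: GFNI variant. Each 64-bit matrix entry is an 8x8 bit-matrix applied
+	// to a full ZMM of input bytes with VGF2P8AFFINEQB; the first 20 of the 70
+	// tables are broadcast into Z0-Z19, the remaining 50 are used directly from
+	// memory through the .BCST embedded-broadcast form.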
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z20, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z21, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z22, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      168(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      192(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      216(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x10_64_loop
+	VZEROUPPER
+
+mulGFNI_7x10_64_end:
+	RET
+
+// func mulGFNI_7x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_7x10_64Xor(SB), $0-88
+	// Loading 20 of 70 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 82 YMM used
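+	// Note: Xor variant of the GFNI kernel. The ten outputs are loaded into
+	// Z20-Z29 at the top of each iteration, every affine product is XORed in
+	// with VXORPD, and the accumulators are written back at the end.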
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_7x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), DX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, DX
+
+mulGFNI_7x10_64Xor_loop:
+	// Load 10 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 (R13)(R12*1), Z20
+	MOVQ      24(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z21
+	MOVQ      48(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z22
+	MOVQ      72(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z23
+	MOVQ      96(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z24
+	MOVQ      120(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z25
+	MOVQ      144(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z26
+	MOVQ      168(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z27
+	MOVQ      192(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z28
+	MOVQ      216(R11), R13
+	VMOVDQU64 (R13)(R12*1), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R11), R13
+	VMOVDQU64 Z20, (R13)(R12*1)
+	MOVQ      24(R11), R13
+	VMOVDQU64 Z21, (R13)(R12*1)
+	MOVQ      48(R11), R13
+	VMOVDQU64 Z22, (R13)(R12*1)
+	MOVQ      72(R11), R13
+	VMOVDQU64 Z23, (R13)(R12*1)
+	MOVQ      96(R11), R13
+	VMOVDQU64 Z24, (R13)(R12*1)
+	MOVQ      120(R11), R13
+	VMOVDQU64 Z25, (R13)(R12*1)
+	MOVQ      144(R11), R13
+	VMOVDQU64 Z26, (R13)(R12*1)
+	MOVQ      168(R11), R13
+	VMOVDQU64 Z27, (R13)(R12*1)
+	MOVQ      192(R11), R13
+	VMOVDQU64 Z28, (R13)(R12*1)
+	MOVQ      216(R11), R13
+	VMOVDQU64 Z29, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R12
+	DECQ AX
+	JNZ  mulGFNI_7x10_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_7x10_64Xor_end:
+	RET
+
+// func mulAvxTwo_7x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_7x10Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 155 YMM used
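+	// Note: Xor variant. Output vectors Y0-Y9 are preloaded from the output
+	// slices (kept on the stack) before the products of input 0 are folded in,
+	// so existing output data is accumulated rather than overwritten.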
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_7x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), DX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, R9
+	ADDQ         R12, R10
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_7x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	MOVQ    (R11), R13
+	VMOVDQU (R13)(R12*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	MOVQ    24(R11), R13
+	VMOVDQU (R13)(R12*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	MOVQ    48(R11), R13
+	VMOVDQU (R13)(R12*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	MOVQ    72(R11), R13
+	VMOVDQU (R13)(R12*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	MOVQ    96(R11), R13
+	VMOVDQU (R13)(R12*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	MOVQ    120(R11), R13
+	VMOVDQU (R13)(R12*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	MOVQ    144(R11), R13
+	VMOVDQU (R13)(R12*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	MOVQ    168(R11), R13
+	VMOVDQU (R13)(R12*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	MOVQ    192(R11), R13
+	VMOVDQU (R13)(R12*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	MOVQ    216(R11), R13
+	VMOVDQU (R13)(R12*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (R10), Y13
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 6 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3840(CX), Y11
+	VMOVDQU 3872(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3904(CX), Y11
+	VMOVDQU 3936(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3968(CX), Y11
+	VMOVDQU 4000(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4032(CX), Y11
+	VMOVDQU 4064(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4096(CX), Y11
+	VMOVDQU 4128(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4160(CX), Y11
+	VMOVDQU 4192(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4224(CX), Y11
+	VMOVDQU 4256(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4288(CX), Y11
+	VMOVDQU 4320(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4352(CX), Y11
+	VMOVDQU 4384(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 4416(CX), Y11
+	VMOVDQU 4448(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	MOVQ    (R11), R13
+	VMOVDQU Y0, (R13)(R12*1)
+	MOVQ    24(R11), R13
+	VMOVDQU Y1, (R13)(R12*1)
+	MOVQ    48(R11), R13
+	VMOVDQU Y2, (R13)(R12*1)
+	MOVQ    72(R11), R13
+	VMOVDQU Y3, (R13)(R12*1)
+	MOVQ    96(R11), R13
+	VMOVDQU Y4, (R13)(R12*1)
+	MOVQ    120(R11), R13
+	VMOVDQU Y5, (R13)(R12*1)
+	MOVQ    144(R11), R13
+	VMOVDQU Y6, (R13)(R12*1)
+	MOVQ    168(R11), R13
+	VMOVDQU Y7, (R13)(R12*1)
+	MOVQ    192(R11), R13
+	VMOVDQU Y8, (R13)(R12*1)
+	MOVQ    216(R11), R13
+	VMOVDQU Y9, (R13)(R12*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R12
+	DECQ AX
+	JNZ  mulAvxTwo_7x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_7x10Xor_end:
+	RET
+
+// func mulAvxTwo_8x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_8x1(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
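+	// Note: single-output kernel. The one destination pointer is kept in a GP
+	// register (R12) and advanced sequentially, so no per-output pointer reloads
+	// are needed inside the loop.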
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x1_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_8x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	VPXOR   Y2, Y3, Y0
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y4
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 7 to 1 outputs
+	VMOVDQU (DX), Y4
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 448(CX), Y2
+	VMOVDQU 480(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x1_loop
+	VZEROUPPER
+
+mulAvxTwo_8x1_end:
+	RET
+
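
Editor's note, not part of the diff: the mulAvxTwo_* kernels above implement GF(2^8) multiplication with the split-nibble table method — each 32-byte block is split into low and high nibbles (VPSRLQ/VPAND against the broadcast 0x0f mask), each nibble selects from a 16-entry table per lane via VPSHUFB, and the two lookups are XORed together (VPXOR for the first input, XOR3WAY for later ones). A minimal scalar Go sketch of the same idea follows; gfMul and the 0x11D reduction polynomial are assumptions for illustration, not taken from this file.

    // gfMul multiplies two GF(2^8) elements. The 0x11D reduction polynomial
    // is an assumption for this sketch; the library defines its own tables.
    func gfMul(a, b byte) byte {
        var p byte
        for b > 0 {
            if b&1 != 0 {
                p ^= a
            }
            carry := a & 0x80
            a <<= 1
            if carry != 0 {
                a ^= 0x1d // x^8 = x^4 + x^3 + x^2 + 1
            }
            b >>= 1
        }
        return p
    }

    // mulSliceScalar mirrors what one mulAvxTwo_* iteration does 32 bytes at
    // a time: multiplication by c is GF(2)-linear, so c*v is the XOR of a
    // low-nibble lookup and a high-nibble lookup.
    func mulSliceScalar(c byte, in, out []byte) {
        var low, high [16]byte
        for i := 0; i < 16; i++ {
            low[i] = gfMul(c, byte(i))     // c * (v & 0x0f)
            high[i] = gfMul(c, byte(i)<<4) // c * (v & 0xf0)
        }
        for i, v := range in {
            out[i] = low[v&0x0f] ^ high[v>>4]
        }
    }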
+// func mulAvxTwo_8x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_8x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y6
+	VMOVDQU 32(R11), Y5
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R12)
+	VMOVDQU Y1, 32(R12)
+	ADDQ    $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_8x1_64_end:
+	RET
+
+// func mulGFNI_8x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_8x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 11 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), CX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, CX
+
+mulGFNI_8x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z9
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z9, Z8
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z9
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z9
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z9
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z9
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z9
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (R10), Z9
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z6, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU64      (CX), Z9
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z7, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Store 1 outputs
+	VMOVDQU64 Z8, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x1_64_loop
+	VZEROUPPER
+
+mulGFNI_8x1_64_end:
+	RET
+
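
Editor's note, not part of the diff: the mulGFNI_* kernels take a different route. Each 64-bit matrix entry broadcast by VBROADCASTF32X2 encodes an 8x8 bit matrix over GF(2), and VGF2P8AFFINEQB applies that matrix to every byte of a 64-byte ZMM register in one instruction. The sketch below (reusing the gfMul helper from the earlier sketch) shows why such a matrix exists: multiplication by a constant c is GF(2)-linear, so its i-th column is c*2^i. The exact bit and byte ordering the instruction expects is produced by the code generator and is not reproduced here.

    // mulViaBitMatrix multiplies x by c by treating the multiplication as an
    // 8x8 bit-matrix/vector product over GF(2), which is the operation
    // VGF2P8AFFINEQB performs bytewise on a full vector register.
    func mulViaBitMatrix(c, x byte) byte {
        var out byte
        for i := 0; i < 8; i++ {
            if x&(1<<i) != 0 {
                out ^= gfMul(c, byte(1)<<i) // column i of the matrix is c*2^i
            }
        }
        return out
    }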
+// func mulGFNI_8x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_8x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 11 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), CX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R11
+	MOVQ            start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R11
+
+	// Add start offset to input
+	ADDQ R12, DX
+	ADDQ R12, BX
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, CX
+
+mulGFNI_8x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (R11), Z8
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z9
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z9
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z9
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z9
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z9
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z9
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (R10), Z9
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z6, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU64      (CX), Z9
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z7, Z9, Z9
+	VXORPD         Z8, Z9, Z8
+
+	// Store 1 outputs
+	VMOVDQU64 Z8, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x1Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x1Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_8x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (R12), Y0
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y4
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 7 to 1 outputs
+	VMOVDQU (DX), Y4
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 448(CX), Y2
+	VMOVDQU 480(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x1Xor_end:
+	RET
+
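
Editor's note, not part of the diff: the *Xor suffix marks the accumulating variants. The plain kernels initialize each output register with the first product (VPXOR), while the Xor kernels first load the existing output (here from (R12)) and fold every product into it with XOR3WAY. In scalar Go terms, and reusing mulViaBitMatrix from the sketch above, one output shard of an n-input kernel looks roughly like this (a sketch, not the library's API):

    // mulRow produces one output shard as the GF(2^8) dot product of a matrix
    // row with the input shards. xorMode corresponds to the *Xor kernels,
    // which accumulate into the existing output instead of overwriting it.
    func mulRow(row []byte, in [][]byte, out []byte, xorMode bool) {
        for i := range out {
            acc := byte(0)
            if xorMode {
                acc = out[i] // *Xor kernels start from the current output
            }
            for c, shard := range in {
                acc ^= mulViaBitMatrix(row[c], shard[i])
            }
            out[i] = acc
        }
    }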
+// func mulAvxTwo_8x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_8x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R12), Y0
+	VMOVDQU 32(R12), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y6
+	VMOVDQU 32(R11), Y5
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R12)
+	VMOVDQU Y1, 32(R12)
+	ADDQ    $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x2(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 39 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x2_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R12
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+	ADDQ R14, R12
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_8x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y5
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y5
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 768(CX), Y3
+	VMOVDQU 800(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 832(CX), Y3
+	VMOVDQU 864(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 7 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 896(CX), Y3
+	VMOVDQU 928(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 960(CX), Y3
+	VMOVDQU 992(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x2_loop
+	VZEROUPPER
+
+mulAvxTwo_8x2_end:
+	RET
+
+// func mulAvxTwo_8x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x2_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 73 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x2_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R12
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+	ADDQ R14, R12
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_8x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y9
+	VMOVDQU 32(R11), Y11
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R13)
+	VMOVDQU Y1, 32(R13)
+	ADDQ    $0x40, R13
+	VMOVDQU Y2, (R12)
+	VMOVDQU Y3, 32(R12)
+	ADDQ    $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_8x2_64_end:
+	RET
+
+// func mulGFNI_8x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_8x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), CX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R11
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+	ADDQ R13, R11
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, CX
+
+mulGFNI_8x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z18
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z18, Z16
+	VGF2P8AFFINEQB $0x00, Z1, Z18, Z17
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z18
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z3, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z18
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z18
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z7, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z18
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z9, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z18
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z11, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (R10), Z18
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z12, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z13, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU64      (CX), Z18
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z14, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z15, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Store 2 outputs
+	VMOVDQU64 Z16, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z17, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x2_64_loop
+	VZEROUPPER
+
+mulGFNI_8x2_64_end:
+	RET
+
+// func mulGFNI_8x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_8x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), CX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R11
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+	ADDQ R13, R11
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, CX
+
+mulGFNI_8x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (R12), Z16
+	VMOVDQU64 (R11), Z17
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z18
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z1, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z18
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z3, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z18
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z5, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z18
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z7, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z18
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z9, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z18
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z11, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (R10), Z18
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z12, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z13, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU64      (CX), Z18
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z14, Z18, Z19
+	VXORPD         Z16, Z19, Z16
+	VGF2P8AFFINEQB $0x00, Z15, Z18, Z19
+	VXORPD         Z17, Z19, Z17
+
+	// Store 2 outputs
+	VMOVDQU64 Z16, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z17, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x2Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 39 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x2Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R12
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+	ADDQ R14, R12
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_8x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R13), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (R12), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y5
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y5
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 768(CX), Y3
+	VMOVDQU 800(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 832(CX), Y3
+	VMOVDQU 864(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 7 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 896(CX), Y3
+	VMOVDQU 928(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 960(CX), Y3
+	VMOVDQU 992(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x2Xor_end:
+	RET
+
+// func mulAvxTwo_8x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 73 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R12
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+	ADDQ R14, R12
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_8x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R13), Y0
+	VMOVDQU 32(R13), Y1
+	VMOVDQU (R12), Y2
+	VMOVDQU 32(R12), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y9
+	VMOVDQU 32(R11), Y11
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R13)
+	VMOVDQU Y1, 32(R13)
+	ADDQ    $0x40, R13
+	VMOVDQU Y2, (R12)
+	VMOVDQU Y3, 32(R12)
+	ADDQ    $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x3(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 56 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x3_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R12
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R12
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_8x3_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y6
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y6
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y6
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1152(CX), Y4
+	VMOVDQU 1184(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1216(CX), Y4
+	VMOVDQU 1248(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1280(CX), Y4
+	VMOVDQU 1312(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 7 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1344(CX), Y4
+	VMOVDQU 1376(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1408(CX), Y4
+	VMOVDQU 1440(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1472(CX), Y4
+	VMOVDQU 1504(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x3_loop
+	VZEROUPPER
+
+mulAvxTwo_8x3_end:
+	RET
+
+// func mulAvxTwo_8x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_8x3_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 106 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x3_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R12
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R12
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_8x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y11
+	VMOVDQU 32(R10), Y13
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y11
+	VMOVDQU 32(R11), Y13
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R13)
+	VMOVDQU Y1, 32(R13)
+	ADDQ    $0x40, R13
+	VMOVDQU Y2, (R14)
+	VMOVDQU Y3, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y4, (R12)
+	VMOVDQU Y5, 32(R12)
+	ADDQ    $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x3_64_loop
+	VZEROUPPER
+
+mulAvxTwo_8x3_64_end:
+	RET
+
+// func mulGFNI_8x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x3_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 29 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), CX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R11
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R11
+
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, CX
+
+mulGFNI_8x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z27
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z27, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z27, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z27, Z26
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z27
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z27
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z27
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z10, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z27
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z27
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (R10), Z27
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z18, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU64      (CX), Z27
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z21, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z22, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z23, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Store 3 outputs
+	VMOVDQU64 Z24, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z25, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z26, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x3_64_loop
+	VZEROUPPER
+
+mulGFNI_8x3_64_end:
+	RET
+
+// func mulGFNI_8x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 29 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), CX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R11
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R11
+
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, CX
+
+mulGFNI_8x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (R12), Z24
+	VMOVDQU64 (R13), Z25
+	VMOVDQU64 (R11), Z26
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z27
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z27
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z27
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z27
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z10, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z27
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z27
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (R10), Z27
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z18, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU64      (CX), Z27
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z21, Z27, Z28
+	VXORPD         Z24, Z28, Z24
+	VGF2P8AFFINEQB $0x00, Z22, Z27, Z28
+	VXORPD         Z25, Z28, Z25
+	VGF2P8AFFINEQB $0x00, Z23, Z27, Z28
+	VXORPD         Z26, Z28, Z26
+
+	// Store 3 outputs
+	VMOVDQU64 Z24, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z25, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z26, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x3Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 56 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x3Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R12
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R12
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_8x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R13), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R14), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y6
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y6
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y6
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1152(CX), Y4
+	VMOVDQU 1184(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1216(CX), Y4
+	VMOVDQU 1248(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1280(CX), Y4
+	VMOVDQU 1312(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 7 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1344(CX), Y4
+	VMOVDQU 1376(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1408(CX), Y4
+	VMOVDQU 1440(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1472(CX), Y4
+	VMOVDQU 1504(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x3Xor_end:
+	RET
+
+// func mulAvxTwo_8x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 106 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R12
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R12
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_8x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R13), Y0
+	VMOVDQU 32(R13), Y1
+	VMOVDQU (R14), Y2
+	VMOVDQU 32(R14), Y3
+	VMOVDQU (R12), Y4
+	VMOVDQU 32(R12), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y11
+	VMOVDQU 32(R10), Y13
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y11
+	VMOVDQU 32(R11), Y13
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R13)
+	VMOVDQU Y1, 32(R13)
+	ADDQ    $0x40, R13
+	VMOVDQU Y2, (R14)
+	VMOVDQU Y3, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y4, (R12)
+	VMOVDQU Y5, 32(R12)
+	ADDQ    $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x4(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 73 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R15
+	MOVQ  72(R12), R12
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_8x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R10), Y7
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (R11), Y7
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 7 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1792(CX), Y5
+	VMOVDQU 1824(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1856(CX), Y5
+	VMOVDQU 1888(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1920(CX), Y5
+	VMOVDQU 1952(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1984(CX), Y5
+	VMOVDQU 2016(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y2, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x4_loop
+	VZEROUPPER
+
+mulAvxTwo_8x4_end:
+	RET
+
+// func mulGFNI_8x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x4_64(SB), $8-88
+	// Loading 26 of 32 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R14
+	MOVQ            48(R12), R15
+	MOVQ            72(R12), R12
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, DX
+
+mulGFNI_8x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 4 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 4 outputs
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x4_64_loop
+	VZEROUPPER
+
+mulGFNI_8x4_64_end:
+	RET
+
+// func mulGFNI_8x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x4_64Xor(SB), $8-88
+	// Loading 26 of 32 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R14
+	MOVQ            48(R12), R15
+	MOVQ            72(R12), R12
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, DX
+
+mulGFNI_8x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R12), Z29
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 4 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 4 outputs
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_8x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x4Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 73 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R15
+	MOVQ  72(R12), R12
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_8x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R13), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R14), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R15), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R10), Y7
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (R11), Y7
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 7 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1792(CX), Y5
+	VMOVDQU 1824(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1856(CX), Y5
+	VMOVDQU 1888(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1920(CX), Y5
+	VMOVDQU 1952(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1984(CX), Y5
+	VMOVDQU 2016(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y2, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_8x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x4Xor_end:
+	RET
+
+// func mulAvxTwo_8x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x5(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 90 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x5_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), R10
+	MOVQ  168(AX), AX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R14
+	MOVQ  72(R11), R15
+	MOVQ  96(R11), R11
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X5
+	VPBROADCASTB X5, Y5
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_8x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R9), Y8
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (R10), Y8
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 7 to 5 outputs
+	VMOVDQU (AX), Y8
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2240(CX), Y6
+	VMOVDQU 2272(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2304(CX), Y6
+	VMOVDQU 2336(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2368(CX), Y6
+	VMOVDQU 2400(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2432(CX), Y6
+	VMOVDQU 2464(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2496(CX), Y6
+	VMOVDQU 2528(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y3, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_8x5_loop
+	VZEROUPPER
+
+mulAvxTwo_8x5_end:
+	RET
+
+// func mulGFNI_8x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x5_64(SB), $8-88
+	// Loading 25 of 40 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), R10
+	MOVQ            168(AX), AX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R14
+	MOVQ            72(R11), R15
+	MOVQ            96(R11), R11
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_8x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 5 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_8x5_64_loop
+	VZEROUPPER
+
+mulGFNI_8x5_64_end:
+	RET
+
+// func mulGFNI_8x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x5_64Xor(SB), $8-88
+	// Loading 25 of 40 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), R10
+	MOVQ            168(AX), AX
+	MOVQ            out_base+48(FP), R11
+	MOVQ            out_base+48(FP), R11
+	MOVQ            (R11), R12
+	MOVQ            24(R11), R13
+	MOVQ            48(R11), R14
+	MOVQ            72(R11), R15
+	MOVQ            96(R11), R11
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_8x5_64Xor_loop:
+	// Load 5 outputs
+	VMOVDQU64 (R12), Z25
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R11), Z29
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 5 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	VMOVDQU64 Z25, (R12)
+	ADDQ      $0x40, R12
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R11)
+	ADDQ      $0x40, R11
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_8x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x5Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 90 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x5Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), R10
+	MOVQ  168(AX), AX
+	MOVQ  out_base+48(FP), R11
+	MOVQ  (R11), R12
+	MOVQ  24(R11), R13
+	MOVQ  48(R11), R14
+	MOVQ  72(R11), R15
+	MOVQ  96(R11), R11
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R11
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X5
+	VPBROADCASTB X5, Y5
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_8x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R12), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU (R13), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU (R14), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU (R15), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R9), Y8
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (R10), Y8
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 7 to 5 outputs
+	VMOVDQU (AX), Y8
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2240(CX), Y6
+	VMOVDQU 2272(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2304(CX), Y6
+	VMOVDQU 2336(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2368(CX), Y6
+	VMOVDQU 2400(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2432(CX), Y6
+	VMOVDQU 2464(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2496(CX), Y6
+	VMOVDQU 2528(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y2, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y3, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_8x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x5Xor_end:
+	RET
+
+// func mulAvxTwo_8x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x6(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 107 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x6_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_8x6_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y5
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R9), Y9
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (R10), Y9
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 6 to 6 outputs
+	VMOVDQU (R11), Y9
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 7 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2688(CX), Y7
+	VMOVDQU 2720(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2752(CX), Y7
+	VMOVDQU 2784(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2816(CX), Y7
+	VMOVDQU 2848(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2880(CX), Y7
+	VMOVDQU 2912(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2944(CX), Y7
+	VMOVDQU 2976(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3008(CX), Y7
+	VMOVDQU 3040(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x6_loop
+	VZEROUPPER
+
+mulAvxTwo_8x6_end:
+	RET
+
+// func mulGFNI_8x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x6_64(SB), $0-88
+	// Loading 24 of 48 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 56 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x6_64_loop
+	VZEROUPPER
+
+mulGFNI_8x6_64_end:
+	RET
+
+// func mulGFNI_8x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x6_64Xor(SB), $0-88
+	// Loading 24 of 48 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 56 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x6_64Xor_loop:
+	// Load 6 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 (R14)(R13*1), Z24
+	MOVQ      24(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z25
+	MOVQ      48(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z26
+	MOVQ      72(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z27
+	MOVQ      96(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z28
+	MOVQ      120(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z29
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x6_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 107 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_8x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	MOVQ    (R12), R14
+	VMOVDQU (R14)(R13*1), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	MOVQ    24(R12), R14
+	VMOVDQU (R14)(R13*1), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	MOVQ    48(R12), R14
+	VMOVDQU (R14)(R13*1), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	MOVQ    72(R12), R14
+	VMOVDQU (R14)(R13*1), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	MOVQ    96(R12), R14
+	VMOVDQU (R14)(R13*1), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	MOVQ    120(R12), R14
+	VMOVDQU (R14)(R13*1), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R9), Y9
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (R10), Y9
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 6 to 6 outputs
+	VMOVDQU (R11), Y9
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 7 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2688(CX), Y7
+	VMOVDQU 2720(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2752(CX), Y7
+	VMOVDQU 2784(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2816(CX), Y7
+	VMOVDQU 2848(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2880(CX), Y7
+	VMOVDQU 2912(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2944(CX), Y7
+	VMOVDQU 2976(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3008(CX), Y7
+	VMOVDQU 3040(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x6Xor_end:
+	RET
+
+// func mulAvxTwo_8x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x7(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 124 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_8x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y6
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (R11), Y10
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 7 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3136(CX), Y8
+	VMOVDQU 3168(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3200(CX), Y8
+	VMOVDQU 3232(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3264(CX), Y8
+	VMOVDQU 3296(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3328(CX), Y8
+	VMOVDQU 3360(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3392(CX), Y8
+	VMOVDQU 3424(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3456(CX), Y8
+	VMOVDQU 3488(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3520(CX), Y8
+	VMOVDQU 3552(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x7_loop
+	VZEROUPPER
+
+mulAvxTwo_8x7_end:
+	RET
+
+// func mulGFNI_8x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_8x7_64(SB), $0-88
+	// Loading 23 of 56 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 65 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x7_64_loop
+	VZEROUPPER
+
+mulGFNI_8x7_64_end:
+	RET
+
+// func mulGFNI_8x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_8x7_64Xor(SB), $0-88
+	// Loading 23 of 56 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 65 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x7_64Xor_loop:
+	// Load 7 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 (R14)(R13*1), Z23
+	MOVQ      24(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z24
+	MOVQ      48(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z25
+	MOVQ      72(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z26
+	MOVQ      96(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z27
+	MOVQ      120(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z28
+	MOVQ      144(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z29
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x7_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_8x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 124 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_8x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	MOVQ    (R12), R14
+	VMOVDQU (R14)(R13*1), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	MOVQ    24(R12), R14
+	VMOVDQU (R14)(R13*1), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	MOVQ    48(R12), R14
+	VMOVDQU (R14)(R13*1), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	MOVQ    72(R12), R14
+	VMOVDQU (R14)(R13*1), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	MOVQ    96(R12), R14
+	VMOVDQU (R14)(R13*1), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	MOVQ    120(R12), R14
+	VMOVDQU (R14)(R13*1), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	MOVQ    144(R12), R14
+	VMOVDQU (R14)(R13*1), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (R11), Y10
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 7 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3136(CX), Y8
+	VMOVDQU 3168(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3200(CX), Y8
+	VMOVDQU 3232(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3264(CX), Y8
+	VMOVDQU 3296(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3328(CX), Y8
+	VMOVDQU 3360(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3392(CX), Y8
+	VMOVDQU 3424(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3456(CX), Y8
+	VMOVDQU 3488(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3520(CX), Y8
+	VMOVDQU 3552(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x7Xor_end:
+	RET
+
+// func mulAvxTwo_8x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_8x8(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 141 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_8x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (R11), Y11
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 7 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3584(CX), Y9
+	VMOVDQU 3616(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3648(CX), Y9
+	VMOVDQU 3680(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3712(CX), Y9
+	VMOVDQU 3744(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3776(CX), Y9
+	VMOVDQU 3808(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3840(CX), Y9
+	VMOVDQU 3872(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3904(CX), Y9
+	VMOVDQU 3936(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3968(CX), Y9
+	VMOVDQU 4000(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4032(CX), Y9
+	VMOVDQU 4064(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+	MOVQ    168(R12), R14
+	VMOVDQU Y7, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x8_loop
+	VZEROUPPER
+
+mulAvxTwo_8x8_end:
+	RET
+
+// func mulGFNI_8x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_8x8_64(SB), $0-88
+	// Loading 22 of 64 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 74 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z22, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      168(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x8_64_loop
+	VZEROUPPER
+
+mulGFNI_8x8_64_end:
+	RET
+
+// func mulGFNI_8x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_8x8_64Xor(SB), $0-88
+	// Loading 22 of 64 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 74 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x8_64Xor_loop:
+	// Load 8 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 (R14)(R13*1), Z22
+	MOVQ      24(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z23
+	MOVQ      48(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z24
+	MOVQ      72(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z25
+	MOVQ      96(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z26
+	MOVQ      120(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z27
+	MOVQ      144(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z28
+	MOVQ      168(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z22, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      168(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x8_64Xor_end:
+	RET
+
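The GFNI kernels in this file all share one structure: each 64-bit word of matrix is an 8x8 bit-matrix over GF(2), broadcast into a ZMM register (or applied straight from memory via the .BCST form once the 22 available table registers run out, as the "Loading 21 of 72 tables" comments note), and VGF2P8AFFINEQB applies it to 64 input bytes per step; the "Xor" variants first load the existing output and accumulate into it, while the plain variants overwrite it. As a rough orientation aid only, a byte-at-a-time Go sketch of what mulGFNI_8x8_64Xor computes could look like the following; the function and helper names are invented for illustration, and the matrix[input*8+output] layout is inferred from the table offsets used above for this 8-output kernel.

	// gf8Affine mirrors the per-byte behaviour of VGF2P8AFFINEQB with a zero
	// immediate: result bit i is the parity of matrix byte 7-i ANDed with b.
	func gf8Affine(m uint64, b byte) byte {
		var r byte
		for i := uint(0); i < 8; i++ {
			row := byte(m >> (8 * (7 - i)))
			if bits.OnesCount8(row&b)&1 == 1 {
				r |= 1 << i
			}
		}
		return r
	}

	// mulGFNI8x8XorRef is an illustrative scalar reference for the assembly
	// above: 8 inputs, 8 outputs, XOR-accumulating into the existing output.
	func mulGFNI8x8XorRef(matrix []uint64, in, out [][]byte, start, n int) {
		n &^= 63 // the assembly only processes whole 64-byte blocks
		for i := 0; i < 8; i++ { // inputs
			for j := 0; j < 8; j++ { // outputs
				m := matrix[i*8+j]
				for k := start; k < start+n; k++ {
					out[j][k] ^= gf8Affine(m, in[i][k])
				}
			}
		}
	}

(The sketch needs "math/bits" for the parity check; it is not part of the package API.)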
+// func mulAvxTwo_8x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 141 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_8x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	MOVQ    (R12), R14
+	VMOVDQU (R14)(R13*1), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	MOVQ    24(R12), R14
+	VMOVDQU (R14)(R13*1), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	MOVQ    48(R12), R14
+	VMOVDQU (R14)(R13*1), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	MOVQ    72(R12), R14
+	VMOVDQU (R14)(R13*1), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	MOVQ    96(R12), R14
+	VMOVDQU (R14)(R13*1), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	MOVQ    120(R12), R14
+	VMOVDQU (R14)(R13*1), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	MOVQ    144(R12), R14
+	VMOVDQU (R14)(R13*1), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	MOVQ    168(R12), R14
+	VMOVDQU (R14)(R13*1), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (R11), Y11
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 7 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3584(CX), Y9
+	VMOVDQU 3616(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3648(CX), Y9
+	VMOVDQU 3680(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3712(CX), Y9
+	VMOVDQU 3744(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3776(CX), Y9
+	VMOVDQU 3808(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3840(CX), Y9
+	VMOVDQU 3872(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3904(CX), Y9
+	VMOVDQU 3936(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3968(CX), Y9
+	VMOVDQU 4000(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4032(CX), Y9
+	VMOVDQU 4064(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+	MOVQ    168(R12), R14
+	VMOVDQU Y7, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x8Xor_end:
+	RET
+
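The AVX2 "mulAvxTwo" kernels use the classic split-nibble lookup instead of GFNI: each input byte is split into its low and high nibble (VPSRLQ plus VPAND with the broadcast 0x0f mask), each nibble indexes a 16-entry VPSHUFB table, and the two lookups are XORed into the output (XOR3WAY in the Xor variants above, a plain VPXOR to initialise the outputs in the non-Xor variants such as mulAvxTwo_8x9 below). A minimal per-byte Go sketch of mulAvxTwo_8x8Xor, assuming the table layout implied by the offsets above (64 bytes of matrix per input/output pair, low-nibble table first, each 16-entry table duplicated across the two YMM lanes), might read:

	// mulAvxTwo8x8XorRef is an illustrative scalar version of the kernel above;
	// the name and the layout assumptions are not part of the package API.
	func mulAvxTwo8x8XorRef(matrix []byte, in, out [][]byte, start, n int) {
		n &^= 31 // the assembly only processes whole 32-byte blocks
		for i := 0; i < 8; i++ { // inputs
			for j := 0; j < 8; j++ { // outputs
				tbl := matrix[(i*8+j)*64:]
				lo, hi := tbl[:32], tbl[32:64] // low- and high-nibble lookup tables
				for k := start; k < start+n; k++ {
					v := in[i][k]
					out[j][k] ^= lo[v&0x0f] ^ hi[v>>4]
				}
			}
		}
	}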
+// func mulAvxTwo_8x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x9(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 158 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_8x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (R11), Y12
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 7 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4032(CX), Y10
+	VMOVDQU 4064(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4096(CX), Y10
+	VMOVDQU 4128(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4160(CX), Y10
+	VMOVDQU 4192(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4224(CX), Y10
+	VMOVDQU 4256(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4288(CX), Y10
+	VMOVDQU 4320(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4352(CX), Y10
+	VMOVDQU 4384(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4416(CX), Y10
+	VMOVDQU 4448(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 4480(CX), Y10
+	VMOVDQU 4512(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 4544(CX), Y10
+	VMOVDQU 4576(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+	MOVQ    168(R12), R14
+	VMOVDQU Y7, (R14)(R13*1)
+	MOVQ    192(R12), R14
+	VMOVDQU Y8, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x9_loop
+	VZEROUPPER
+
+mulAvxTwo_8x9_end:
+	RET
+
+// func mulGFNI_8x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x9_64(SB), $0-88
+	// Loading 21 of 72 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 83 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z21, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z22, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      168(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      192(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x9_64_loop
+	VZEROUPPER
+
+mulGFNI_8x9_64_end:
+	RET
+
+// func mulGFNI_8x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x9_64Xor(SB), $0-88
+	// Loading 21 of 72 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 83 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x9_64Xor_loop:
+	// Load 9 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 (R14)(R13*1), Z21
+	MOVQ      24(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z22
+	MOVQ      48(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z23
+	MOVQ      72(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z24
+	MOVQ      96(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z25
+	MOVQ      120(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z26
+	MOVQ      144(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z27
+	MOVQ      168(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z28
+	MOVQ      192(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z21, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z22, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      168(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      192(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_8x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x9Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 158 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_8x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	MOVQ    (R12), R14
+	VMOVDQU (R14)(R13*1), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	MOVQ    24(R12), R14
+	VMOVDQU (R14)(R13*1), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	MOVQ    48(R12), R14
+	VMOVDQU (R14)(R13*1), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	MOVQ    72(R12), R14
+	VMOVDQU (R14)(R13*1), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	MOVQ    96(R12), R14
+	VMOVDQU (R14)(R13*1), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	MOVQ    120(R12), R14
+	VMOVDQU (R14)(R13*1), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	MOVQ    144(R12), R14
+	VMOVDQU (R14)(R13*1), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	MOVQ    168(R12), R14
+	VMOVDQU (R14)(R13*1), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	MOVQ    192(R12), R14
+	VMOVDQU (R14)(R13*1), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (R11), Y12
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 7 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4032(CX), Y10
+	VMOVDQU 4064(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4096(CX), Y10
+	VMOVDQU 4128(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4160(CX), Y10
+	VMOVDQU 4192(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4224(CX), Y10
+	VMOVDQU 4256(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4288(CX), Y10
+	VMOVDQU 4320(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4352(CX), Y10
+	VMOVDQU 4384(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4416(CX), Y10
+	VMOVDQU 4448(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 4480(CX), Y10
+	VMOVDQU 4512(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 4544(CX), Y10
+	VMOVDQU 4576(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+	MOVQ    168(R12), R14
+	VMOVDQU Y7, (R14)(R13*1)
+	MOVQ    192(R12), R14
+	VMOVDQU Y8, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x9Xor_end:
+	RET
+
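+// The mulAvxTwo_* kernels below multiply input shards against lookup tables
+// passed in the matrix argument using the split-nibble technique: each 32-byte
+// block is split into low and high 4-bit halves (VPSRLQ/VPAND with the
+// broadcast 0x0f mask), each half selects from a 32-byte VPSHUFB table, and
+// the two partial products are XORed together. The first input initializes
+// each output accumulator with VPXOR; the remaining inputs are folded in with
+// the XOR3WAY macro before the accumulators are stored.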
+// func mulAvxTwo_8x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 175 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_8x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (R10), Y13
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 6 to 10 outputs
+	VMOVDQU (R11), Y13
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3840(CX), Y11
+	VMOVDQU 3872(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3904(CX), Y11
+	VMOVDQU 3936(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3968(CX), Y11
+	VMOVDQU 4000(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4032(CX), Y11
+	VMOVDQU 4064(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4096(CX), Y11
+	VMOVDQU 4128(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4160(CX), Y11
+	VMOVDQU 4192(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4224(CX), Y11
+	VMOVDQU 4256(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4288(CX), Y11
+	VMOVDQU 4320(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4352(CX), Y11
+	VMOVDQU 4384(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 4416(CX), Y11
+	VMOVDQU 4448(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 7 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 4480(CX), Y11
+	VMOVDQU 4512(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 4544(CX), Y11
+	VMOVDQU 4576(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 4608(CX), Y11
+	VMOVDQU 4640(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4672(CX), Y11
+	VMOVDQU 4704(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4736(CX), Y11
+	VMOVDQU 4768(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4800(CX), Y11
+	VMOVDQU 4832(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4864(CX), Y11
+	VMOVDQU 4896(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4928(CX), Y11
+	VMOVDQU 4960(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4992(CX), Y11
+	VMOVDQU 5024(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5056(CX), Y11
+	VMOVDQU 5088(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+	MOVQ    168(R12), R14
+	VMOVDQU Y7, (R14)(R13*1)
+	MOVQ    192(R12), R14
+	VMOVDQU Y8, (R14)(R13*1)
+	MOVQ    216(R12), R14
+	VMOVDQU Y9, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x10_loop
+	VZEROUPPER
+
+mulAvxTwo_8x10_end:
+	RET
+
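+// The mulGFNI_*_64 kernels use GFNI instead of table lookups: each 64-bit
+// matrix entry is broadcast to a full ZMM register with VBROADCASTF32X2 and
+// applied to 64 input bytes at a time with VGF2P8AFFINEQB (an 8x8 bit-matrix
+// multiply per byte). Partial products are combined with VXORPD. Only the
+// first 20 of the 80 tables fit in registers here; the rest are consumed
+// straight from memory via the embedded-broadcast (.BCST) form.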
+// func mulGFNI_8x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x10_64(SB), $0-88
+	// Loading 20 of 80 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 92 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z20, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z21, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z22, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      168(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      192(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      216(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x10_64_loop
+	VZEROUPPER
+
+mulGFNI_8x10_64_end:
+	RET
+
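+// The Xor variant below accumulates into the existing output: at the top of
+// each loop iteration the ten destination blocks are loaded into Z20-Z29, and
+// every product, including the one from input 0, is XORed into them instead
+// of overwriting the outputs.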
+// func mulGFNI_8x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_8x10_64Xor(SB), $0-88
+	// Loading 20 of 80 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 92 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_8x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), DX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, DX
+
+mulGFNI_8x10_64Xor_loop:
+	// Load 10 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 (R14)(R13*1), Z20
+	MOVQ      24(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z21
+	MOVQ      48(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z22
+	MOVQ      72(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z23
+	MOVQ      96(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z24
+	MOVQ      120(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z25
+	MOVQ      144(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z26
+	MOVQ      168(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z27
+	MOVQ      192(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z28
+	MOVQ      216(R12), R14
+	VMOVDQU64 (R14)(R13*1), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R12), R14
+	VMOVDQU64 Z20, (R14)(R13*1)
+	MOVQ      24(R12), R14
+	VMOVDQU64 Z21, (R14)(R13*1)
+	MOVQ      48(R12), R14
+	VMOVDQU64 Z22, (R14)(R13*1)
+	MOVQ      72(R12), R14
+	VMOVDQU64 Z23, (R14)(R13*1)
+	MOVQ      96(R12), R14
+	VMOVDQU64 Z24, (R14)(R13*1)
+	MOVQ      120(R12), R14
+	VMOVDQU64 Z25, (R14)(R13*1)
+	MOVQ      144(R12), R14
+	VMOVDQU64 Z26, (R14)(R13*1)
+	MOVQ      168(R12), R14
+	VMOVDQU64 Z27, (R14)(R13*1)
+	MOVQ      192(R12), R14
+	VMOVDQU64 Z28, (R14)(R13*1)
+	MOVQ      216(R12), R14
+	VMOVDQU64 Z29, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R13
+	DECQ AX
+	JNZ  mulGFNI_8x10_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_8x10_64Xor_end:
+	RET
+
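+// The AVX2 Xor variant follows the same accumulate-in-place pattern: while
+// processing input 0, each output row Y0-Y9 is first loaded from its
+// destination (via the output pointer table and the running start offset),
+// and the products are folded in with XOR3WAY rather than initialized with
+// VPXOR.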
+// func mulAvxTwo_8x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_8x10Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 175 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_8x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), DX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, R9
+	ADDQ         R13, R10
+	ADDQ         R13, R11
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_8x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	MOVQ    (R12), R14
+	VMOVDQU (R14)(R13*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	MOVQ    24(R12), R14
+	VMOVDQU (R14)(R13*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	MOVQ    48(R12), R14
+	VMOVDQU (R14)(R13*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	MOVQ    72(R12), R14
+	VMOVDQU (R14)(R13*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	MOVQ    96(R12), R14
+	VMOVDQU (R14)(R13*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	MOVQ    120(R12), R14
+	VMOVDQU (R14)(R13*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	MOVQ    144(R12), R14
+	VMOVDQU (R14)(R13*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	MOVQ    168(R12), R14
+	VMOVDQU (R14)(R13*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	MOVQ    192(R12), R14
+	VMOVDQU (R14)(R13*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	MOVQ    216(R12), R14
+	VMOVDQU (R14)(R13*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (R10), Y13
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 6 to 10 outputs
+	VMOVDQU (R11), Y13
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3840(CX), Y11
+	VMOVDQU 3872(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3904(CX), Y11
+	VMOVDQU 3936(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3968(CX), Y11
+	VMOVDQU 4000(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4032(CX), Y11
+	VMOVDQU 4064(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4096(CX), Y11
+	VMOVDQU 4128(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4160(CX), Y11
+	VMOVDQU 4192(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4224(CX), Y11
+	VMOVDQU 4256(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4288(CX), Y11
+	VMOVDQU 4320(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4352(CX), Y11
+	VMOVDQU 4384(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 4416(CX), Y11
+	VMOVDQU 4448(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 7 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 4480(CX), Y11
+	VMOVDQU 4512(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 4544(CX), Y11
+	VMOVDQU 4576(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 4608(CX), Y11
+	VMOVDQU 4640(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4672(CX), Y11
+	VMOVDQU 4704(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4736(CX), Y11
+	VMOVDQU 4768(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4800(CX), Y11
+	VMOVDQU 4832(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4864(CX), Y11
+	VMOVDQU 4896(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4928(CX), Y11
+	VMOVDQU 4960(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4992(CX), Y11
+	VMOVDQU 5024(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5056(CX), Y11
+	VMOVDQU 5088(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	MOVQ    (R12), R14
+	VMOVDQU Y0, (R14)(R13*1)
+	MOVQ    24(R12), R14
+	VMOVDQU Y1, (R14)(R13*1)
+	MOVQ    48(R12), R14
+	VMOVDQU Y2, (R14)(R13*1)
+	MOVQ    72(R12), R14
+	VMOVDQU Y3, (R14)(R13*1)
+	MOVQ    96(R12), R14
+	VMOVDQU Y4, (R14)(R13*1)
+	MOVQ    120(R12), R14
+	VMOVDQU Y5, (R14)(R13*1)
+	MOVQ    144(R12), R14
+	VMOVDQU Y6, (R14)(R13*1)
+	MOVQ    168(R12), R14
+	VMOVDQU Y7, (R14)(R13*1)
+	MOVQ    192(R12), R14
+	VMOVDQU Y8, (R14)(R13*1)
+	MOVQ    216(R12), R14
+	VMOVDQU Y9, (R14)(R13*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R13
+	DECQ AX
+	JNZ  mulAvxTwo_8x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_8x10Xor_end:
+	RET
+
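+// The 9x1 shape has nine input shards and a single output, so the output
+// pointer is resolved once and kept in a general-purpose register; the lone
+// accumulator Y0 is stored directly and the pointer advanced by 32 bytes per
+// iteration instead of being re-read from the output table.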
+// func mulAvxTwo_9x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x1(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x1_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_9x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	VPXOR   Y2, Y3, Y0
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y4
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y4
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 448(CX), Y2
+	VMOVDQU 480(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 8 to 1 outputs
+	VMOVDQU (DX), Y4
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 512(CX), Y2
+	VMOVDQU 544(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x1_loop
+	VZEROUPPER
+
+mulAvxTwo_9x1_end:
+	RET
+
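+// The _64 variant processes 64 bytes of each input per iteration: the block is
+// handled as two 32-byte halves with separate accumulators (Y0 and Y1), and
+// both halves reuse the same pair of 32-byte lookup tables for each input.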
+// func mulAvxTwo_9x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_9x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y6
+	VMOVDQU 32(R11), Y5
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y6
+	VMOVDQU 32(R12), Y5
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R13)
+	VMOVDQU Y1, 32(R13)
+	ADDQ    $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_9x1_64_end:
+	RET
+
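+// The 9x1 GFNI kernel needs only nine tables, so all of them are broadcast
+// into Z0-Z8 up front and a single accumulator (Z9) collects the
+// VGF2P8AFFINEQB products from each of the nine inputs.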
+// func mulGFNI_9x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), CX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, CX
+
+mulGFNI_9x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z10
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z10, Z9
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z10
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z10
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z10
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z10
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z10
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (R10), Z10
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z6, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU64      (R11), Z10
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z7, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU64      (CX), Z10
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Store 1 outputs
+	VMOVDQU64 Z9, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_9x1_64_loop
+	VZEROUPPER
+
+mulGFNI_9x1_64_end:
+	RET
+
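The mulGFNI_* variants replace the nibble-table lookups with a single VGF2P8AFFINEQB per input: multiplication by a fixed constant is linear over GF(2), so it can be written as an 8x8 bit matrix, which the caller packs into one uint64 per coefficient and the routine broadcasts with VBROADCASTF32X2. A small sketch of that idea follows; it builds the matrix column by column and applies it one bit at a time, deliberately leaving out the exact bit packing the instruction expects (function names and the 0x11D polynomial are assumptions, not the upstream code).

package main

import "fmt"

// mulColumns returns the columns c*1, c*2, c*4, ..., c*128 in GF(2^8)
// (reduction polynomial assumed 0x11D). Together they form the bit matrix
// that "multiply by c" corresponds to.
func mulColumns(c byte) (cols [8]byte) {
	v := c
	for k := 0; k < 8; k++ {
		cols[k] = v
		carry := v & 0x80
		v <<= 1
		if carry != 0 {
			v ^= 0x1d
		}
	}
	return
}

// applyColumns multiplies x by the constant encoded in cols: XOR together the
// columns selected by the set bits of x. VGF2P8AFFINEQB performs this
// matrix-vector product for every byte of a 64-byte vector in one instruction.
func applyColumns(cols [8]byte, x byte) byte {
	var y byte
	for k := 0; k < 8; k++ {
		if x&(1<<k) != 0 {
			y ^= cols[k]
		}
	}
	return y
}

func main() {
	cols := mulColumns(2)
	fmt.Printf("2*0x80 = %#x\n", applyColumns(cols, 0x80)) // 0x1d after reduction
}
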
+// func mulGFNI_9x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), CX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R12
+	MOVQ            start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R12
+
+	// Add start offset to input
+	ADDQ R13, DX
+	ADDQ R13, BX
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, CX
+
+mulGFNI_9x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (R12), Z9
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z10
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z10
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z10
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z10
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z10
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z10
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (R10), Z10
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z6, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU64      (R11), Z10
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z7, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU64      (CX), Z10
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z8, Z10, Z10
+	VXORPD         Z9, Z10, Z9
+
+	// Store 1 outputs
+	VMOVDQU64 Z9, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_9x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x1_64Xor_end:
+	RET
+
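The *Xor variants differ from the plain routines only in how the destination is treated: the plain form computes each output from scratch (the first input initializes the accumulator), whereas the Xor form begins the loop by loading the existing output (the "Load 1 outputs" step) and accumulates into it, which lets callers process the input shards in batches. A scalar sketch of the distinction, with purely illustrative names and a hard-coded multiply-by-2 assuming the 0x11D polynomial:

package main

import "fmt"

// mulSlice overwrites dst with c*src (what e.g. mulGFNI_9x1_64 does for its
// first input), while mulSliceXor folds c*src into whatever dst already
// holds (what the Xor variant does after loading the outputs).
func mulSlice(mul func(byte) byte, src, dst []byte) {
	for i, v := range src {
		dst[i] = mul(v)
	}
}

func mulSliceXor(mul func(byte) byte, src, dst []byte) {
	for i, v := range src {
		dst[i] ^= mul(v)
	}
}

func main() {
	double := func(x byte) byte { // multiply by 2 in GF(2^8), polynomial 0x11D assumed
		if x&0x80 != 0 {
			return x<<1 ^ 0x1d
		}
		return x << 1
	}
	src := []byte{1, 0x80, 7}
	dst := []byte{9, 9, 9}
	mulSliceXor(double, src, dst)
	fmt.Printf("%x\n", dst) // previous contents XORed with 2*src
}
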
+// func mulAvxTwo_9x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x1Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x1Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_9x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (R13), Y0
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y4
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y4
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 448(CX), Y2
+	VMOVDQU 480(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 8 to 1 outputs
+	VMOVDQU (DX), Y4
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 512(CX), Y2
+	VMOVDQU 544(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x1Xor_end:
+	RET
+
+// func mulAvxTwo_9x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_9x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R13), Y0
+	VMOVDQU 32(R13), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y6
+	VMOVDQU 32(R11), Y5
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y6
+	VMOVDQU 32(R12), Y5
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R13)
+	VMOVDQU Y1, 32(R13)
+	ADDQ    $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x2(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 43 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x2_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R13
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+	ADDQ R15, R13
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_9x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y5
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y5
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 768(CX), Y3
+	VMOVDQU 800(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 832(CX), Y3
+	VMOVDQU 864(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 7 to 2 outputs
+	VMOVDQU (R12), Y5
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 896(CX), Y3
+	VMOVDQU 928(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 960(CX), Y3
+	VMOVDQU 992(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 8 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 1024(CX), Y3
+	VMOVDQU 1056(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 1088(CX), Y3
+	VMOVDQU 1120(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x2_loop
+	VZEROUPPER
+
+mulAvxTwo_9x2_end:
+	RET
+
+// func mulAvxTwo_9x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x2_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 81 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x2_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R13
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+	ADDQ R15, R13
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_9x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y9
+	VMOVDQU 32(R11), Y11
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU (R12), Y9
+	VMOVDQU 32(R12), Y11
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 8 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y2, (R13)
+	VMOVDQU Y3, 32(R13)
+	ADDQ    $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_9x2_64_end:
+	RET
+
+// func mulGFNI_9x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), CX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R12
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+	ADDQ R14, R12
+
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, CX
+
+mulGFNI_9x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z20
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z20, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z20, Z19
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z20
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z3, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z20
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z5, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z20
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z20
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z9, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z20
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z11, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (R10), Z20
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z12, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z13, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU64      (R11), Z20
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z14, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z15, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 8 to 2 outputs
+	VMOVDQU64      (CX), Z20
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z16, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z17, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Store 2 outputs
+	VMOVDQU64 Z18, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z19, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_9x2_64_loop
+	VZEROUPPER
+
+mulGFNI_9x2_64_end:
+	RET
+
+// func mulGFNI_9x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), CX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R12
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+	ADDQ R14, R12
+
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, CX
+
+mulGFNI_9x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (R13), Z18
+	VMOVDQU64 (R12), Z19
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z20
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z1, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z20
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z3, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z20
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z5, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z20
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z7, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z20
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z9, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z20
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z11, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (R10), Z20
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z12, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z13, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU64      (R11), Z20
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z14, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z15, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Load and process 64 bytes from input 8 to 2 outputs
+	VMOVDQU64      (CX), Z20
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z16, Z20, Z21
+	VXORPD         Z18, Z21, Z18
+	VGF2P8AFFINEQB $0x00, Z17, Z20, Z21
+	VXORPD         Z19, Z21, Z19
+
+	// Store 2 outputs
+	VMOVDQU64 Z18, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z19, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_9x2_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x2Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 43 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x2Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R13
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+	ADDQ R15, R13
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_9x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R14), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (R13), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y5
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y5
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 768(CX), Y3
+	VMOVDQU 800(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 832(CX), Y3
+	VMOVDQU 864(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 7 to 2 outputs
+	VMOVDQU (R12), Y5
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 896(CX), Y3
+	VMOVDQU 928(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 960(CX), Y3
+	VMOVDQU 992(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 8 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 1024(CX), Y3
+	VMOVDQU 1056(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 1088(CX), Y3
+	VMOVDQU 1120(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y1, (R13)
+	ADDQ    $0x20, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x2Xor_end:
+	RET
+
+// func mulAvxTwo_9x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 81 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R13
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+	ADDQ R15, R13
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_9x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R14), Y0
+	VMOVDQU 32(R14), Y1
+	VMOVDQU (R13), Y2
+	VMOVDQU 32(R13), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y9
+	VMOVDQU 32(R11), Y11
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU (R12), Y9
+	VMOVDQU 32(R12), Y11
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 8 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y2, (R13)
+	VMOVDQU Y3, 32(R13)
+	ADDQ    $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x3(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 62 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x3_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R15
+	MOVQ  48(R13), R13
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_9x3_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y6
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y6
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y6
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1152(CX), Y4
+	VMOVDQU 1184(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1216(CX), Y4
+	VMOVDQU 1248(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1280(CX), Y4
+	VMOVDQU 1312(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 7 to 3 outputs
+	VMOVDQU (R12), Y6
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1344(CX), Y4
+	VMOVDQU 1376(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1408(CX), Y4
+	VMOVDQU 1440(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1472(CX), Y4
+	VMOVDQU 1504(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 8 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1536(CX), Y4
+	VMOVDQU 1568(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1600(CX), Y4
+	VMOVDQU 1632(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1664(CX), Y4
+	VMOVDQU 1696(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y1, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x3_loop
+	VZEROUPPER
+
+mulAvxTwo_9x3_end:
+	RET
+
+// func mulAvxTwo_9x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x3_64(SB), $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 118 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x3_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R15
+	MOVQ  48(R13), R13
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_9x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y11
+	VMOVDQU 32(R10), Y13
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y11
+	VMOVDQU 32(R11), Y13
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU (R12), Y11
+	VMOVDQU 32(R12), Y13
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 8 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y2, (R15)
+	VMOVDQU Y3, 32(R15)
+	ADDQ    $0x40, R15
+	VMOVDQU Y4, (R13)
+	VMOVDQU Y5, 32(R13)
+	ADDQ    $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x3_64_loop
+	VZEROUPPER
+
+mulAvxTwo_9x3_64_end:
+	RET
+
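For reference: the _64 variants such as mulAvxTwo_9x3_64 are the same kernel unrolled to two YMM registers per output, so each pass consumes 64 bytes per shard and the byte count n is shifted right by 6 instead of 5 before entering the loop.
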
+// func mulGFNI_9x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_9x3_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	VBROADCASTF32X2 208(CX), Z26
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), CX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R14
+	MOVQ            48(R12), R12
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R12
+
+	// Add start offset to input
+	ADDQ R15, DX
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, CX
+
+mulGFNI_9x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU64      (R11), Z30
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 3 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z25, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z26, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Store 3 outputs
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_9x3_64_loop
+	VZEROUPPER
+
+mulGFNI_9x3_64_end:
+	RET
+
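For reference: the mulGFNI_* kernels replace the nibble tables with VGF2P8AFFINEQB, which multiplies every input byte by an 8x8 bit matrix over GF(2); the per-coefficient matrices are the 64-bit values broadcast into Z0 through Z26 above. Below is a scalar Go model of the per-byte operation, a sketch rather than the upstream code, assuming the Intel convention that the row producing result bit i is stored in byte 7-i of the 64-bit word and that the immediate (the XOR constant) is zero, as it is in the generated code.

package main

import (
	"fmt"
	"math/bits"
)

// affineByte applies the 8x8 bit matrix m to the byte x over GF(2), mirroring
// what VGF2P8AFFINEQB with imm8 = 0 does for every byte lane.
func affineByte(m uint64, x byte) byte {
	var out byte
	for i := 0; i < 8; i++ {
		row := byte(m >> (8 * (7 - i))) // matrix row for result bit i
		if bits.OnesCount8(row&x)&1 == 1 {
			out |= 1 << i
		}
	}
	return out
}

func main() {
	// 0x0102040810204080 encodes the identity matrix, i.e. multiplication by 1.
	const identity = 0x0102040810204080
	for _, x := range []byte{0x00, 0x5a, 0xff} {
		fmt.Printf("%02x -> %02x\n", x, affineByte(identity, x))
	}
}
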
+// func mulGFNI_9x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_9x3_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	VBROADCASTF32X2 208(CX), Z26
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), CX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R14
+	MOVQ            48(R12), R12
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R12
+
+	// Add start offset to input
+	ADDQ R15, DX
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, CX
+
+mulGFNI_9x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (R13), Z27
+	VMOVDQU64 (R14), Z28
+	VMOVDQU64 (R12), Z29
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU64      (R11), Z30
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 3 outputs
+	VMOVDQU64      (CX), Z30
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z25, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z26, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Store 3 outputs
+	VMOVDQU64 Z27, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z28, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z29, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_9x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_9x3Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 62 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x3Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R15
+	MOVQ  48(R13), R13
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_9x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R14), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R15), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (R13), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y6
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y6
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y6
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1152(CX), Y4
+	VMOVDQU 1184(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1216(CX), Y4
+	VMOVDQU 1248(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1280(CX), Y4
+	VMOVDQU 1312(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 7 to 3 outputs
+	VMOVDQU (R12), Y6
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1344(CX), Y4
+	VMOVDQU 1376(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1408(CX), Y4
+	VMOVDQU 1440(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1472(CX), Y4
+	VMOVDQU 1504(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 8 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1536(CX), Y4
+	VMOVDQU 1568(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1600(CX), Y4
+	VMOVDQU 1632(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1664(CX), Y4
+	VMOVDQU 1696(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y1, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x3Xor_end:
+	RET
+
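For reference: the ...Xor entry points differ from the plain ones only in how the output shards are initialized. The plain kernels compute the first input's product straight into the output registers (the VPXOR Y5, Y6, Y0 form), while the Xor kernels first load the existing output (the VMOVDQU (R14), Y0 loads, or a separate "Load 3 outputs" block) and accumulate into it, which is what an encoder needs when a destination shard already holds partial results. Below is a scalar Go sketch of the two behaviours, with a stand-in multiply so it stays self-contained; all names are illustrative.

package main

import "fmt"

// mulGF stands in for a table-driven GF(2^8) multiply; only the coefficient 1
// is implemented so the example stays small.
func mulGF(coeff, v byte) byte {
	if coeff == 1 {
		return v
	}
	return 0
}

// encode overwrites out with the coded sum, like the plain kernels.
func encode(out, coeffs []byte, inputs [][]byte) {
	for i := range out {
		acc := mulGF(coeffs[0], inputs[0][i]) // first input initializes out
		for j := 1; j < len(inputs); j++ {
			acc ^= mulGF(coeffs[j], inputs[j][i])
		}
		out[i] = acc
	}
}

// encodeXor folds the same sum into whatever out already holds,
// like the ...Xor kernels.
func encodeXor(out, coeffs []byte, inputs [][]byte) {
	for i := range out {
		acc := out[i]
		for j := range inputs {
			acc ^= mulGF(coeffs[j], inputs[j][i])
		}
		out[i] = acc
	}
}

func main() {
	inputs := [][]byte{{1, 2, 3}, {4, 5, 6}}
	coeffs := []byte{1, 1}
	out := []byte{9, 9, 9}
	encode(out, coeffs, inputs)    // out = {1^4, 2^5, 3^6}
	encodeXor(out, coeffs, inputs) // XORs the same sum in again, zeroing out
	fmt.Println(out)               // [0 0 0]
}
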
+// func mulAvxTwo_9x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_9x3_64Xor(SB), $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 118 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R15
+	MOVQ  48(R13), R13
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_9x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R14), Y0
+	VMOVDQU 32(R14), Y1
+	VMOVDQU (R15), Y2
+	VMOVDQU 32(R15), Y3
+	VMOVDQU (R13), Y4
+	VMOVDQU 32(R13), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (R10), Y11
+	VMOVDQU 32(R10), Y13
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU (R11), Y11
+	VMOVDQU 32(R11), Y13
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU (R12), Y11
+	VMOVDQU 32(R12), Y13
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 8 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y2, (R15)
+	VMOVDQU Y3, 32(R15)
+	ADDQ    $0x40, R15
+	VMOVDQU Y4, (R13)
+	VMOVDQU Y5, 32(R13)
+	ADDQ    $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_9x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_9x4(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 81 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x4_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), R10
+	MOVQ  168(AX), R11
+	MOVQ  192(AX), AX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R15
+	MOVQ  72(R12), R12
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
+	VPBROADCASTB X4, Y4
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_9x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R9), Y7
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (R10), Y7
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 7 to 4 outputs
+	VMOVDQU (R11), Y7
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1792(CX), Y5
+	VMOVDQU 1824(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1856(CX), Y5
+	VMOVDQU 1888(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1920(CX), Y5
+	VMOVDQU 1952(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1984(CX), Y5
+	VMOVDQU 2016(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 8 to 4 outputs
+	VMOVDQU (AX), Y7
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 2048(CX), Y5
+	VMOVDQU 2080(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 2112(CX), Y5
+	VMOVDQU 2144(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 2176(CX), Y5
+	VMOVDQU 2208(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 2240(CX), Y5
+	VMOVDQU 2272(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y2, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_9x4_loop
+	VZEROUPPER
+
+mulAvxTwo_9x4_end:
+	RET
+
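For reference: with 9 input shards and 4 output shards, mulAvxTwo_9x4 runs out of general-purpose registers. AX, which the smaller kernels keep as the loop counter, is reused as the ninth input pointer, so the iteration count (n >> 5, one 32-byte block per pass) is recomputed into BP just before the loop and decremented there instead.
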
+// func mulGFNI_9x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_9x4_64(SB), $8-88
+	// Loading 26 of 36 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), R10
+	MOVQ            168(AX), R11
+	MOVQ            192(AX), AX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R14
+	MOVQ            48(R12), R15
+	MOVQ            72(R12), R12
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_9x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 4 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 4 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 4 outputs
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_9x4_64_loop
+	VZEROUPPER
+
+mulGFNI_9x4_64_end:
+	RET
+
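For reference: a 9x4 GFNI kernel needs 9 x 4 = 36 eight-byte matrices, but only 26 fit once Z26 through Z31 are reserved as working registers, which is what the generated comment "Loading 26 of 36 tables to registers" records. The remaining ten matrices stay in the matrix slice and are applied with the embedded-broadcast form VGF2P8AFFINEQB.BCST (the 208(CX) through 280(CX) operands above), trading a memory operand per use for the saved registers.
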
+// func mulGFNI_9x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+	TEXT ·mulGFNI_9x4_64Xor(SB), $8-88
+	// Loading 26 of 36 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 42 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), R10
+	MOVQ            168(AX), R11
+	MOVQ            192(AX), AX
+	MOVQ            out_base+48(FP), R12
+	MOVQ            out_base+48(FP), R12
+	MOVQ            (R12), R13
+	MOVQ            24(R12), R14
+	MOVQ            48(R12), R15
+	MOVQ            72(R12), R12
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_9x4_64Xor_loop:
+	// Load 4 outputs
+	VMOVDQU64 (R13), Z26
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R12), Z29
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 4 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 4 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 4 outputs
+	VMOVDQU64 Z26, (R13)
+	ADDQ      $0x40, R13
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R12)
+	ADDQ      $0x40, R12
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_9x4_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x4_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+	TEXT ·mulAvxTwo_9x4Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 81 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x4Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), R10
+	MOVQ  168(AX), R11
+	MOVQ  192(AX), AX
+	MOVQ  out_base+48(FP), R12
+	MOVQ  (R12), R13
+	MOVQ  24(R12), R14
+	MOVQ  48(R12), R15
+	MOVQ  72(R12), R12
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R12
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
+	VPBROADCASTB X4, Y4
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_9x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R13), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU (R14), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU (R15), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R9), Y7
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (R10), Y7
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 7 to 4 outputs
+	VMOVDQU (R11), Y7
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1792(CX), Y5
+	VMOVDQU 1824(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1856(CX), Y5
+	VMOVDQU 1888(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1920(CX), Y5
+	VMOVDQU 1952(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1984(CX), Y5
+	VMOVDQU 2016(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 8 to 4 outputs
+	VMOVDQU (AX), Y7
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 2048(CX), Y5
+	VMOVDQU 2080(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 2112(CX), Y5
+	VMOVDQU 2144(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 2176(CX), Y5
+	VMOVDQU 2208(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 2240(CX), Y5
+	VMOVDQU 2272(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y1, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y2, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_9x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x4Xor_end:
+	RET
+
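+// Note on the mulAvxTwo_* kernels below: GF(2^8) multiplication is done with
+// the split-nibble lookup technique. Each 32-byte input chunk is split into
+// low and high 4-bit halves (VPSRLQ/VPAND against the broadcast 0x0f mask),
+// each half selects from a 16-entry slice of the matrix tables via VPSHUFB,
+// and the two lookups are combined. The first input initializes every output
+// register with VPXOR; later inputs accumulate into it through the XOR3WAY
+// macro defined near the top of this file.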
+// func mulAvxTwo_9x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_9x5(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 100 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x5_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_9x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R10), Y8
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (R11), Y8
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 7 to 5 outputs
+	VMOVDQU (R12), Y8
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2240(CX), Y6
+	VMOVDQU 2272(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2304(CX), Y6
+	VMOVDQU 2336(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2368(CX), Y6
+	VMOVDQU 2400(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2432(CX), Y6
+	VMOVDQU 2464(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2496(CX), Y6
+	VMOVDQU 2528(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 8 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2560(CX), Y6
+	VMOVDQU 2592(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2624(CX), Y6
+	VMOVDQU 2656(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2688(CX), Y6
+	VMOVDQU 2720(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2752(CX), Y6
+	VMOVDQU 2784(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2816(CX), Y6
+	VMOVDQU 2848(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x5_loop
+	VZEROUPPER
+
+mulAvxTwo_9x5_end:
+	RET
+
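+// Note on the mulGFNI_* kernels: multiplication by a fixed GF(2^8) element is
+// a linear map over GF(2), so each 8-byte matrix entry encodes it as an 8x8
+// bit-matrix applied with VGF2P8AFFINEQB (immediate 0, i.e. no added
+// constant). VBROADCASTF32X2 replicates a 64-bit entry to every qword lane of
+// a Z register. Only the matrices that fit are pinned in registers (25 of 45
+// here, per the comment below); the rest are applied straight from memory via
+// the embedded-broadcast form VGF2P8AFFINEQB.BCST.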
+// func mulGFNI_9x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_9x5_64(SB), $0-88
+	// Loading 25 of 45 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 52 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 5 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x5_64_loop
+	VZEROUPPER
+
+mulGFNI_9x5_64_end:
+	RET
+
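+// The *Xor variants accumulate into the existing destination contents rather
+// than overwriting them: each loop iteration first loads the current output
+// vectors from the destination slices, XORs in the products of all nine
+// inputs, and stores the result back.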
+// func mulGFNI_9x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_9x5_64Xor(SB), $0-88
+	// Loading 25 of 45 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 52 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x5_64Xor_loop:
+	// Load 5 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 (R15)(R14*1), Z25
+	MOVQ      24(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z26
+	MOVQ      48(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z27
+	MOVQ      72(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z28
+	MOVQ      96(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z29
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 5 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x5_64Xor_end:
+	RET
+
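+// AVX2 Xor variant with the destination kept on stack: the five output
+// pointers are fetched from the out slice (base in R13) as needed, the
+// current output blocks at offset R14 are loaded while input 0 is processed,
+// accumulated across all nine inputs, and written back at the end of the
+// iteration.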
+// func mulAvxTwo_9x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_9x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 100 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_9x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	MOVQ    (R13), R15
+	VMOVDQU (R15)(R14*1), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	MOVQ    24(R13), R15
+	VMOVDQU (R15)(R14*1), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	MOVQ    48(R13), R15
+	VMOVDQU (R15)(R14*1), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	MOVQ    72(R13), R15
+	VMOVDQU (R15)(R14*1), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	MOVQ    96(R13), R15
+	VMOVDQU (R15)(R14*1), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R10), Y8
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (R11), Y8
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 7 to 5 outputs
+	VMOVDQU (R12), Y8
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2240(CX), Y6
+	VMOVDQU 2272(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2304(CX), Y6
+	VMOVDQU 2336(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2368(CX), Y6
+	VMOVDQU 2400(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2432(CX), Y6
+	VMOVDQU 2464(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2496(CX), Y6
+	VMOVDQU 2528(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 8 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2560(CX), Y6
+	VMOVDQU 2592(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2624(CX), Y6
+	VMOVDQU 2656(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2688(CX), Y6
+	VMOVDQU 2720(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2752(CX), Y6
+	VMOVDQU 2784(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2816(CX), Y6
+	VMOVDQU 2848(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Store 5 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x5Xor_end:
+	RET
+
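+// Six outputs per loop: with nine input pointers, the matrix base, the mask
+// and the loop bookkeeping there are not enough spare general-purpose
+// registers to also pin six destination pointers, so this routine re-reads
+// each output's data pointer from the out [][]byte backing array (R13, slice
+// headers are 24 bytes apart) and indexes it with the running offset in R14.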
+// func mulAvxTwo_9x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_9x6(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 119 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x6_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_9x6_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y5
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R9), Y9
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (R10), Y9
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 6 to 6 outputs
+	VMOVDQU (R11), Y9
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 7 to 6 outputs
+	VMOVDQU (R12), Y9
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2688(CX), Y7
+	VMOVDQU 2720(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2752(CX), Y7
+	VMOVDQU 2784(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2816(CX), Y7
+	VMOVDQU 2848(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2880(CX), Y7
+	VMOVDQU 2912(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2944(CX), Y7
+	VMOVDQU 2976(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3008(CX), Y7
+	VMOVDQU 3040(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 8 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 3072(CX), Y7
+	VMOVDQU 3104(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 3136(CX), Y7
+	VMOVDQU 3168(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 3200(CX), Y7
+	VMOVDQU 3232(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 3264(CX), Y7
+	VMOVDQU 3296(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 3328(CX), Y7
+	VMOVDQU 3360(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3392(CX), Y7
+	VMOVDQU 3424(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x6_loop
+	VZEROUPPER
+
+mulAvxTwo_9x6_end:
+	RET
+
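+// For six outputs only 24 of the 54 GFNI bit-matrices fit in Z0-Z23 (inputs
+// 0-3); inputs 4 through 8 therefore use VGF2P8AFFINEQB.BCST directly against
+// the remaining matrix entries starting at 192(CX).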
+// func mulGFNI_9x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_9x6_64(SB), $0-88
+	// Loading 24 of 54 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 62 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 6 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x6_64_loop
+	VZEROUPPER
+
+mulGFNI_9x6_64_end:
+	RET
+
+// func mulGFNI_9x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_9x6_64Xor(SB), $0-88
+	// Loading 24 of 54 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 62 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x6_64Xor_loop:
+	// Load 6 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 (R15)(R14*1), Z24
+	MOVQ      24(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z25
+	MOVQ      48(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z26
+	MOVQ      72(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z27
+	MOVQ      96(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z28
+	MOVQ      120(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z29
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 6 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 6 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x6_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x6_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_9x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 119 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_9x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	MOVQ    (R13), R15
+	VMOVDQU (R15)(R14*1), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	MOVQ    24(R13), R15
+	VMOVDQU (R15)(R14*1), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	MOVQ    48(R13), R15
+	VMOVDQU (R15)(R14*1), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	MOVQ    72(R13), R15
+	VMOVDQU (R15)(R14*1), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	MOVQ    96(R13), R15
+	VMOVDQU (R15)(R14*1), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	MOVQ    120(R13), R15
+	VMOVDQU (R15)(R14*1), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R9), Y9
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (R10), Y9
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 6 to 6 outputs
+	VMOVDQU (R11), Y9
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 7 to 6 outputs
+	VMOVDQU (R12), Y9
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2688(CX), Y7
+	VMOVDQU 2720(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2752(CX), Y7
+	VMOVDQU 2784(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2816(CX), Y7
+	VMOVDQU 2848(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2880(CX), Y7
+	VMOVDQU 2912(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2944(CX), Y7
+	VMOVDQU 2976(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3008(CX), Y7
+	VMOVDQU 3040(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 8 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 3072(CX), Y7
+	VMOVDQU 3104(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 3136(CX), Y7
+	VMOVDQU 3168(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 3200(CX), Y7
+	VMOVDQU 3232(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 3264(CX), Y7
+	VMOVDQU 3296(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 3328(CX), Y7
+	VMOVDQU 3360(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3392(CX), Y7
+	VMOVDQU 3424(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Store 6 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x6Xor_end:
+	RET
+
+// func mulAvxTwo_9x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_9x7(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 138 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_9x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y6
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (R11), Y10
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 7 to 7 outputs
+	VMOVDQU (R12), Y10
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3136(CX), Y8
+	VMOVDQU 3168(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3200(CX), Y8
+	VMOVDQU 3232(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3264(CX), Y8
+	VMOVDQU 3296(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3328(CX), Y8
+	VMOVDQU 3360(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3392(CX), Y8
+	VMOVDQU 3424(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3456(CX), Y8
+	VMOVDQU 3488(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3520(CX), Y8
+	VMOVDQU 3552(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 8 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3584(CX), Y8
+	VMOVDQU 3616(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3648(CX), Y8
+	VMOVDQU 3680(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3712(CX), Y8
+	VMOVDQU 3744(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3776(CX), Y8
+	VMOVDQU 3808(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3840(CX), Y8
+	VMOVDQU 3872(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3904(CX), Y8
+	VMOVDQU 3936(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3968(CX), Y8
+	VMOVDQU 4000(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x7_loop
+	VZEROUPPER
+
+mulAvxTwo_9x7_end:
+	RET
+
+// func mulGFNI_9x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_9x7_64(SB), $0-88
+	// Loading 23 of 63 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 72 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 7 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x7_64_loop
+	VZEROUPPER
+
+mulGFNI_9x7_64_end:
+	RET
+
+// func mulGFNI_9x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT Β·mulGFNI_9x7_64Xor(SB), $0-88
+	// Loading 23 of 63 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 72 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x7_64Xor_loop:
+	// Load 7 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 (R15)(R14*1), Z23
+	MOVQ      24(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z24
+	MOVQ      48(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z25
+	MOVQ      72(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z26
+	MOVQ      96(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z27
+	MOVQ      120(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z28
+	MOVQ      144(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z29
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 7 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 7 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x7_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x7_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_9x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 138 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_9x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	MOVQ    (R13), R15
+	VMOVDQU (R15)(R14*1), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	MOVQ    24(R13), R15
+	VMOVDQU (R15)(R14*1), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	MOVQ    48(R13), R15
+	VMOVDQU (R15)(R14*1), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	MOVQ    72(R13), R15
+	VMOVDQU (R15)(R14*1), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	MOVQ    96(R13), R15
+	VMOVDQU (R15)(R14*1), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	MOVQ    120(R13), R15
+	VMOVDQU (R15)(R14*1), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	MOVQ    144(R13), R15
+	VMOVDQU (R15)(R14*1), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (R11), Y10
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 7 to 7 outputs
+	VMOVDQU (R12), Y10
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3136(CX), Y8
+	VMOVDQU 3168(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3200(CX), Y8
+	VMOVDQU 3232(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3264(CX), Y8
+	VMOVDQU 3296(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3328(CX), Y8
+	VMOVDQU 3360(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3392(CX), Y8
+	VMOVDQU 3424(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3456(CX), Y8
+	VMOVDQU 3488(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3520(CX), Y8
+	VMOVDQU 3552(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 8 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3584(CX), Y8
+	VMOVDQU 3616(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3648(CX), Y8
+	VMOVDQU 3680(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3712(CX), Y8
+	VMOVDQU 3744(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3776(CX), Y8
+	VMOVDQU 3808(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3840(CX), Y8
+	VMOVDQU 3872(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3904(CX), Y8
+	VMOVDQU 3936(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3968(CX), Y8
+	VMOVDQU 4000(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x7Xor_end:
+	RET
+
+// func mulAvxTwo_9x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·mulAvxTwo_9x8(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 157 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_9x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (R11), Y11
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 7 to 8 outputs
+	VMOVDQU (R12), Y11
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3584(CX), Y9
+	VMOVDQU 3616(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3648(CX), Y9
+	VMOVDQU 3680(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3712(CX), Y9
+	VMOVDQU 3744(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3776(CX), Y9
+	VMOVDQU 3808(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3840(CX), Y9
+	VMOVDQU 3872(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3904(CX), Y9
+	VMOVDQU 3936(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3968(CX), Y9
+	VMOVDQU 4000(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4032(CX), Y9
+	VMOVDQU 4064(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 8 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 4096(CX), Y9
+	VMOVDQU 4128(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 4160(CX), Y9
+	VMOVDQU 4192(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 4224(CX), Y9
+	VMOVDQU 4256(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 4288(CX), Y9
+	VMOVDQU 4320(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 4352(CX), Y9
+	VMOVDQU 4384(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 4416(CX), Y9
+	VMOVDQU 4448(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 4480(CX), Y9
+	VMOVDQU 4512(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4544(CX), Y9
+	VMOVDQU 4576(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+	MOVQ    168(R13), R15
+	VMOVDQU Y7, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x8_loop
+	VZEROUPPER
+
+mulAvxTwo_9x8_end:
+	RET
+
+// func mulGFNI_9x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x8_64(SB), $0-88
+	// Loading 22 of 72 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 82 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 8 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z22, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      168(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x8_64_loop
+	VZEROUPPER
+
+mulGFNI_9x8_64_end:
+	RET
+
+// func mulGFNI_9x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x8_64Xor(SB), $0-88
+	// Loading 22 of 72 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 82 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x8_64Xor_loop:
+	// Load 8 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 (R15)(R14*1), Z22
+	MOVQ      24(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z23
+	MOVQ      48(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z24
+	MOVQ      72(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z25
+	MOVQ      96(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z26
+	MOVQ      120(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z27
+	MOVQ      144(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z28
+	MOVQ      168(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 8 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z22, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      168(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 157 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_9x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	MOVQ    (R13), R15
+	VMOVDQU (R15)(R14*1), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	MOVQ    24(R13), R15
+	VMOVDQU (R15)(R14*1), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	MOVQ    48(R13), R15
+	VMOVDQU (R15)(R14*1), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	MOVQ    72(R13), R15
+	VMOVDQU (R15)(R14*1), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	MOVQ    96(R13), R15
+	VMOVDQU (R15)(R14*1), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	MOVQ    120(R13), R15
+	VMOVDQU (R15)(R14*1), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	MOVQ    144(R13), R15
+	VMOVDQU (R15)(R14*1), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	MOVQ    168(R13), R15
+	VMOVDQU (R15)(R14*1), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (R11), Y11
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 7 to 8 outputs
+	VMOVDQU (R12), Y11
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3584(CX), Y9
+	VMOVDQU 3616(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3648(CX), Y9
+	VMOVDQU 3680(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3712(CX), Y9
+	VMOVDQU 3744(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3776(CX), Y9
+	VMOVDQU 3808(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3840(CX), Y9
+	VMOVDQU 3872(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3904(CX), Y9
+	VMOVDQU 3936(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3968(CX), Y9
+	VMOVDQU 4000(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4032(CX), Y9
+	VMOVDQU 4064(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 8 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 4096(CX), Y9
+	VMOVDQU 4128(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 4160(CX), Y9
+	VMOVDQU 4192(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 4224(CX), Y9
+	VMOVDQU 4256(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 4288(CX), Y9
+	VMOVDQU 4320(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 4352(CX), Y9
+	VMOVDQU 4384(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 4416(CX), Y9
+	VMOVDQU 4448(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 4480(CX), Y9
+	VMOVDQU 4512(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4544(CX), Y9
+	VMOVDQU 4576(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+	MOVQ    168(R13), R15
+	VMOVDQU Y7, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x8Xor_end:
+	RET
+
+// func mulAvxTwo_9x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x9(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 176 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_9x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (R11), Y12
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 7 to 9 outputs
+	VMOVDQU (R12), Y12
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4032(CX), Y10
+	VMOVDQU 4064(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4096(CX), Y10
+	VMOVDQU 4128(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4160(CX), Y10
+	VMOVDQU 4192(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4224(CX), Y10
+	VMOVDQU 4256(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4288(CX), Y10
+	VMOVDQU 4320(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4352(CX), Y10
+	VMOVDQU 4384(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4416(CX), Y10
+	VMOVDQU 4448(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 4480(CX), Y10
+	VMOVDQU 4512(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 4544(CX), Y10
+	VMOVDQU 4576(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 8 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4608(CX), Y10
+	VMOVDQU 4640(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4672(CX), Y10
+	VMOVDQU 4704(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4736(CX), Y10
+	VMOVDQU 4768(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4800(CX), Y10
+	VMOVDQU 4832(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4864(CX), Y10
+	VMOVDQU 4896(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4928(CX), Y10
+	VMOVDQU 4960(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4992(CX), Y10
+	VMOVDQU 5024(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 5056(CX), Y10
+	VMOVDQU 5088(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 5120(CX), Y10
+	VMOVDQU 5152(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+	MOVQ    168(R13), R15
+	VMOVDQU Y7, (R15)(R14*1)
+	MOVQ    192(R13), R15
+	VMOVDQU Y8, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x9_loop
+	VZEROUPPER
+
+mulAvxTwo_9x9_end:
+	RET
+
+// func mulGFNI_9x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x9_64(SB), $0-88
+	// Loading 21 of 81 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 92 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 9 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z21, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z22, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      168(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      192(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x9_64_loop
+	VZEROUPPER
+
+mulGFNI_9x9_64_end:
+	RET
+
+// func mulGFNI_9x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x9_64Xor(SB), $0-88
+	// Loading 21 of 81 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 92 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x9_64Xor_loop:
+	// Load 9 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 (R15)(R14*1), Z21
+	MOVQ      24(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z22
+	MOVQ      48(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z23
+	MOVQ      72(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z24
+	MOVQ      96(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z25
+	MOVQ      120(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z26
+	MOVQ      144(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z27
+	MOVQ      168(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z28
+	MOVQ      192(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 9 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z21, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z22, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      168(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      192(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x9Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 176 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_9x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	MOVQ    (R13), R15
+	VMOVDQU (R15)(R14*1), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	MOVQ    24(R13), R15
+	VMOVDQU (R15)(R14*1), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	MOVQ    48(R13), R15
+	VMOVDQU (R15)(R14*1), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	MOVQ    72(R13), R15
+	VMOVDQU (R15)(R14*1), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	MOVQ    96(R13), R15
+	VMOVDQU (R15)(R14*1), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	MOVQ    120(R13), R15
+	VMOVDQU (R15)(R14*1), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	MOVQ    144(R13), R15
+	VMOVDQU (R15)(R14*1), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	MOVQ    168(R13), R15
+	VMOVDQU (R15)(R14*1), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	MOVQ    192(R13), R15
+	VMOVDQU (R15)(R14*1), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
 	VMOVDQU 2880(CX), Y10
 	VMOVDQU 2912(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2944(CX), Y10
-	VMOVDQU 2976(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (R11), Y12
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 7 to 9 outputs
+	VMOVDQU (R12), Y12
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4032(CX), Y10
+	VMOVDQU 4064(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4096(CX), Y10
+	VMOVDQU 4128(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4160(CX), Y10
+	VMOVDQU 4192(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4224(CX), Y10
+	VMOVDQU 4256(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4288(CX), Y10
+	VMOVDQU 4320(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4352(CX), Y10
+	VMOVDQU 4384(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4416(CX), Y10
+	VMOVDQU 4448(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 4480(CX), Y10
+	VMOVDQU 4512(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 4544(CX), Y10
+	VMOVDQU 4576(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 8 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4608(CX), Y10
+	VMOVDQU 4640(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4672(CX), Y10
+	VMOVDQU 4704(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4736(CX), Y10
+	VMOVDQU 4768(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4800(CX), Y10
+	VMOVDQU 4832(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4864(CX), Y10
+	VMOVDQU 4896(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4928(CX), Y10
+	VMOVDQU 4960(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4992(CX), Y10
+	VMOVDQU 5024(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 5056(CX), Y10
+	VMOVDQU 5088(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 5120(CX), Y10
+	VMOVDQU 5152(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Store 9 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+	MOVQ    168(R13), R15
+	VMOVDQU Y7, (R15)(R14*1)
+	MOVQ    192(R13), R15
+	VMOVDQU Y8, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x9Xor_end:
+	RET
+
+// func mulAvxTwo_9x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 195 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_9x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (R10), Y13
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 6 to 10 outputs
+	VMOVDQU (R11), Y13
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3840(CX), Y11
+	VMOVDQU 3872(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3904(CX), Y11
+	VMOVDQU 3936(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3968(CX), Y11
+	VMOVDQU 4000(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4032(CX), Y11
+	VMOVDQU 4064(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4096(CX), Y11
+	VMOVDQU 4128(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4160(CX), Y11
+	VMOVDQU 4192(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4224(CX), Y11
+	VMOVDQU 4256(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4288(CX), Y11
+	VMOVDQU 4320(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4352(CX), Y11
+	VMOVDQU 4384(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 4416(CX), Y11
+	VMOVDQU 4448(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 7 to 10 outputs
+	VMOVDQU (R12), Y13
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 4480(CX), Y11
+	VMOVDQU 4512(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 4544(CX), Y11
+	VMOVDQU 4576(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 4608(CX), Y11
+	VMOVDQU 4640(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4672(CX), Y11
+	VMOVDQU 4704(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4736(CX), Y11
+	VMOVDQU 4768(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4800(CX), Y11
+	VMOVDQU 4832(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4864(CX), Y11
+	VMOVDQU 4896(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4928(CX), Y11
+	VMOVDQU 4960(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4992(CX), Y11
+	VMOVDQU 5024(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5056(CX), Y11
+	VMOVDQU 5088(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 8 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 5120(CX), Y11
+	VMOVDQU 5152(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 5184(CX), Y11
+	VMOVDQU 5216(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 5248(CX), Y11
+	VMOVDQU 5280(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 5312(CX), Y11
+	VMOVDQU 5344(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 5376(CX), Y11
+	VMOVDQU 5408(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 5440(CX), Y11
+	VMOVDQU 5472(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 5504(CX), Y11
+	VMOVDQU 5536(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 5568(CX), Y11
+	VMOVDQU 5600(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 5632(CX), Y11
+	VMOVDQU 5664(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5696(CX), Y11
+	VMOVDQU 5728(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+	MOVQ    168(R13), R15
+	VMOVDQU Y7, (R15)(R14*1)
+	MOVQ    192(R13), R15
+	VMOVDQU Y8, (R15)(R14*1)
+	MOVQ    216(R13), R15
+	VMOVDQU Y9, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x10_loop
+	VZEROUPPER
+
+mulAvxTwo_9x10_end:
+	RET
+
+// func mulGFNI_9x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x10_64(SB), $0-88
+	// Loading 20 of 90 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 102 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 10 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 648(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 656(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 664(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 672(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 680(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 688(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 696(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 704(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 712(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z20, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z21, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z22, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      168(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      192(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      216(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x10_64_loop
+	VZEROUPPER
+
+mulGFNI_9x10_64_end:
+	RET
+
+// func mulGFNI_9x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_9x10_64Xor(SB), $0-88
+	// Loading 20 of 90 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 102 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_9x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), DX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, DX
+
+mulGFNI_9x10_64Xor_loop:
+	// Load 10 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 (R15)(R14*1), Z20
+	MOVQ      24(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z21
+	MOVQ      48(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z22
+	MOVQ      72(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z23
+	MOVQ      96(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z24
+	MOVQ      120(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z25
+	MOVQ      144(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z26
+	MOVQ      168(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z27
+	MOVQ      192(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z28
+	MOVQ      216(R13), R15
+	VMOVDQU64 (R15)(R14*1), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 10 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 648(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 656(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 664(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 672(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 680(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 688(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 696(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 704(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 712(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R13), R15
+	VMOVDQU64 Z20, (R15)(R14*1)
+	MOVQ      24(R13), R15
+	VMOVDQU64 Z21, (R15)(R14*1)
+	MOVQ      48(R13), R15
+	VMOVDQU64 Z22, (R15)(R14*1)
+	MOVQ      72(R13), R15
+	VMOVDQU64 Z23, (R15)(R14*1)
+	MOVQ      96(R13), R15
+	VMOVDQU64 Z24, (R15)(R14*1)
+	MOVQ      120(R13), R15
+	VMOVDQU64 Z25, (R15)(R14*1)
+	MOVQ      144(R13), R15
+	VMOVDQU64 Z26, (R15)(R14*1)
+	MOVQ      168(R13), R15
+	VMOVDQU64 Z27, (R15)(R14*1)
+	MOVQ      192(R13), R15
+	VMOVDQU64 Z28, (R15)(R14*1)
+	MOVQ      216(R13), R15
+	VMOVDQU64 Z29, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R14
+	DECQ AX
+	JNZ  mulGFNI_9x10_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_9x10_64Xor_end:
+	RET
+
+// func mulAvxTwo_9x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_9x10Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 195 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_9x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), DX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, R9
+	ADDQ         R14, R10
+	ADDQ         R14, R11
+	ADDQ         R14, R12
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_9x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	MOVQ    (R13), R15
+	VMOVDQU (R15)(R14*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	MOVQ    24(R13), R15
+	VMOVDQU (R15)(R14*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	MOVQ    48(R13), R15
+	VMOVDQU (R15)(R14*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	MOVQ    72(R13), R15
+	VMOVDQU (R15)(R14*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	MOVQ    96(R13), R15
+	VMOVDQU (R15)(R14*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	MOVQ    120(R13), R15
+	VMOVDQU (R15)(R14*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	MOVQ    144(R13), R15
+	VMOVDQU (R15)(R14*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	MOVQ    168(R13), R15
+	VMOVDQU (R15)(R14*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	MOVQ    192(R13), R15
+	VMOVDQU (R15)(R14*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	MOVQ    216(R13), R15
+	VMOVDQU (R15)(R14*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (R10), Y13
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 6 to 10 outputs
+	VMOVDQU (R11), Y13
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3840(CX), Y11
+	VMOVDQU 3872(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3904(CX), Y11
+	VMOVDQU 3936(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3968(CX), Y11
+	VMOVDQU 4000(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4032(CX), Y11
+	VMOVDQU 4064(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4096(CX), Y11
+	VMOVDQU 4128(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4160(CX), Y11
+	VMOVDQU 4192(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4224(CX), Y11
+	VMOVDQU 4256(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4288(CX), Y11
+	VMOVDQU 4320(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4352(CX), Y11
+	VMOVDQU 4384(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 4416(CX), Y11
+	VMOVDQU 4448(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 7 to 10 outputs
+	VMOVDQU (R12), Y13
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 4480(CX), Y11
+	VMOVDQU 4512(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 4544(CX), Y11
+	VMOVDQU 4576(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 4608(CX), Y11
+	VMOVDQU 4640(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4672(CX), Y11
+	VMOVDQU 4704(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4736(CX), Y11
+	VMOVDQU 4768(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3008(CX), Y10
-	VMOVDQU 3040(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4800(CX), Y11
+	VMOVDQU 4832(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3072(CX), Y10
-	VMOVDQU 3104(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4864(CX), Y11
+	VMOVDQU 4896(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3136(CX), Y10
-	VMOVDQU 3168(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4928(CX), Y11
+	VMOVDQU 4960(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3200(CX), Y10
-	VMOVDQU 3232(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4992(CX), Y11
+	VMOVDQU 5024(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3264(CX), Y10
-	VMOVDQU 3296(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5056(CX), Y11
+	VMOVDQU 5088(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3328(CX), Y10
-	VMOVDQU 3360(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Load and process 32 bytes from input 8 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 5120(CX), Y11
+	VMOVDQU 5152(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3392(CX), Y10
-	VMOVDQU 3424(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 5184(CX), Y11
+	VMOVDQU 5216(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 5248(CX), Y11
+	VMOVDQU 5280(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 5312(CX), Y11
+	VMOVDQU 5344(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 5376(CX), Y11
+	VMOVDQU 5408(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 5440(CX), Y11
+	VMOVDQU 5472(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 5504(CX), Y11
+	VMOVDQU 5536(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 5568(CX), Y11
+	VMOVDQU 5600(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 5632(CX), Y11
+	VMOVDQU 5664(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5696(CX), Y11
+	VMOVDQU 5728(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+
+	// Store 10 outputs
+	MOVQ    (R13), R15
+	VMOVDQU Y0, (R15)(R14*1)
+	MOVQ    24(R13), R15
+	VMOVDQU Y1, (R15)(R14*1)
+	MOVQ    48(R13), R15
+	VMOVDQU Y2, (R15)(R14*1)
+	MOVQ    72(R13), R15
+	VMOVDQU Y3, (R15)(R14*1)
+	MOVQ    96(R13), R15
+	VMOVDQU Y4, (R15)(R14*1)
+	MOVQ    120(R13), R15
+	VMOVDQU Y5, (R15)(R14*1)
+	MOVQ    144(R13), R15
+	VMOVDQU Y6, (R15)(R14*1)
+	MOVQ    168(R13), R15
+	VMOVDQU Y7, (R15)(R14*1)
+	MOVQ    192(R13), R15
+	VMOVDQU Y8, (R15)(R14*1)
+	MOVQ    216(R13), R15
+	VMOVDQU Y9, (R15)(R14*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R14
+	DECQ AX
+	JNZ  mulAvxTwo_9x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_9x10Xor_end:
+	RET
+
+// func mulAvxTwo_10x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x1(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 24 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_10x1_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R14
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_10x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	VPXOR   Y2, Y3, Y0
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y4
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y4
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 448(CX), Y2
+	VMOVDQU 480(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 8 to 1 outputs
+	VMOVDQU (R13), Y4
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 512(CX), Y2
+	VMOVDQU 544(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 9 to 1 outputs
+	VMOVDQU (DX), Y4
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 576(CX), Y2
+	VMOVDQU 608(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R14)
+	ADDQ    $0x20, R14
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_10x1_loop
+	VZEROUPPER
+
+mulAvxTwo_10x1_end:
+	RET
+
+// func mulAvxTwo_10x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 46 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_10x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R14
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_10x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y6
+	VMOVDQU 32(R11), Y5
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y6
+	VMOVDQU 32(R12), Y5
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU (R13), Y6
+	VMOVDQU 32(R13), Y5
+	ADDQ    $0x40, R13
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 9 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_10x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_10x1_64_end:
+	RET
+
+// func mulGFNI_10x1_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 13 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x1_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), R12
+	MOVQ            216(CX), CX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            (R13), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
 
-	// Load and process 32 bytes from input 6 to 9 outputs
-	VMOVDQU (R11), Y12
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, CX
+
+mulGFNI_10x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z11
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z11, Z10
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z11
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z11
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z11
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z11
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z11
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (R10), Z11
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z6, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU64      (R11), Z11
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z7, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU64      (R12), Z11
+	ADDQ           $0x40, R12
+	VGF2P8AFFINEQB $0x00, Z8, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 9 to 1 outputs
+	VMOVDQU64      (CX), Z11
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z9, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Store 1 outputs
+	VMOVDQU64 Z10, (R13)
+	ADDQ      $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_10x1_64_loop
+	VZEROUPPER
+
+mulGFNI_10x1_64_end:
+	RET
+
+// func mulGFNI_10x1_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 13 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x1_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), R12
+	MOVQ            216(CX), CX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            (R13), R13
+	MOVQ            start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R13
+
+	// Add start offset to input
+	ADDQ R14, DX
+	ADDQ R14, BX
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, CX
+
+mulGFNI_10x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU64 (R13), Z10
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU64      (DX), Z11
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU64      (BX), Z11
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z1, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU64      (SI), Z11
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z2, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU64      (DI), Z11
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z3, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU64      (R8), Z11
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z4, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU64      (R9), Z11
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z5, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU64      (R10), Z11
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z6, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU64      (R11), Z11
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z7, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU64      (R12), Z11
+	ADDQ           $0x40, R12
+	VGF2P8AFFINEQB $0x00, Z8, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Load and process 64 bytes from input 9 to 1 outputs
+	VMOVDQU64      (CX), Z11
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z9, Z11, Z11
+	VXORPD         Z10, Z11, Z10
+
+	// Store 1 outputs
+	VMOVDQU64 Z10, (R13)
+	ADDQ      $0x40, R13
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulGFNI_10x1_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_10x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_10x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x1Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 24 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_10x1Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R14
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X1
+	VPBROADCASTB X1, Y1
+
+mulAvxTwo_10x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y4
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU (R14), Y0
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y4
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y4
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 128(CX), Y2
+	VMOVDQU 160(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y4
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 192(CX), Y2
+	VMOVDQU 224(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y4
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 256(CX), Y2
+	VMOVDQU 288(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y4
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 320(CX), Y2
+	VMOVDQU 352(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y4
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 3456(CX), Y10
-	VMOVDQU 3488(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 3520(CX), Y10
-	VMOVDQU 3552(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3584(CX), Y10
-	VMOVDQU 3616(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3648(CX), Y10
-	VMOVDQU 3680(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3712(CX), Y10
-	VMOVDQU 3744(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3776(CX), Y10
-	VMOVDQU 3808(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3840(CX), Y10
-	VMOVDQU 3872(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3904(CX), Y10
-	VMOVDQU 3936(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3968(CX), Y10
-	VMOVDQU 4000(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 384(CX), Y2
+	VMOVDQU 416(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
 
-	// Load and process 32 bytes from input 7 to 9 outputs
-	VMOVDQU (DX), Y12
+	// Load and process 32 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y4
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 448(CX), Y2
+	VMOVDQU 480(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 8 to 1 outputs
+	VMOVDQU (R13), Y4
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 512(CX), Y2
+	VMOVDQU 544(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Load and process 32 bytes from input 9 to 1 outputs
+	VMOVDQU (DX), Y4
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 4032(CX), Y10
-	VMOVDQU 4064(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 4096(CX), Y10
-	VMOVDQU 4128(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 4160(CX), Y10
-	VMOVDQU 4192(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 4224(CX), Y10
-	VMOVDQU 4256(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 4288(CX), Y10
-	VMOVDQU 4320(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 4352(CX), Y10
-	VMOVDQU 4384(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 4416(CX), Y10
-	VMOVDQU 4448(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 4480(CX), Y10
-	VMOVDQU 4512(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 4544(CX), Y10
-	VMOVDQU 4576(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y4, Y5
+	VPAND   Y1, Y4, Y4
+	VPAND   Y1, Y5, Y5
+	VMOVDQU 576(CX), Y2
+	VMOVDQU 608(CX), Y3
+	VPSHUFB Y4, Y2, Y2
+	VPSHUFB Y5, Y3, Y3
+	XOR3WAY( $0x00, Y2, Y3, Y0)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R14)
+	ADDQ    $0x20, R14
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_10x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_10x1Xor_end:
+	RET
+
+// func mulAvxTwo_10x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 46 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_10x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R14
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_10x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R14), Y0
+	VMOVDQU 32(R14), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (R10), Y6
+	VMOVDQU 32(R10), Y5
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 6 to 1 outputs
+	VMOVDQU (R11), Y6
+	VMOVDQU 32(R11), Y5
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 7 to 1 outputs
+	VMOVDQU (R12), Y6
+	VMOVDQU 32(R12), Y5
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Load and process 64 bytes from input 8 to 1 outputs
+	VMOVDQU (R13), Y6
+	VMOVDQU 32(R13), Y5
+	ADDQ    $0x40, R13
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
 
-	// Store 9 outputs
-	MOVQ    (R12), R14
-	VMOVDQU Y0, (R14)(R13*1)
-	MOVQ    24(R12), R14
-	VMOVDQU Y1, (R14)(R13*1)
-	MOVQ    48(R12), R14
-	VMOVDQU Y2, (R14)(R13*1)
-	MOVQ    72(R12), R14
-	VMOVDQU Y3, (R14)(R13*1)
-	MOVQ    96(R12), R14
-	VMOVDQU Y4, (R14)(R13*1)
-	MOVQ    120(R12), R14
-	VMOVDQU Y5, (R14)(R13*1)
-	MOVQ    144(R12), R14
-	VMOVDQU Y6, (R14)(R13*1)
-	MOVQ    168(R12), R14
-	VMOVDQU Y7, (R14)(R13*1)
-	MOVQ    192(R12), R14
-	VMOVDQU Y8, (R14)(R13*1)
+	// Load and process 64 bytes from input 9 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
 
 	// Prepare for next loop
-	ADDQ $0x20, R13
 	DECQ AX
-	JNZ  mulAvxTwo_8x9_loop
+	JNZ  mulAvxTwo_10x1_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_8x9_end:
+mulAvxTwo_10x1_64Xor_end:
 	RET
 
-// func mulAvxTwo_8x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_8x10(SB), NOSPLIT, $0-88
+// func mulAvxTwo_10x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x2(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 175 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_8x10_end
+	JZ    mulAvxTwo_10x2_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -23959,614 +81707,229 @@ TEXT ·mulAvxTwo_8x10(SB), NOSPLIT, $0-88
 	MOVQ  96(DX), R9
 	MOVQ  120(DX), R10
 	MOVQ  144(DX), R11
-	MOVQ  168(DX), DX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R15
+	MOVQ  24(R14), R14
+	MOVQ  start+72(FP), BP
 
-	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X10
-	VPBROADCASTB X10, Y10
+	// Add start offset to output
+	ADDQ BP, R15
+	ADDQ BP, R14
 
-mulAvxTwo_8x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, R13
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X2
+	VPBROADCASTB X2, Y2
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
+mulAvxTwo_10x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
 
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 832(CX), Y11
-	VMOVDQU 864(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 896(CX), Y11
-	VMOVDQU 928(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 960(CX), Y11
-	VMOVDQU 992(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1024(CX), Y11
-	VMOVDQU 1056(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1088(CX), Y11
-	VMOVDQU 1120(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1152(CX), Y11
-	VMOVDQU 1184(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1216(CX), Y11
-	VMOVDQU 1248(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 2 to 10 outputs
-	VMOVDQU (DI), Y13
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1280(CX), Y11
-	VMOVDQU 1312(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1344(CX), Y11
-	VMOVDQU 1376(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 1408(CX), Y11
-	VMOVDQU 1440(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 1472(CX), Y11
-	VMOVDQU 1504(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 1536(CX), Y11
-	VMOVDQU 1568(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 1600(CX), Y11
-	VMOVDQU 1632(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1664(CX), Y11
-	VMOVDQU 1696(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1728(CX), Y11
-	VMOVDQU 1760(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1792(CX), Y11
-	VMOVDQU 1824(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1856(CX), Y11
-	VMOVDQU 1888(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 3 to 10 outputs
-	VMOVDQU (R8), Y13
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1920(CX), Y11
-	VMOVDQU 1952(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1984(CX), Y11
-	VMOVDQU 2016(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2048(CX), Y11
-	VMOVDQU 2080(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2112(CX), Y11
-	VMOVDQU 2144(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2176(CX), Y11
-	VMOVDQU 2208(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2240(CX), Y11
-	VMOVDQU 2272(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2304(CX), Y11
-	VMOVDQU 2336(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 2368(CX), Y11
-	VMOVDQU 2400(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 2432(CX), Y11
-	VMOVDQU 2464(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 2496(CX), Y11
-	VMOVDQU 2528(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 4 to 10 outputs
-	VMOVDQU (R9), Y13
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y5
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 2560(CX), Y11
-	VMOVDQU 2592(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 2624(CX), Y11
-	VMOVDQU 2656(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2688(CX), Y11
-	VMOVDQU 2720(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2752(CX), Y11
-	VMOVDQU 2784(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2816(CX), Y11
-	VMOVDQU 2848(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2880(CX), Y11
-	VMOVDQU 2912(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2944(CX), Y11
-	VMOVDQU 2976(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3008(CX), Y11
-	VMOVDQU 3040(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3072(CX), Y11
-	VMOVDQU 3104(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3136(CX), Y11
-	VMOVDQU 3168(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 5 to 10 outputs
-	VMOVDQU (R10), Y13
+	// Load and process 32 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y5
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 3200(CX), Y11
-	VMOVDQU 3232(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 3264(CX), Y11
-	VMOVDQU 3296(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 3328(CX), Y11
-	VMOVDQU 3360(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 3392(CX), Y11
-	VMOVDQU 3424(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 3456(CX), Y11
-	VMOVDQU 3488(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 3520(CX), Y11
-	VMOVDQU 3552(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 3584(CX), Y11
-	VMOVDQU 3616(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3648(CX), Y11
-	VMOVDQU 3680(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3712(CX), Y11
-	VMOVDQU 3744(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3776(CX), Y11
-	VMOVDQU 3808(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 640(CX), Y3
+	VMOVDQU 672(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 704(CX), Y3
+	VMOVDQU 736(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 6 to 10 outputs
-	VMOVDQU (R11), Y13
+	// Load and process 32 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y5
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 3840(CX), Y11
-	VMOVDQU 3872(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 3904(CX), Y11
-	VMOVDQU 3936(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 3968(CX), Y11
-	VMOVDQU 4000(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 4032(CX), Y11
-	VMOVDQU 4064(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 4096(CX), Y11
-	VMOVDQU 4128(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 4160(CX), Y11
-	VMOVDQU 4192(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 4224(CX), Y11
-	VMOVDQU 4256(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 4288(CX), Y11
-	VMOVDQU 4320(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 4352(CX), Y11
-	VMOVDQU 4384(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 4416(CX), Y11
-	VMOVDQU 4448(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 768(CX), Y3
+	VMOVDQU 800(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 832(CX), Y3
+	VMOVDQU 864(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 7 to 2 outputs
+	VMOVDQU (R12), Y5
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 896(CX), Y3
+	VMOVDQU 928(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 960(CX), Y3
+	VMOVDQU 992(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Load and process 32 bytes from input 7 to 10 outputs
-	VMOVDQU (DX), Y13
+	// Load and process 32 bytes from input 8 to 2 outputs
+	VMOVDQU (R13), Y5
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 1024(CX), Y3
+	VMOVDQU 1056(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 1088(CX), Y3
+	VMOVDQU 1120(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 9 to 2 outputs
+	VMOVDQU (DX), Y5
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 4480(CX), Y11
-	VMOVDQU 4512(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 4544(CX), Y11
-	VMOVDQU 4576(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 4608(CX), Y11
-	VMOVDQU 4640(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 4672(CX), Y11
-	VMOVDQU 4704(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 4736(CX), Y11
-	VMOVDQU 4768(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 4800(CX), Y11
-	VMOVDQU 4832(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 4864(CX), Y11
-	VMOVDQU 4896(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 4928(CX), Y11
-	VMOVDQU 4960(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 4992(CX), Y11
-	VMOVDQU 5024(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 5056(CX), Y11
-	VMOVDQU 5088(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 1152(CX), Y3
+	VMOVDQU 1184(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 1216(CX), Y3
+	VMOVDQU 1248(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
-	// Store 10 outputs
-	MOVQ    (R12), R14
-	VMOVDQU Y0, (R14)(R13*1)
-	MOVQ    24(R12), R14
-	VMOVDQU Y1, (R14)(R13*1)
-	MOVQ    48(R12), R14
-	VMOVDQU Y2, (R14)(R13*1)
-	MOVQ    72(R12), R14
-	VMOVDQU Y3, (R14)(R13*1)
-	MOVQ    96(R12), R14
-	VMOVDQU Y4, (R14)(R13*1)
-	MOVQ    120(R12), R14
-	VMOVDQU Y5, (R14)(R13*1)
-	MOVQ    144(R12), R14
-	VMOVDQU Y6, (R14)(R13*1)
-	MOVQ    168(R12), R14
-	VMOVDQU Y7, (R14)(R13*1)
-	MOVQ    192(R12), R14
-	VMOVDQU Y8, (R14)(R13*1)
-	MOVQ    216(R12), R14
-	VMOVDQU Y9, (R14)(R13*1)
+	// Store 2 outputs
+	VMOVDQU Y0, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y1, (R14)
+	ADDQ    $0x20, R14
 
 	// Prepare for next loop
-	ADDQ $0x20, R13
 	DECQ AX
-	JNZ  mulAvxTwo_8x10_loop
+	JNZ  mulAvxTwo_10x2_loop
 	VZEROUPPER
 
-mulAvxTwo_8x10_end:
+mulAvxTwo_10x2_end:
 	RET
 
-// func mulAvxTwo_9x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x1(SB), NOSPLIT, $0-88
+// func mulAvxTwo_10x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x2_64(SB), $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 22 YMM used
+	// Full registers estimated 89 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x1_end
+	JZ    mulAvxTwo_10x2_64_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -24576,421 +81939,651 @@ TEXT ·mulAvxTwo_9x1(SB), NOSPLIT, $0-88
 	MOVQ  120(DX), R10
 	MOVQ  144(DX), R11
 	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  (R13), R13
-	MOVQ  start+72(FP), R14
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R15
+	MOVQ  24(R14), R14
+	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ R14, R13
+	ADDQ BP, R15
+	ADDQ BP, R14
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X1
-	VPBROADCASTB X1, Y1
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, R13
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
+	VPBROADCASTB X4, Y4
 
-mulAvxTwo_9x1_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
+mulAvxTwo_10x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
 
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (BX), Y4
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU (CX), Y2
-	VMOVDQU 32(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (SI), Y4
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (DI), Y4
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 128(CX), Y2
-	VMOVDQU 160(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (R8), Y4
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 192(CX), Y2
-	VMOVDQU 224(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (R9), Y4
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 256(CX), Y2
-	VMOVDQU 288(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 5 to 1 outputs
-	VMOVDQU (R10), Y4
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 320(CX), Y2
-	VMOVDQU 352(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU (R11), Y9
+	VMOVDQU 32(R11), Y11
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 6 to 1 outputs
-	VMOVDQU (R11), Y4
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 384(CX), Y2
-	VMOVDQU 416(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU (R12), Y9
+	VMOVDQU 32(R12), Y11
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 7 to 1 outputs
-	VMOVDQU (R12), Y4
-	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 448(CX), Y2
-	VMOVDQU 480(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 8 to 2 outputs
+	VMOVDQU (R13), Y9
+	VMOVDQU 32(R13), Y11
+	ADDQ    $0x40, R13
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
-	// Load and process 32 bytes from input 8 to 1 outputs
-	VMOVDQU (DX), Y4
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 512(CX), Y2
-	VMOVDQU 544(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	// Load and process 64 bytes from input 9 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R15)
+	VMOVDQU Y1, 32(R15)
+	ADDQ    $0x40, R15
+	VMOVDQU Y2, (R14)
+	VMOVDQU Y3, 32(R14)
+	ADDQ    $0x40, R14
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_10x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_10x2_64_end:
+	RET
+
+// func mulGFNI_10x2_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x2_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 24 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x2_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), R12
+	MOVQ            216(CX), CX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            (R13), R14
+	MOVQ            24(R13), R13
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+	ADDQ R15, R13
+
+	// Add start offset to input
+	ADDQ R15, DX
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, CX
+
+mulGFNI_10x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z22
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z22, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z22, Z21
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z22
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z3, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z22
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z5, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z22
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z7, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z22
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z9, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z22
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (R10), Z22
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z12, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z13, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU64      (R11), Z22
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z14, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z15, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Store 1 outputs
-	VMOVDQU Y0, (R13)
-	ADDQ    $0x20, R13
+	// Load and process 64 bytes from input 8 to 2 outputs
+	VMOVDQU64      (R12), Z22
+	ADDQ           $0x40, R12
+	VGF2P8AFFINEQB $0x00, Z16, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z17, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Load and process 64 bytes from input 9 to 2 outputs
+	VMOVDQU64      (CX), Z22
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z18, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z19, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Store 2 outputs
+	VMOVDQU64 Z20, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z21, (R13)
+	ADDQ      $0x40, R13
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_9x1_loop
+	JNZ  mulGFNI_10x2_64_loop
 	VZEROUPPER
 
-mulAvxTwo_9x1_end:
+mulGFNI_10x2_64_end:
 	RET
 
-// func mulAvxTwo_9x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x1_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 22 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_9x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), R11
-	MOVQ  192(AX), AX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
+// func mulGFNI_10x2_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x2_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 24 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x2_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), CX
+	MOVQ            (CX), DX
+	MOVQ            24(CX), BX
+	MOVQ            48(CX), SI
+	MOVQ            72(CX), DI
+	MOVQ            96(CX), R8
+	MOVQ            120(CX), R9
+	MOVQ            144(CX), R10
+	MOVQ            168(CX), R11
+	MOVQ            192(CX), R12
+	MOVQ            216(CX), CX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            (R13), R14
+	MOVQ            24(R13), R13
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R14
+	ADDQ R15, R13
 
 	// Add start offset to input
-	ADDQ         R13, DX
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, AX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R14
-	SHRQ         $0x06, R14
+	ADDQ R15, DX
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, CX
 
-mulAvxTwo_9x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulGFNI_10x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU64 (R14), Z20
+	VMOVDQU64 (R13), Z21
 
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU64      (DX), Z22
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU64      (BX), Z22
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z2, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z3, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU64      (SI), Z22
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z5, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU64      (DI), Z22
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z6, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z7, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y6
-	VMOVDQU 32(R8), Y5
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU64      (R8), Z22
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z8, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z9, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 5 to 1 outputs
-	VMOVDQU (R9), Y6
-	VMOVDQU 32(R9), Y5
-	ADDQ    $0x40, R9
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 5 to 2 outputs
+	VMOVDQU64      (R9), Z22
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z10, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 6 to 1 outputs
-	VMOVDQU (R10), Y6
-	VMOVDQU 32(R10), Y5
-	ADDQ    $0x40, R10
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 6 to 2 outputs
+	VMOVDQU64      (R10), Z22
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z12, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z13, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 7 to 1 outputs
-	VMOVDQU (R11), Y6
-	VMOVDQU 32(R11), Y5
-	ADDQ    $0x40, R11
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 7 to 2 outputs
+	VMOVDQU64      (R11), Z22
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z14, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z15, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Load and process 64 bytes from input 8 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 512(CX), Y3
-	VMOVDQU 544(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 64 bytes from input 8 to 2 outputs
+	VMOVDQU64      (R12), Z22
+	ADDQ           $0x40, R12
+	VGF2P8AFFINEQB $0x00, Z16, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z17, Z22, Z23
+	VXORPD         Z21, Z23, Z21
 
-	// Store 1 outputs
-	MOVQ    (R12), R15
-	VMOVDQU Y0, (R15)(R13*1)
-	VMOVDQU Y1, 32(R15)(R13*1)
+	// Load and process 64 bytes from input 9 to 2 outputs
+	VMOVDQU64      (CX), Z22
+	ADDQ           $0x40, CX
+	VGF2P8AFFINEQB $0x00, Z18, Z22, Z23
+	VXORPD         Z20, Z23, Z20
+	VGF2P8AFFINEQB $0x00, Z19, Z22, Z23
+	VXORPD         Z21, Z23, Z21
+
+	// Store 2 outputs
+	VMOVDQU64 Z20, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z21, (R13)
+	ADDQ      $0x40, R13
 
 	// Prepare for next loop
-	ADDQ $0x40, R13
-	DECQ R14
-	JNZ  mulAvxTwo_9x1_64_loop
+	DECQ AX
+	JNZ  mulGFNI_10x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_9x1_64_end:
+mulGFNI_10x2_64Xor_end:
 	RET
 
-// func mulAvxTwo_9x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x2(SB), NOSPLIT, $0-88
+// func mulAvxTwo_10x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x2Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 43 YMM used
+	// Full registers estimated 47 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x2_end
+	JZ    mulAvxTwo_10x2Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -25000,53 +82593,51 @@ TEXT ·mulAvxTwo_9x2(SB), NOSPLIT, $0-88
 	MOVQ  120(DX), R10
 	MOVQ  144(DX), R11
 	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  (R13), R14
-	MOVQ  24(R13), R13
-	MOVQ  start+72(FP), R15
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R15
+	MOVQ  24(R14), R14
+	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ R15, R14
-	ADDQ R15, R13
+	ADDQ BP, R15
+	ADDQ BP, R14
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DI
-	ADDQ         R15, R8
-	ADDQ         R15, R9
-	ADDQ         R15, R10
-	ADDQ         R15, R11
-	ADDQ         R15, R12
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X2
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, R13
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X2
 	VPBROADCASTB X2, Y2
 
-mulAvxTwo_9x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
+mulAvxTwo_10x2Xor_loop:
 	// Load and process 32 bytes from input 0 to 2 outputs
 	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y5, Y6
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y6, Y6
+	VMOVDQU (R15), Y0
 	VMOVDQU (CX), Y3
 	VMOVDQU 32(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU (R14), Y1
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 1 to 2 outputs
 	VMOVDQU (SI), Y5
@@ -25058,14 +82649,12 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 160(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 192(CX), Y3
 	VMOVDQU 224(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 2 to 2 outputs
 	VMOVDQU (DI), Y5
@@ -25077,14 +82666,12 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 288(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 320(CX), Y3
 	VMOVDQU 352(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 3 to 2 outputs
 	VMOVDQU (R8), Y5
@@ -25096,14 +82683,12 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 416(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 448(CX), Y3
 	VMOVDQU 480(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 4 to 2 outputs
 	VMOVDQU (R9), Y5
@@ -25115,14 +82700,12 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 544(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 576(CX), Y3
 	VMOVDQU 608(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 5 to 2 outputs
 	VMOVDQU (R10), Y5
@@ -25134,14 +82717,12 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 672(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 704(CX), Y3
 	VMOVDQU 736(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 6 to 2 outputs
 	VMOVDQU (R11), Y5
@@ -25153,14 +82734,12 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 800(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 832(CX), Y3
 	VMOVDQU 864(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 7 to 2 outputs
 	VMOVDQU (R12), Y5
@@ -25172,18 +82751,16 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 928(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 960(CX), Y3
 	VMOVDQU 992(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Load and process 32 bytes from input 8 to 2 outputs
-	VMOVDQU (DX), Y5
-	ADDQ    $0x20, DX
+	VMOVDQU (R13), Y5
+	ADDQ    $0x20, R13
 	VPSRLQ  $0x04, Y5, Y6
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y6, Y6
@@ -25191,81 +82768,102 @@ mulAvxTwo_9x2_loop:
 	VMOVDQU 1056(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	XOR3WAY( $0x00, Y3, Y4, Y0)
 	VMOVDQU 1088(CX), Y3
 	VMOVDQU 1120(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	XOR3WAY( $0x00, Y3, Y4, Y1)
+
+	// Load and process 32 bytes from input 9 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 1152(CX), Y3
+	VMOVDQU 1184(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y0)
+	VMOVDQU 1216(CX), Y3
+	VMOVDQU 1248(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	XOR3WAY( $0x00, Y3, Y4, Y1)
 
 	// Store 2 outputs
-	VMOVDQU Y0, (R14)
+	VMOVDQU Y0, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y1, (R14)
 	ADDQ    $0x20, R14
-	VMOVDQU Y1, (R13)
-	ADDQ    $0x20, R13
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_9x2_loop
+	JNZ  mulAvxTwo_10x2Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_9x2_end:
+mulAvxTwo_10x2Xor_end:
 	RET
 
-// func mulAvxTwo_9x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x2_64(SB), $0-88
+// func mulAvxTwo_10x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x2_64Xor(SB), $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 43 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 89 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), R11
-	MOVQ  192(AX), AX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
+	JZ    mulAvxTwo_10x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  out_base+48(FP), R14
+	MOVQ  (R14), R15
+	MOVQ  24(R14), R14
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R15
+	ADDQ BP, R14
 
 	// Add start offset to input
-	ADDQ         R13, DX
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, AX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X4
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, R13
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R14
-	SHRQ         $0x06, R14
 
-mulAvxTwo_9x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+mulAvxTwo_10x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R15), Y0
+	VMOVDQU 32(R15), Y1
+	VMOVDQU (R14), Y2
+	VMOVDQU 32(R14), Y3
 
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25278,25 +82876,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25309,25 +82903,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25340,25 +82930,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 320(CX), Y5
 	VMOVDQU 352(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (DI), Y9
-	VMOVDQU 32(DI), Y11
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25371,25 +82957,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 448(CX), Y5
 	VMOVDQU 480(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 4 to 2 outputs
-	VMOVDQU (R8), Y9
-	VMOVDQU 32(R8), Y11
-	ADDQ    $0x40, R8
+	VMOVDQU (R9), Y9
+	VMOVDQU 32(R9), Y11
+	ADDQ    $0x40, R9
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25402,25 +82984,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 576(CX), Y5
 	VMOVDQU 608(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 5 to 2 outputs
-	VMOVDQU (R9), Y9
-	VMOVDQU 32(R9), Y11
-	ADDQ    $0x40, R9
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y11
+	ADDQ    $0x40, R10
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25433,25 +83011,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 704(CX), Y5
 	VMOVDQU 736(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 6 to 2 outputs
-	VMOVDQU (R10), Y9
-	VMOVDQU 32(R10), Y11
-	ADDQ    $0x40, R10
+	VMOVDQU (R11), Y9
+	VMOVDQU 32(R11), Y11
+	ADDQ    $0x40, R11
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25464,25 +83038,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 832(CX), Y5
 	VMOVDQU 864(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 7 to 2 outputs
-	VMOVDQU (R11), Y9
-	VMOVDQU 32(R11), Y11
-	ADDQ    $0x40, R11
+	VMOVDQU (R12), Y9
+	VMOVDQU 32(R12), Y11
+	ADDQ    $0x40, R12
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25495,25 +83065,21 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 960(CX), Y5
 	VMOVDQU 992(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Load and process 64 bytes from input 8 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (R13), Y9
+	VMOVDQU 32(R13), Y11
+	ADDQ    $0x40, R13
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -25526,59 +83092,82 @@ mulAvxTwo_9x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
 	VMOVDQU 1088(CX), Y5
 	VMOVDQU 1120(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// Load and process 64 bytes from input 9 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	XOR3WAY( $0x00, Y7, Y8, Y3)
 
 	// Store 2 outputs
-	MOVQ    (R12), R15
-	VMOVDQU Y0, (R15)(R13*1)
-	VMOVDQU Y1, 32(R15)(R13*1)
-	MOVQ    24(R12), R15
-	VMOVDQU Y2, (R15)(R13*1)
-	VMOVDQU Y3, 32(R15)(R13*1)
+	VMOVDQU Y0, (R15)
+	VMOVDQU Y1, 32(R15)
+	ADDQ    $0x40, R15
+	VMOVDQU Y2, (R14)
+	VMOVDQU Y3, 32(R14)
+	ADDQ    $0x40, R14
 
 	// Prepare for next loop
-	ADDQ $0x40, R13
-	DECQ R14
-	JNZ  mulAvxTwo_9x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_10x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_9x2_64_end:
+mulAvxTwo_10x2_64Xor_end:
 	RET
 
-// func mulAvxTwo_9x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x3(SB), NOSPLIT, $8-88
+// func mulAvxTwo_10x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x3(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 62 YMM used
+	// Full registers estimated 68 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x3_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
+	JZ    mulAvxTwo_10x3_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), R10
+	MOVQ  168(AX), R11
+	MOVQ  192(AX), R12
+	MOVQ  216(AX), AX
 	MOVQ  out_base+48(FP), R13
 	MOVQ  (R13), R14
 	MOVQ  24(R13), R15
@@ -25591,6 +83180,7 @@ TEXT ·mulAvxTwo_9x3(SB), NOSPLIT, $8-88
 	ADDQ BP, R13
 
 	// Add start offset to input
+	ADDQ         BP, DX
 	ADDQ         BP, BX
 	ADDQ         BP, SI
 	ADDQ         BP, DI
@@ -25599,20 +83189,17 @@ TEXT Β·mulAvxTwo_9x3(SB), NOSPLIT, $8-88
 	ADDQ         BP, R10
 	ADDQ         BP, R11
 	ADDQ         BP, R12
-	ADDQ         BP, DX
+	ADDQ         BP, AX
 	MOVQ         $0x0000000f, BP
 	MOVQ         BP, X3
 	VPBROADCASTB X3, Y3
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
 
-mulAvxTwo_9x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-
+mulAvxTwo_10x3_loop:
 	// Load and process 32 bytes from input 0 to 3 outputs
-	VMOVDQU (BX), Y6
-	ADDQ    $0x20, BX
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25620,24 +83207,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 32(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	VPXOR   Y4, Y5, Y0
 	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	VPXOR   Y4, Y5, Y1
 	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y4, Y5, Y2
 
 	// Load and process 32 bytes from input 1 to 3 outputs
-	VMOVDQU (SI), Y6
-	ADDQ    $0x20, SI
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25645,24 +83229,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 224(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 256(CX), Y4
 	VMOVDQU 288(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 320(CX), Y4
 	VMOVDQU 352(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 2 to 3 outputs
-	VMOVDQU (DI), Y6
-	ADDQ    $0x20, DI
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25670,24 +83251,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 416(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 448(CX), Y4
 	VMOVDQU 480(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 512(CX), Y4
 	VMOVDQU 544(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 3 to 3 outputs
-	VMOVDQU (R8), Y6
-	ADDQ    $0x20, R8
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25695,24 +83273,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 608(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 640(CX), Y4
 	VMOVDQU 672(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 704(CX), Y4
 	VMOVDQU 736(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 4 to 3 outputs
-	VMOVDQU (R9), Y6
-	ADDQ    $0x20, R9
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25720,24 +83295,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 800(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 832(CX), Y4
 	VMOVDQU 864(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 896(CX), Y4
 	VMOVDQU 928(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 5 to 3 outputs
-	VMOVDQU (R10), Y6
-	ADDQ    $0x20, R10
+	VMOVDQU (R9), Y6
+	ADDQ    $0x20, R9
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25745,24 +83317,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 992(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 1024(CX), Y4
 	VMOVDQU 1056(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 1088(CX), Y4
 	VMOVDQU 1120(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 6 to 3 outputs
-	VMOVDQU (R11), Y6
-	ADDQ    $0x20, R11
+	VMOVDQU (R10), Y6
+	ADDQ    $0x20, R10
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25770,24 +83339,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 1184(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 1216(CX), Y4
 	VMOVDQU 1248(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 1280(CX), Y4
 	VMOVDQU 1312(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 7 to 3 outputs
-	VMOVDQU (R12), Y6
-	ADDQ    $0x20, R12
+	VMOVDQU (R11), Y6
+	ADDQ    $0x20, R11
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25795,24 +83361,21 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 1376(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 1408(CX), Y4
 	VMOVDQU 1440(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 1472(CX), Y4
 	VMOVDQU 1504(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Load and process 32 bytes from input 8 to 3 outputs
-	VMOVDQU (DX), Y6
-	ADDQ    $0x20, DX
+	VMOVDQU (R12), Y6
+	ADDQ    $0x20, R12
 	VPSRLQ  $0x04, Y6, Y7
 	VPAND   Y3, Y6, Y6
 	VPAND   Y3, Y7, Y7
@@ -25820,20 +83383,39 @@ mulAvxTwo_9x3_loop:
 	VMOVDQU 1568(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	XOR3WAY( $0x00, Y4, Y5, Y0)
 	VMOVDQU 1600(CX), Y4
 	VMOVDQU 1632(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	XOR3WAY( $0x00, Y4, Y5, Y1)
 	VMOVDQU 1664(CX), Y4
 	VMOVDQU 1696(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	XOR3WAY( $0x00, Y4, Y5, Y2)
+
+	// Load and process 32 bytes from input 9 to 3 outputs
+	VMOVDQU (AX), Y6
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1728(CX), Y4
+	VMOVDQU 1760(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1792(CX), Y4
+	VMOVDQU 1824(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1856(CX), Y4
+	VMOVDQU 1888(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
 	// Store 3 outputs
 	VMOVDQU Y0, (R14)
@@ -25844,24 +83426,24 @@ mulAvxTwo_9x3_loop:
 	ADDQ    $0x20, R13
 
 	// Prepare for next loop
-	DECQ AX
-	JNZ  mulAvxTwo_9x3_loop
+	DECQ BP
+	JNZ  mulAvxTwo_10x3_loop
 	VZEROUPPER
 
-mulAvxTwo_9x3_end:
+mulAvxTwo_10x3_end:
 	RET
 
-// func mulAvxTwo_9x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x3_64(SB), $0-88
+// func mulAvxTwo_10x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x3_64(SB), $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 62 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 130 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x3_64_end
+	JZ    mulAvxTwo_10x3_64_end
 	MOVQ  in_base+24(FP), AX
 	MOVQ  (AX), DX
 	MOVQ  24(AX), BX
@@ -25871,36 +83453,40 @@ TEXT ·mulAvxTwo_9x3_64(SB), $0-88
 	MOVQ  120(AX), R9
 	MOVQ  144(AX), R10
 	MOVQ  168(AX), R11
-	MOVQ  192(AX), AX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  out_base+48(FP), R12
-	MOVQ  start+72(FP), R13
+	MOVQ  192(AX), R12
+	MOVQ  216(AX), AX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R15
+	MOVQ  48(R13), R13
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
 
 	// Add start offset to input
-	ADDQ         R13, DX
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, R9
-	ADDQ         R13, R10
-	ADDQ         R13, R11
-	ADDQ         R13, AX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X6
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R14
-	SHRQ         $0x06, R14
 
-mulAvxTwo_9x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
 
+mulAvxTwo_10x3_64_loop:
 	// Load and process 64 bytes from input 0 to 3 outputs
 	VMOVDQU (DX), Y11
 	VMOVDQU 32(DX), Y13
@@ -25917,30 +83503,24 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
 	VMOVDQU (BX), Y11
@@ -25958,30 +83538,24 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 2 to 3 outputs
 	VMOVDQU (SI), Y11
@@ -25999,30 +83573,24 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 3 to 3 outputs
 	VMOVDQU (DI), Y11
@@ -26040,30 +83608,24 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 4 to 3 outputs
 	VMOVDQU (R8), Y11
@@ -26081,30 +83643,24 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 832(CX), Y7
 	VMOVDQU 864(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 896(CX), Y7
 	VMOVDQU 928(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 5 to 3 outputs
 	VMOVDQU (R9), Y11
@@ -26122,30 +83678,24 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1024(CX), Y7
 	VMOVDQU 1056(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1088(CX), Y7
 	VMOVDQU 1120(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 6 to 3 outputs
 	VMOVDQU (R10), Y11
@@ -26163,30 +83713,24 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1216(CX), Y7
 	VMOVDQU 1248(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1280(CX), Y7
 	VMOVDQU 1312(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 7 to 3 outputs
 	VMOVDQU (R11), Y11
@@ -26204,35 +83748,29 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1408(CX), Y7
 	VMOVDQU 1440(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1472(CX), Y7
 	VMOVDQU 1504(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Load and process 64 bytes from input 8 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (R12), Y11
+	VMOVDQU 32(R12), Y13
+	ADDQ    $0x40, R12
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -26245,62 +83783,480 @@ mulAvxTwo_9x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1600(CX), Y7
 	VMOVDQU 1632(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1664(CX), Y7
 	VMOVDQU 1696(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 9 to 3 outputs
+	VMOVDQU (AX), Y11
+	VMOVDQU 32(AX), Y13
+	ADDQ    $0x40, AX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
 	// Store 3 outputs
-	MOVQ    (R12), R15
-	VMOVDQU Y0, (R15)(R13*1)
-	VMOVDQU Y1, 32(R15)(R13*1)
-	MOVQ    24(R12), R15
-	VMOVDQU Y2, (R15)(R13*1)
-	VMOVDQU Y3, 32(R15)(R13*1)
-	MOVQ    48(R12), R15
-	VMOVDQU Y4, (R15)(R13*1)
-	VMOVDQU Y5, 32(R15)(R13*1)
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y2, (R15)
+	VMOVDQU Y3, 32(R15)
+	ADDQ    $0x40, R15
+	VMOVDQU Y4, (R13)
+	VMOVDQU Y5, 32(R13)
+	ADDQ    $0x40, R13
 
 	// Prepare for next loop
-	ADDQ $0x40, R13
-	DECQ R14
-	JNZ  mulAvxTwo_9x3_64_loop
+	DECQ BP
+	JNZ  mulAvxTwo_10x3_64_loop
 	VZEROUPPER
 
-mulAvxTwo_9x3_64_end:
+mulAvxTwo_10x3_64_end:
 	RET
 
-// func mulAvxTwo_9x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x4(SB), NOSPLIT, $8-88
+// func mulGFNI_10x3_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x3_64(SB), $8-88
+	// Loading 27 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x3_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	VBROADCASTF32X2 208(CX), Z26
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), R10
+	MOVQ            168(AX), R11
+	MOVQ            192(AX), R12
+	MOVQ            216(AX), AX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            (R13), R14
+	MOVQ            24(R13), R15
+	MOVQ            48(R13), R13
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_10x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU64      (R11), Z30
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 3 outputs
+	VMOVDQU64      (R12), Z30
+	ADDQ           $0x40, R12
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z25, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z26, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 3 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 3 outputs
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R13)
+	ADDQ      $0x40, R13
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_10x3_64_loop
+	VZEROUPPER
+
+mulGFNI_10x3_64_end:
+	RET
+
+// func mulGFNI_10x3_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x3_64Xor(SB), $8-88
+	// Loading 27 of 30 tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x3_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	VBROADCASTF32X2 208(CX), Z26
+	MOVQ            in_base+24(FP), AX
+	MOVQ            (AX), DX
+	MOVQ            24(AX), BX
+	MOVQ            48(AX), SI
+	MOVQ            72(AX), DI
+	MOVQ            96(AX), R8
+	MOVQ            120(AX), R9
+	MOVQ            144(AX), R10
+	MOVQ            168(AX), R11
+	MOVQ            192(AX), R12
+	MOVQ            216(AX), AX
+	MOVQ            out_base+48(FP), R13
+	MOVQ            out_base+48(FP), R13
+	MOVQ            (R13), R14
+	MOVQ            24(R13), R15
+	MOVQ            48(R13), R13
+	MOVQ            start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
+
+	// Add start offset to input
+	ADDQ BP, DX
+	ADDQ BP, BX
+	ADDQ BP, SI
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, AX
+
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
+
+mulGFNI_10x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU64 (R14), Z27
+	VMOVDQU64 (R15), Z28
+	VMOVDQU64 (R13), Z29
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU64      (DX), Z30
+	ADDQ           $0x40, DX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU64      (R11), Z30
+	ADDQ           $0x40, R11
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 3 outputs
+	VMOVDQU64      (R12), Z30
+	ADDQ           $0x40, R12
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z25, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z26, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 3 outputs
+	VMOVDQU64           (AX), Z30
+	ADDQ                $0x40, AX
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 3 outputs
+	VMOVDQU64 Z27, (R14)
+	ADDQ      $0x40, R14
+	VMOVDQU64 Z28, (R15)
+	ADDQ      $0x40, R15
+	VMOVDQU64 Z29, (R13)
+	ADDQ      $0x40, R13
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulGFNI_10x3_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_10x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_10x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x3Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 81 YMM used
+	// Full registers estimated 68 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x4_end
+	JZ    mulAvxTwo_10x3Xor_end
 	MOVQ  in_base+24(FP), AX
 	MOVQ  (AX), DX
 	MOVQ  24(AX), BX
@@ -26310,19 +84266,18 @@ TEXT ·mulAvxTwo_9x4(SB), NOSPLIT, $8-88
 	MOVQ  120(AX), R9
 	MOVQ  144(AX), R10
 	MOVQ  168(AX), R11
-	MOVQ  192(AX), AX
-	MOVQ  out_base+48(FP), R12
-	MOVQ  (R12), R13
-	MOVQ  24(R12), R14
-	MOVQ  48(R12), R15
-	MOVQ  72(R12), R12
+	MOVQ  192(AX), R12
+	MOVQ  216(AX), AX
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R15
+	MOVQ  48(R13), R13
 	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ BP, R13
 	ADDQ BP, R14
 	ADDQ BP, R15
-	ADDQ BP, R12
+	ADDQ BP, R13
 
 	// Add start offset to input
 	ADDQ         BP, DX
@@ -26333,1699 +84288,1440 @@ TEXT ·mulAvxTwo_9x4(SB), NOSPLIT, $8-88
 	ADDQ         BP, R9
 	ADDQ         BP, R10
 	ADDQ         BP, R11
+	ADDQ         BP, R12
 	ADDQ         BP, AX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X4
-	VPBROADCASTB X4, Y4
+	MOVQ         BP, X3
+	VPBROADCASTB X3, Y3
 	MOVQ         n+80(FP), BP
 	SHRQ         $0x05, BP
 
-mulAvxTwo_9x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (DX), Y7
+mulAvxTwo_10x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (BX), Y7
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (SI), Y7
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 3 to 4 outputs
-	VMOVDQU (DI), Y7
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 896(CX), Y5
-	VMOVDQU 928(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 960(CX), Y5
-	VMOVDQU 992(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 4 to 4 outputs
-	VMOVDQU (R8), Y7
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1024(CX), Y5
-	VMOVDQU 1056(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1088(CX), Y5
-	VMOVDQU 1120(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1152(CX), Y5
-	VMOVDQU 1184(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1216(CX), Y5
-	VMOVDQU 1248(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 5 to 4 outputs
-	VMOVDQU (R9), Y7
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1280(CX), Y5
-	VMOVDQU 1312(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1344(CX), Y5
-	VMOVDQU 1376(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1408(CX), Y5
-	VMOVDQU 1440(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1472(CX), Y5
-	VMOVDQU 1504(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 6 to 4 outputs
-	VMOVDQU (R10), Y7
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1536(CX), Y5
-	VMOVDQU 1568(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1600(CX), Y5
-	VMOVDQU 1632(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1664(CX), Y5
-	VMOVDQU 1696(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1728(CX), Y5
-	VMOVDQU 1760(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 7 to 4 outputs
-	VMOVDQU (R11), Y7
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1792(CX), Y5
-	VMOVDQU 1824(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1856(CX), Y5
-	VMOVDQU 1888(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1920(CX), Y5
-	VMOVDQU 1952(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1984(CX), Y5
-	VMOVDQU 2016(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Load and process 32 bytes from input 8 to 4 outputs
-	VMOVDQU (AX), Y7
-	ADDQ    $0x20, AX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 2048(CX), Y5
-	VMOVDQU 2080(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 2112(CX), Y5
-	VMOVDQU 2144(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 2176(CX), Y5
-	VMOVDQU 2208(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R14), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 2240(CX), Y5
-	VMOVDQU 2272(CX), Y6
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU (R15), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
-
-	// Store 4 outputs
-	VMOVDQU Y0, (R13)
-	ADDQ    $0x20, R13
-	VMOVDQU Y1, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y2, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y3, (R12)
-	ADDQ    $0x20, R12
-
-	// Prepare for next loop
-	DECQ BP
-	JNZ  mulAvxTwo_9x4_loop
-	VZEROUPPER
-
-mulAvxTwo_9x4_end:
-	RET
-
-// func mulAvxTwo_9x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x5(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 100 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_9x5_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
-
-	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X5
-	VPBROADCASTB X5, Y5
-
-mulAvxTwo_9x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU (R13), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (BX), Y8
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
-	VMOVDQU 96(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 128(CX), Y6
-	VMOVDQU 160(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 256(CX), Y6
-	VMOVDQU 288(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 1 to 5 outputs
-	VMOVDQU (SI), Y8
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (SI), Y6
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 320(CX), Y6
-	VMOVDQU 352(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 384(CX), Y6
-	VMOVDQU 416(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 448(CX), Y6
-	VMOVDQU 480(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 512(CX), Y6
-	VMOVDQU 544(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 576(CX), Y6
-	VMOVDQU 608(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 2 to 5 outputs
-	VMOVDQU (DI), Y8
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (DI), Y6
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 640(CX), Y6
-	VMOVDQU 672(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 704(CX), Y6
-	VMOVDQU 736(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 768(CX), Y6
-	VMOVDQU 800(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 832(CX), Y6
-	VMOVDQU 864(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 896(CX), Y6
-	VMOVDQU 928(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 3 to 5 outputs
-	VMOVDQU (R8), Y8
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (R8), Y6
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 960(CX), Y6
-	VMOVDQU 992(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1024(CX), Y6
-	VMOVDQU 1056(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1088(CX), Y6
-	VMOVDQU 1120(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1152(CX), Y6
-	VMOVDQU 1184(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1216(CX), Y6
-	VMOVDQU 1248(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 4 to 5 outputs
-	VMOVDQU (R9), Y8
+	// Load and process 32 bytes from input 5 to 3 outputs
+	VMOVDQU (R9), Y6
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1280(CX), Y6
-	VMOVDQU 1312(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1344(CX), Y6
-	VMOVDQU 1376(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1408(CX), Y6
-	VMOVDQU 1440(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1472(CX), Y6
-	VMOVDQU 1504(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1536(CX), Y6
-	VMOVDQU 1568(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 960(CX), Y4
+	VMOVDQU 992(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1024(CX), Y4
+	VMOVDQU 1056(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1088(CX), Y4
+	VMOVDQU 1120(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 5 to 5 outputs
-	VMOVDQU (R10), Y8
+	// Load and process 32 bytes from input 6 to 3 outputs
+	VMOVDQU (R10), Y6
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1600(CX), Y6
-	VMOVDQU 1632(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1664(CX), Y6
-	VMOVDQU 1696(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1728(CX), Y6
-	VMOVDQU 1760(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1792(CX), Y6
-	VMOVDQU 1824(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1856(CX), Y6
-	VMOVDQU 1888(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1152(CX), Y4
+	VMOVDQU 1184(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1216(CX), Y4
+	VMOVDQU 1248(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1280(CX), Y4
+	VMOVDQU 1312(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 6 to 5 outputs
-	VMOVDQU (R11), Y8
+	// Load and process 32 bytes from input 7 to 3 outputs
+	VMOVDQU (R11), Y6
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1920(CX), Y6
-	VMOVDQU 1952(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1984(CX), Y6
-	VMOVDQU 2016(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2048(CX), Y6
-	VMOVDQU 2080(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2112(CX), Y6
-	VMOVDQU 2144(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2176(CX), Y6
-	VMOVDQU 2208(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1344(CX), Y4
+	VMOVDQU 1376(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1408(CX), Y4
+	VMOVDQU 1440(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1472(CX), Y4
+	VMOVDQU 1504(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 7 to 5 outputs
-	VMOVDQU (R12), Y8
+	// Load and process 32 bytes from input 8 to 3 outputs
+	VMOVDQU (R12), Y6
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 2240(CX), Y6
-	VMOVDQU 2272(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 2304(CX), Y6
-	VMOVDQU 2336(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2368(CX), Y6
-	VMOVDQU 2400(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2432(CX), Y6
-	VMOVDQU 2464(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2496(CX), Y6
-	VMOVDQU 2528(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1536(CX), Y4
+	VMOVDQU 1568(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1600(CX), Y4
+	VMOVDQU 1632(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1664(CX), Y4
+	VMOVDQU 1696(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Load and process 32 bytes from input 8 to 5 outputs
-	VMOVDQU (DX), Y8
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 2560(CX), Y6
-	VMOVDQU 2592(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 2624(CX), Y6
-	VMOVDQU 2656(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2688(CX), Y6
-	VMOVDQU 2720(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2752(CX), Y6
-	VMOVDQU 2784(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2816(CX), Y6
-	VMOVDQU 2848(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	// Load and process 32 bytes from input 9 to 3 outputs
+	VMOVDQU (AX), Y6
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 1728(CX), Y4
+	VMOVDQU 1760(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y0)
+	VMOVDQU 1792(CX), Y4
+	VMOVDQU 1824(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y1)
+	VMOVDQU 1856(CX), Y4
+	VMOVDQU 1888(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	XOR3WAY( $0x00, Y4, Y5, Y2)
 
-	// Store 5 outputs
-	MOVQ    (R13), R15
-	VMOVDQU Y0, (R15)(R14*1)
-	MOVQ    24(R13), R15
-	VMOVDQU Y1, (R15)(R14*1)
-	MOVQ    48(R13), R15
-	VMOVDQU Y2, (R15)(R14*1)
-	MOVQ    72(R13), R15
-	VMOVDQU Y3, (R15)(R14*1)
-	MOVQ    96(R13), R15
-	VMOVDQU Y4, (R15)(R14*1)
+	// Store 3 outputs
+	VMOVDQU Y0, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y1, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y2, (R13)
+	ADDQ    $0x20, R13
 
 	// Prepare for next loop
-	ADDQ $0x20, R14
-	DECQ AX
-	JNZ  mulAvxTwo_9x5_loop
+	DECQ BP
+	JNZ  mulAvxTwo_10x3Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_9x5_end:
+mulAvxTwo_10x3Xor_end:
 	RET
 
-// func mulAvxTwo_9x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x6(SB), NOSPLIT, $0-88
+// func mulAvxTwo_10x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x3_64Xor(SB), $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 119 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 130 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x6_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
+	JZ    mulAvxTwo_10x3_64Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), R8
+	MOVQ  120(AX), R9
+	MOVQ  144(AX), R10
+	MOVQ  168(AX), R11
+	MOVQ  192(AX), R12
+	MOVQ  216(AX), AX
 	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
+	MOVQ  out_base+48(FP), R13
+	MOVQ  (R13), R14
+	MOVQ  24(R13), R15
+	MOVQ  48(R13), R13
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R13
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X6
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, R9
+	ADDQ         BP, R10
+	ADDQ         BP, R11
+	ADDQ         BP, R12
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
 	VPBROADCASTB X6, Y6
 
-mulAvxTwo_9x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	// Reload length to save a register
+	MOVQ n+80(FP), BP
+	SHRQ $0x06, BP
 
-	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (BX), Y9
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
+mulAvxTwo_10x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R14), Y0
+	VMOVDQU 32(R14), Y1
+	VMOVDQU (R15), Y2
+	VMOVDQU 32(R15), Y3
+	VMOVDQU (R13), Y4
+	VMOVDQU 32(R13), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU (CX), Y7
 	VMOVDQU 32(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 1 to 6 outputs
-	VMOVDQU (SI), Y9
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 384(CX), Y7
 	VMOVDQU 416(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 448(CX), Y7
 	VMOVDQU 480(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 512(CX), Y7
 	VMOVDQU 544(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 576(CX), Y7
 	VMOVDQU 608(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 640(CX), Y7
 	VMOVDQU 672(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 704(CX), Y7
 	VMOVDQU 736(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 2 to 6 outputs
-	VMOVDQU (DI), Y9
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 768(CX), Y7
 	VMOVDQU 800(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 832(CX), Y7
 	VMOVDQU 864(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 896(CX), Y7
 	VMOVDQU 928(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 5 to 3 outputs
+	VMOVDQU (R9), Y11
+	VMOVDQU 32(R9), Y13
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 960(CX), Y7
 	VMOVDQU 992(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1024(CX), Y7
 	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1088(CX), Y7
 	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 3 to 6 outputs
-	VMOVDQU (R8), Y9
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
+	// Load and process 64 bytes from input 6 to 3 outputs
+	VMOVDQU (R10), Y11
+	VMOVDQU 32(R10), Y13
+	ADDQ    $0x40, R10
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 1152(CX), Y7
 	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1216(CX), Y7
 	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1280(CX), Y7
 	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 7 to 3 outputs
+	VMOVDQU (R11), Y11
+	VMOVDQU 32(R11), Y13
+	ADDQ    $0x40, R11
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 1344(CX), Y7
 	VMOVDQU 1376(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1408(CX), Y7
 	VMOVDQU 1440(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1472(CX), Y7
 	VMOVDQU 1504(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 4 to 6 outputs
-	VMOVDQU (R9), Y9
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
+	// Load and process 64 bytes from input 8 to 3 outputs
+	VMOVDQU (R12), Y11
+	VMOVDQU 32(R12), Y13
+	ADDQ    $0x40, R12
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 1536(CX), Y7
 	VMOVDQU 1568(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1600(CX), Y7
 	VMOVDQU 1632(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1664(CX), Y7
 	VMOVDQU 1696(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// Load and process 64 bytes from input 9 to 3 outputs
+	VMOVDQU (AX), Y11
+	VMOVDQU 32(AX), Y13
+	ADDQ    $0x40, AX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 1728(CX), Y7
 	VMOVDQU 1760(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	XOR3WAY( $0x00, Y9, Y10, Y1)
 	VMOVDQU 1792(CX), Y7
 	VMOVDQU 1824(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	XOR3WAY( $0x00, Y9, Y10, Y3)
 	VMOVDQU 1856(CX), Y7
 	VMOVDQU 1888(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	XOR3WAY( $0x00, Y9, Y10, Y5)
 
-	// Load and process 32 bytes from input 5 to 6 outputs
-	VMOVDQU (R10), Y9
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1920(CX), Y7
-	VMOVDQU 1952(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1984(CX), Y7
-	VMOVDQU 2016(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2048(CX), Y7
-	VMOVDQU 2080(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2112(CX), Y7
-	VMOVDQU 2144(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2176(CX), Y7
-	VMOVDQU 2208(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2240(CX), Y7
-	VMOVDQU 2272(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	// Store 3 outputs
+	VMOVDQU Y0, (R14)
+	VMOVDQU Y1, 32(R14)
+	ADDQ    $0x40, R14
+	VMOVDQU Y2, (R15)
+	VMOVDQU Y3, 32(R15)
+	ADDQ    $0x40, R15
+	VMOVDQU Y4, (R13)
+	VMOVDQU Y5, 32(R13)
+	ADDQ    $0x40, R13
 
-	// Load and process 32 bytes from input 6 to 6 outputs
-	VMOVDQU (R11), Y9
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 2304(CX), Y7
-	VMOVDQU 2336(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 2368(CX), Y7
-	VMOVDQU 2400(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2432(CX), Y7
-	VMOVDQU 2464(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2496(CX), Y7
-	VMOVDQU 2528(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2560(CX), Y7
-	VMOVDQU 2592(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2624(CX), Y7
-	VMOVDQU 2656(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_10x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_10x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_10x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x4(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 89 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_10x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_10x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R10), Y7
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 7 to 6 outputs
-	VMOVDQU (R12), Y9
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (R11), Y7
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 7 to 4 outputs
+	VMOVDQU (R12), Y7
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 2688(CX), Y7
-	VMOVDQU 2720(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 2752(CX), Y7
-	VMOVDQU 2784(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2816(CX), Y7
-	VMOVDQU 2848(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2880(CX), Y7
-	VMOVDQU 2912(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2944(CX), Y7
-	VMOVDQU 2976(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 3008(CX), Y7
-	VMOVDQU 3040(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1792(CX), Y5
+	VMOVDQU 1824(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1856(CX), Y5
+	VMOVDQU 1888(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1920(CX), Y5
+	VMOVDQU 1952(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1984(CX), Y5
+	VMOVDQU 2016(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 8 to 6 outputs
-	VMOVDQU (DX), Y9
+	// Load and process 32 bytes from input 8 to 4 outputs
+	VMOVDQU (R13), Y7
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 2048(CX), Y5
+	VMOVDQU 2080(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 2112(CX), Y5
+	VMOVDQU 2144(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 2176(CX), Y5
+	VMOVDQU 2208(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 2240(CX), Y5
+	VMOVDQU 2272(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 9 to 4 outputs
+	VMOVDQU (DX), Y7
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 3072(CX), Y7
-	VMOVDQU 3104(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 3136(CX), Y7
-	VMOVDQU 3168(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 3200(CX), Y7
-	VMOVDQU 3232(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 3264(CX), Y7
-	VMOVDQU 3296(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 3328(CX), Y7
-	VMOVDQU 3360(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 3392(CX), Y7
-	VMOVDQU 3424(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 2304(CX), Y5
+	VMOVDQU 2336(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 2368(CX), Y5
+	VMOVDQU 2400(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 2432(CX), Y5
+	VMOVDQU 2464(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 2496(CX), Y5
+	VMOVDQU 2528(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Store 6 outputs
-	MOVQ    (R13), R15
-	VMOVDQU Y0, (R15)(R14*1)
-	MOVQ    24(R13), R15
-	VMOVDQU Y1, (R15)(R14*1)
-	MOVQ    48(R13), R15
-	VMOVDQU Y2, (R15)(R14*1)
-	MOVQ    72(R13), R15
-	VMOVDQU Y3, (R15)(R14*1)
-	MOVQ    96(R13), R15
-	VMOVDQU Y4, (R15)(R14*1)
-	MOVQ    120(R13), R15
-	VMOVDQU Y5, (R15)(R14*1)
+	// Store 4 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R14
+	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_9x6_loop
+	JNZ  mulAvxTwo_10x4_loop
 	VZEROUPPER
 
-mulAvxTwo_9x6_end:
+mulAvxTwo_10x4_end:
 	RET
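
Editor's note on the mulAvxTwo_* kernels removed and added throughout this hunk: they all implement the same split-nibble table lookup. Each input byte is split into its low and high 4-bit halves, each half indexes a 16-entry multiplication table (VPSHUFB performs 32 such lookups per instruction), and the two results are folded into the running output with XOR (XOR3WAY in the new code, VPXOR pairs in the old). A minimal scalar sketch of that technique in Go, with hypothetical names and caller-supplied tables, not the library's actual API:

    package main

    import "fmt"

    // mulSplitNibble illustrates the split-nibble trick scalar-wise:
    // lowTbl[i] holds c*i and highTbl[i] holds c*(i<<4) in GF(2^8), so a
    // full byte multiply by c is the XOR of two 16-entry lookups.
    func mulSplitNibble(lowTbl, highTbl *[16]byte, in, out []byte) {
    	for i, b := range in {
    		out[i] ^= lowTbl[b&0x0f] ^ highTbl[b>>4]
    	}
    }

    func main() {
    	// Tables for multiplication by 1 (identity), just to show the call shape.
    	var low, high [16]byte
    	for i := 0; i < 16; i++ {
    		low[i] = byte(i)
    		high[i] = byte(i << 4)
    	}
    	in := []byte{0x12, 0x34}
    	out := make([]byte, len(in))
    	mulSplitNibble(&low, &high, in, out)
    	fmt.Printf("%x\n", out) // 1234
    }

The Xor-suffixed variants (such as mulAvxTwo_10x3_64Xor above) load the existing output and accumulate into it, matching the ^= in the sketch, while the plain variants initialize each output from the first input and overwrite the destination at the end of the loop.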
 
-// func mulAvxTwo_9x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x7(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+// func mulGFNI_10x4_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x4_64(SB), $8-88
+	// Loading 26 of 40 tables to registers
 	// Destination kept on stack
-	// Full registers estimated 138 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_9x7_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
+	// Full registers estimated 46 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x4_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X7
-	VPBROADCASTB X7, Y7
-
-mulAvxTwo_9x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (BX), Y10
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU (CX), Y8
-	VMOVDQU 32(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 64(CX), Y8
-	VMOVDQU 96(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 128(CX), Y8
-	VMOVDQU 160(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 192(CX), Y8
-	VMOVDQU 224(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 320(CX), Y8
-	VMOVDQU 352(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 384(CX), Y8
-	VMOVDQU 416(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
-	// Load and process 32 bytes from input 1 to 7 outputs
-	VMOVDQU (SI), Y10
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 448(CX), Y8
-	VMOVDQU 480(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 512(CX), Y8
-	VMOVDQU 544(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 576(CX), Y8
-	VMOVDQU 608(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 640(CX), Y8
-	VMOVDQU 672(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 704(CX), Y8
-	VMOVDQU 736(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 768(CX), Y8
-	VMOVDQU 800(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 832(CX), Y8
-	VMOVDQU 864(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
-	// Load and process 32 bytes from input 2 to 7 outputs
-	VMOVDQU (DI), Y10
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 896(CX), Y8
-	VMOVDQU 928(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 960(CX), Y8
-	VMOVDQU 992(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1024(CX), Y8
-	VMOVDQU 1056(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1088(CX), Y8
-	VMOVDQU 1120(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1152(CX), Y8
-	VMOVDQU 1184(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1216(CX), Y8
-	VMOVDQU 1248(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1280(CX), Y8
-	VMOVDQU 1312(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
-	// Load and process 32 bytes from input 3 to 7 outputs
-	VMOVDQU (R8), Y10
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1344(CX), Y8
-	VMOVDQU 1376(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1408(CX), Y8
-	VMOVDQU 1440(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1472(CX), Y8
-	VMOVDQU 1504(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1536(CX), Y8
-	VMOVDQU 1568(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1600(CX), Y8
-	VMOVDQU 1632(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1664(CX), Y8
-	VMOVDQU 1696(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1728(CX), Y8
-	VMOVDQU 1760(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
-	// Load and process 32 bytes from input 4 to 7 outputs
-	VMOVDQU (R9), Y10
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1792(CX), Y8
-	VMOVDQU 1824(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1856(CX), Y8
-	VMOVDQU 1888(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1920(CX), Y8
-	VMOVDQU 1952(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1984(CX), Y8
-	VMOVDQU 2016(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2048(CX), Y8
-	VMOVDQU 2080(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2112(CX), Y8
-	VMOVDQU 2144(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2176(CX), Y8
-	VMOVDQU 2208(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x4_64_loop:
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 4 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 4 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 4 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Load and process 32 bytes from input 5 to 7 outputs
-	VMOVDQU (R10), Y10
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2240(CX), Y8
-	VMOVDQU 2272(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2304(CX), Y8
-	VMOVDQU 2336(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2368(CX), Y8
-	VMOVDQU 2400(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2432(CX), Y8
-	VMOVDQU 2464(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2496(CX), Y8
-	VMOVDQU 2528(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2560(CX), Y8
-	VMOVDQU 2592(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2624(CX), Y8
-	VMOVDQU 2656(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	// Store 4 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
-	// Load and process 32 bytes from input 6 to 7 outputs
-	VMOVDQU (R11), Y10
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2688(CX), Y8
-	VMOVDQU 2720(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2752(CX), Y8
-	VMOVDQU 2784(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2816(CX), Y8
-	VMOVDQU 2848(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2880(CX), Y8
-	VMOVDQU 2912(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2944(CX), Y8
-	VMOVDQU 2976(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3008(CX), Y8
-	VMOVDQU 3040(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3072(CX), Y8
-	VMOVDQU 3104(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x4_64_loop
+	VZEROUPPER
 
-	// Load and process 32 bytes from input 7 to 7 outputs
-	VMOVDQU (R12), Y10
-	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 3136(CX), Y8
-	VMOVDQU 3168(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 3200(CX), Y8
-	VMOVDQU 3232(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 3264(CX), Y8
-	VMOVDQU 3296(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 3328(CX), Y8
-	VMOVDQU 3360(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 3392(CX), Y8
-	VMOVDQU 3424(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3456(CX), Y8
-	VMOVDQU 3488(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3520(CX), Y8
-	VMOVDQU 3552(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+mulGFNI_10x4_64_end:
+	RET
 
-	// Load and process 32 bytes from input 8 to 7 outputs
-	VMOVDQU (DX), Y10
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 3584(CX), Y8
-	VMOVDQU 3616(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 3648(CX), Y8
-	VMOVDQU 3680(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 3712(CX), Y8
-	VMOVDQU 3744(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 3776(CX), Y8
-	VMOVDQU 3808(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 3840(CX), Y8
-	VMOVDQU 3872(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3904(CX), Y8
-	VMOVDQU 3936(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3968(CX), Y8
-	VMOVDQU 4000(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+// func mulGFNI_10x4_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x4_64Xor(SB), $8-88
+	// Loading 26 of 40 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 46 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x4_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	VBROADCASTF32X2 200(CX), Z25
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
 
-	// Store 7 outputs
-	MOVQ    (R13), R15
-	VMOVDQU Y0, (R15)(R14*1)
-	MOVQ    24(R13), R15
-	VMOVDQU Y1, (R15)(R14*1)
-	MOVQ    48(R13), R15
-	VMOVDQU Y2, (R15)(R14*1)
-	MOVQ    72(R13), R15
-	VMOVDQU Y3, (R15)(R14*1)
-	MOVQ    96(R13), R15
-	VMOVDQU Y4, (R15)(R14*1)
-	MOVQ    120(R13), R15
-	VMOVDQU Y5, (R15)(R14*1)
-	MOVQ    144(R13), R15
-	VMOVDQU Y6, (R15)(R14*1)
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x4_64Xor_loop:
+	// Load 4 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 (BP)(R15*1), Z26
+	MOVQ      24(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z27
+	MOVQ      48(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z28
+	MOVQ      72(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z29
+
+	// Load and process 64 bytes from input 0 to 4 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 4 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 4 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 4 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 4 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 4 outputs
+	VMOVDQU64      (R10), Z30
+	ADDQ           $0x40, R10
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 4 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB      $0x00, Z24, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z25, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 4 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 4 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 4 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 4 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R14
+	ADDQ $0x40, R15
 	DECQ AX
-	JNZ  mulAvxTwo_9x7_loop
+	JNZ  mulGFNI_10x4_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_9x7_end:
+mulGFNI_10x4_64Xor_end:
 	RET
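
Editor's note on the mulGFNI_* kernels introduced in this release: instead of nibble tables, each 8-byte matrix entry is broadcast into a ZMM register (VBROADCASTF32X2, or referenced in place via the .BCST form once the 26 preloaded tables run out) and VGF2P8AFFINEQB applies it as an 8x8 bit matrix to 64 input bytes at a time; partial products from successive inputs are folded together with VXORPD. A hedged scalar model of that per-byte bit-matrix product over GF(2), ignoring the hardware instruction's exact bit-ordering convention, might look like:

    package main

    import "math/bits"

    // affineByte applies an 8x8 bit matrix m (one byte per row) to input
    // byte x over GF(2): each output bit is the parity of (row AND x).
    // Illustrative only; not the library's code.
    func affineByte(m [8]byte, x byte) byte {
    	var out byte
    	for row := 0; row < 8; row++ {
    		if bits.OnesCount8(m[row]&x)&1 == 1 {
    			out |= 1 << (7 - row)
    		}
    	}
    	return out
    }

    func main() {
    	// Identity matrix: row i has only bit (7-i) set, so the output equals x.
    	var id [8]byte
    	for i := 0; i < 8; i++ {
    		id[i] = 1 << (7 - i)
    	}
    	println(affineByte(id, 0xa7) == 0xa7) // true
    }

As with the AVX2 code, the Xor variant ending just above (mulGFNI_10x4_64Xor) first loads the current contents of the four destinations and accumulates into them, while mulGFNI_10x4_64 computes them from scratch.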
 
-// func mulAvxTwo_9x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x8(SB), NOSPLIT, $0-88
+// func mulAvxTwo_10x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x4Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 157 YMM used
+	// Full registers estimated 89 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x8_end
+	JZ    mulAvxTwo_10x4Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -28035,568 +85731,335 @@ TEXT ·mulAvxTwo_9x8(SB), NOSPLIT, $0-88
 	MOVQ  120(DX), R10
 	MOVQ  144(DX), R11
 	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X8
-	VPBROADCASTB X8, Y8
-
-mulAvxTwo_9x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X4
+	VPBROADCASTB X4, Y4
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_10x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	MOVQ    (R14), BP
+	VMOVDQU (BP)(R15*1), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	MOVQ    24(R14), BP
+	VMOVDQU (BP)(R15*1), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	MOVQ    48(R14), BP
+	VMOVDQU (BP)(R15*1), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	MOVQ    72(R14), BP
+	VMOVDQU (BP)(R15*1), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (SI), Y11
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y9
-	VMOVDQU 672(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 704(CX), Y9
-	VMOVDQU 736(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 768(CX), Y9
-	VMOVDQU 800(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 832(CX), Y9
-	VMOVDQU 864(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 896(CX), Y9
-	VMOVDQU 928(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 960(CX), Y9
-	VMOVDQU 992(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 2 to 8 outputs
-	VMOVDQU (DI), Y11
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1024(CX), Y9
-	VMOVDQU 1056(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1088(CX), Y9
-	VMOVDQU 1120(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1152(CX), Y9
-	VMOVDQU 1184(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1216(CX), Y9
-	VMOVDQU 1248(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y9
-	VMOVDQU 1312(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1344(CX), Y9
-	VMOVDQU 1376(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1408(CX), Y9
-	VMOVDQU 1440(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1472(CX), Y9
-	VMOVDQU 1504(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 3 to 8 outputs
-	VMOVDQU (R8), Y11
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1536(CX), Y9
-	VMOVDQU 1568(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1600(CX), Y9
-	VMOVDQU 1632(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1664(CX), Y9
-	VMOVDQU 1696(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1728(CX), Y9
-	VMOVDQU 1760(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1792(CX), Y9
-	VMOVDQU 1824(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1856(CX), Y9
-	VMOVDQU 1888(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1920(CX), Y9
-	VMOVDQU 1952(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1984(CX), Y9
-	VMOVDQU 2016(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 4 to 8 outputs
-	VMOVDQU (R9), Y11
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (R9), Y7
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2048(CX), Y9
-	VMOVDQU 2080(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2112(CX), Y9
-	VMOVDQU 2144(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2176(CX), Y9
-	VMOVDQU 2208(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2240(CX), Y9
-	VMOVDQU 2272(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2304(CX), Y9
-	VMOVDQU 2336(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2368(CX), Y9
-	VMOVDQU 2400(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2432(CX), Y9
-	VMOVDQU 2464(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 2496(CX), Y9
-	VMOVDQU 2528(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 5 to 8 outputs
-	VMOVDQU (R10), Y11
+	// Load and process 32 bytes from input 5 to 4 outputs
+	VMOVDQU (R10), Y7
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2560(CX), Y9
-	VMOVDQU 2592(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2624(CX), Y9
-	VMOVDQU 2656(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2688(CX), Y9
-	VMOVDQU 2720(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2752(CX), Y9
-	VMOVDQU 2784(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2816(CX), Y9
-	VMOVDQU 2848(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2880(CX), Y9
-	VMOVDQU 2912(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2944(CX), Y9
-	VMOVDQU 2976(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3008(CX), Y9
-	VMOVDQU 3040(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1280(CX), Y5
+	VMOVDQU 1312(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1344(CX), Y5
+	VMOVDQU 1376(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1408(CX), Y5
+	VMOVDQU 1440(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1472(CX), Y5
+	VMOVDQU 1504(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 6 to 8 outputs
-	VMOVDQU (R11), Y11
+	// Load and process 32 bytes from input 6 to 4 outputs
+	VMOVDQU (R11), Y7
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 3072(CX), Y9
-	VMOVDQU 3104(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 3136(CX), Y9
-	VMOVDQU 3168(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 3200(CX), Y9
-	VMOVDQU 3232(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 3264(CX), Y9
-	VMOVDQU 3296(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 3328(CX), Y9
-	VMOVDQU 3360(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 3392(CX), Y9
-	VMOVDQU 3424(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 3456(CX), Y9
-	VMOVDQU 3488(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3520(CX), Y9
-	VMOVDQU 3552(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1536(CX), Y5
+	VMOVDQU 1568(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1600(CX), Y5
+	VMOVDQU 1632(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1664(CX), Y5
+	VMOVDQU 1696(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1728(CX), Y5
+	VMOVDQU 1760(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 7 to 8 outputs
-	VMOVDQU (R12), Y11
+	// Load and process 32 bytes from input 7 to 4 outputs
+	VMOVDQU (R12), Y7
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 3584(CX), Y9
-	VMOVDQU 3616(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 3648(CX), Y9
-	VMOVDQU 3680(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 3712(CX), Y9
-	VMOVDQU 3744(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 3776(CX), Y9
-	VMOVDQU 3808(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 3840(CX), Y9
-	VMOVDQU 3872(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 3904(CX), Y9
-	VMOVDQU 3936(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 3968(CX), Y9
-	VMOVDQU 4000(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 4032(CX), Y9
-	VMOVDQU 4064(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1792(CX), Y5
+	VMOVDQU 1824(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 1856(CX), Y5
+	VMOVDQU 1888(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 1920(CX), Y5
+	VMOVDQU 1952(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 1984(CX), Y5
+	VMOVDQU 2016(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Load and process 32 bytes from input 8 to 8 outputs
-	VMOVDQU (DX), Y11
+	// Load and process 32 bytes from input 8 to 4 outputs
+	VMOVDQU (R13), Y7
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 2048(CX), Y5
+	VMOVDQU 2080(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 2112(CX), Y5
+	VMOVDQU 2144(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 2176(CX), Y5
+	VMOVDQU 2208(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 2240(CX), Y5
+	VMOVDQU 2272(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
+
+	// Load and process 32 bytes from input 9 to 4 outputs
+	VMOVDQU (DX), Y7
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 4096(CX), Y9
-	VMOVDQU 4128(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 4160(CX), Y9
-	VMOVDQU 4192(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 4224(CX), Y9
-	VMOVDQU 4256(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 4288(CX), Y9
-	VMOVDQU 4320(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 4352(CX), Y9
-	VMOVDQU 4384(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 4416(CX), Y9
-	VMOVDQU 4448(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 4480(CX), Y9
-	VMOVDQU 4512(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 4544(CX), Y9
-	VMOVDQU 4576(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 2304(CX), Y5
+	VMOVDQU 2336(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y0)
+	VMOVDQU 2368(CX), Y5
+	VMOVDQU 2400(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y1)
+	VMOVDQU 2432(CX), Y5
+	VMOVDQU 2464(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y2)
+	VMOVDQU 2496(CX), Y5
+	VMOVDQU 2528(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y3)
 
-	// Store 8 outputs
-	MOVQ    (R13), R15
-	VMOVDQU Y0, (R15)(R14*1)
-	MOVQ    24(R13), R15
-	VMOVDQU Y1, (R15)(R14*1)
-	MOVQ    48(R13), R15
-	VMOVDQU Y2, (R15)(R14*1)
-	MOVQ    72(R13), R15
-	VMOVDQU Y3, (R15)(R14*1)
-	MOVQ    96(R13), R15
-	VMOVDQU Y4, (R15)(R14*1)
-	MOVQ    120(R13), R15
-	VMOVDQU Y5, (R15)(R14*1)
-	MOVQ    144(R13), R15
-	VMOVDQU Y6, (R15)(R14*1)
-	MOVQ    168(R13), R15
-	VMOVDQU Y7, (R15)(R14*1)
+	// Store 4 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R14
+	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_9x8_loop
+	JNZ  mulAvxTwo_10x4Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_9x8_end:
+mulAvxTwo_10x4Xor_end:
 	RET
 
-// func mulAvxTwo_9x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x9(SB), NOSPLIT, $0-88
+// func mulAvxTwo_10x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x5(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 176 YMM used
+	// Full registers estimated 110 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x9_end
+	JZ    mulAvxTwo_10x5_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -28606,625 +86069,836 @@ TEXT ·mulAvxTwo_9x9(SB), NOSPLIT, $0-88
 	MOVQ  120(DX), R10
 	MOVQ  144(DX), R11
 	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X9
-	VPBROADCASTB X9, Y9
-
-mulAvxTwo_9x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X5
+	VPBROADCASTB X5, Y5
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
+mulAvxTwo_10x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
 
-	// Load and process 32 bytes from input 1 to 9 outputs
-	VMOVDQU (SI), Y12
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 576(CX), Y10
-	VMOVDQU 608(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 640(CX), Y10
-	VMOVDQU 672(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 704(CX), Y10
-	VMOVDQU 736(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 768(CX), Y10
-	VMOVDQU 800(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 832(CX), Y10
-	VMOVDQU 864(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 896(CX), Y10
-	VMOVDQU 928(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 960(CX), Y10
-	VMOVDQU 992(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1024(CX), Y10
-	VMOVDQU 1056(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1088(CX), Y10
-	VMOVDQU 1120(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 2 to 9 outputs
-	VMOVDQU (DI), Y12
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1152(CX), Y10
-	VMOVDQU 1184(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1216(CX), Y10
-	VMOVDQU 1248(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1280(CX), Y10
-	VMOVDQU 1312(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1344(CX), Y10
-	VMOVDQU 1376(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1408(CX), Y10
-	VMOVDQU 1440(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 1472(CX), Y10
-	VMOVDQU 1504(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 1536(CX), Y10
-	VMOVDQU 1568(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1600(CX), Y10
-	VMOVDQU 1632(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1664(CX), Y10
-	VMOVDQU 1696(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 3 to 9 outputs
-	VMOVDQU (R8), Y12
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1728(CX), Y10
-	VMOVDQU 1760(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1792(CX), Y10
-	VMOVDQU 1824(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1856(CX), Y10
-	VMOVDQU 1888(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1920(CX), Y10
-	VMOVDQU 1952(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1984(CX), Y10
-	VMOVDQU 2016(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2048(CX), Y10
-	VMOVDQU 2080(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2112(CX), Y10
-	VMOVDQU 2144(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2176(CX), Y10
-	VMOVDQU 2208(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2240(CX), Y10
-	VMOVDQU 2272(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Load and process 32 bytes from input 4 to 9 outputs
-	VMOVDQU (R9), Y12
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2304(CX), Y10
-	VMOVDQU 2336(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2368(CX), Y10
-	VMOVDQU 2400(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 2432(CX), Y10
-	VMOVDQU 2464(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 2496(CX), Y10
-	VMOVDQU 2528(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 2560(CX), Y10
-	VMOVDQU 2592(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2624(CX), Y10
-	VMOVDQU 2656(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2688(CX), Y10
-	VMOVDQU 2720(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2752(CX), Y10
-	VMOVDQU 2784(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2816(CX), Y10
-	VMOVDQU 2848(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 5 to 9 outputs
-	VMOVDQU (R10), Y12
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R10), Y8
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2880(CX), Y10
-	VMOVDQU 2912(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2944(CX), Y10
-	VMOVDQU 2976(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3008(CX), Y10
-	VMOVDQU 3040(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3072(CX), Y10
-	VMOVDQU 3104(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3136(CX), Y10
-	VMOVDQU 3168(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3200(CX), Y10
-	VMOVDQU 3232(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3264(CX), Y10
-	VMOVDQU 3296(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3328(CX), Y10
-	VMOVDQU 3360(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3392(CX), Y10
-	VMOVDQU 3424(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 6 to 9 outputs
-	VMOVDQU (R11), Y12
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (R11), Y8
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 3456(CX), Y10
-	VMOVDQU 3488(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 3520(CX), Y10
-	VMOVDQU 3552(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3584(CX), Y10
-	VMOVDQU 3616(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3648(CX), Y10
-	VMOVDQU 3680(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3712(CX), Y10
-	VMOVDQU 3744(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3776(CX), Y10
-	VMOVDQU 3808(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3840(CX), Y10
-	VMOVDQU 3872(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3904(CX), Y10
-	VMOVDQU 3936(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3968(CX), Y10
-	VMOVDQU 4000(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 7 to 9 outputs
-	VMOVDQU (R12), Y12
+	// Load and process 32 bytes from input 7 to 5 outputs
+	VMOVDQU (R12), Y8
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 4032(CX), Y10
-	VMOVDQU 4064(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 4096(CX), Y10
-	VMOVDQU 4128(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 4160(CX), Y10
-	VMOVDQU 4192(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 4224(CX), Y10
-	VMOVDQU 4256(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 4288(CX), Y10
-	VMOVDQU 4320(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 4352(CX), Y10
-	VMOVDQU 4384(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 4416(CX), Y10
-	VMOVDQU 4448(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 4480(CX), Y10
-	VMOVDQU 4512(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 4544(CX), Y10
-	VMOVDQU 4576(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2240(CX), Y6
+	VMOVDQU 2272(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2304(CX), Y6
+	VMOVDQU 2336(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2368(CX), Y6
+	VMOVDQU 2400(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2432(CX), Y6
+	VMOVDQU 2464(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2496(CX), Y6
+	VMOVDQU 2528(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 8 to 9 outputs
-	VMOVDQU (DX), Y12
+	// Load and process 32 bytes from input 8 to 5 outputs
+	VMOVDQU (R13), Y8
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2560(CX), Y6
+	VMOVDQU 2592(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2624(CX), Y6
+	VMOVDQU 2656(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2688(CX), Y6
+	VMOVDQU 2720(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2752(CX), Y6
+	VMOVDQU 2784(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2816(CX), Y6
+	VMOVDQU 2848(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 9 to 5 outputs
+	VMOVDQU (DX), Y8
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 4608(CX), Y10
-	VMOVDQU 4640(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 4672(CX), Y10
-	VMOVDQU 4704(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 4736(CX), Y10
-	VMOVDQU 4768(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 4800(CX), Y10
-	VMOVDQU 4832(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 4864(CX), Y10
-	VMOVDQU 4896(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 4928(CX), Y10
-	VMOVDQU 4960(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 4992(CX), Y10
-	VMOVDQU 5024(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 5056(CX), Y10
-	VMOVDQU 5088(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 5120(CX), Y10
-	VMOVDQU 5152(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2880(CX), Y6
+	VMOVDQU 2912(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2944(CX), Y6
+	VMOVDQU 2976(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 3008(CX), Y6
+	VMOVDQU 3040(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 3072(CX), Y6
+	VMOVDQU 3104(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 3136(CX), Y6
+	VMOVDQU 3168(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Store 9 outputs
-	MOVQ    (R13), R15
-	VMOVDQU Y0, (R15)(R14*1)
-	MOVQ    24(R13), R15
-	VMOVDQU Y1, (R15)(R14*1)
-	MOVQ    48(R13), R15
-	VMOVDQU Y2, (R15)(R14*1)
-	MOVQ    72(R13), R15
-	VMOVDQU Y3, (R15)(R14*1)
-	MOVQ    96(R13), R15
-	VMOVDQU Y4, (R15)(R14*1)
-	MOVQ    120(R13), R15
-	VMOVDQU Y5, (R15)(R14*1)
-	MOVQ    144(R13), R15
-	VMOVDQU Y6, (R15)(R14*1)
-	MOVQ    168(R13), R15
-	VMOVDQU Y7, (R15)(R14*1)
-	MOVQ    192(R13), R15
-	VMOVDQU Y8, (R15)(R14*1)
+	// Store 5 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
+	MOVQ    96(R14), BP
+	VMOVDQU Y4, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R14
+	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_9x9_loop
+	JNZ  mulAvxTwo_10x5_loop
 	VZEROUPPER
 
-mulAvxTwo_9x9_end:
+mulAvxTwo_10x5_end:
 	RET
 
-// func mulAvxTwo_9x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_9x10(SB), NOSPLIT, $0-88
+// func mulGFNI_10x5_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x5_64(SB), $8-88
+	// Loading 25 of 50 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 57 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x5_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x5_64_loop:
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 5 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 5 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x5_64_loop
+	VZEROUPPER
+
+mulGFNI_10x5_64_end:
+	RET
+
+// func mulGFNI_10x5_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x5_64Xor(SB), $8-88
+	// Loading 25 of 50 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 57 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x5_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	VBROADCASTF32X2 192(CX), Z24
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x5_64Xor_loop:
+	// Load 5 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 (BP)(R15*1), Z25
+	MOVQ      24(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z26
+	MOVQ      48(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z27
+	MOVQ      72(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z28
+	MOVQ      96(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z29
+
+	// Load and process 64 bytes from input 0 to 5 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 5 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 5 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 5 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 5 outputs
+	VMOVDQU64      (R9), Z30
+	ADDQ           $0x40, R9
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z24, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 5 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 5 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 5 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 5 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 5 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 5 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x5_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_10x5_64Xor_end:
+	RET
+
+// func mulAvxTwo_10x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x5Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 195 YMM used
+	// Full registers estimated 110 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_9x10_end
+	JZ    mulAvxTwo_10x5Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -29234,682 +86908,389 @@ TEXT ·mulAvxTwo_9x10(SB), NOSPLIT, $0-88
 	MOVQ  120(DX), R10
 	MOVQ  144(DX), R11
 	MOVQ  168(DX), R12
-	MOVQ  192(DX), DX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X10
-	VPBROADCASTB X10, Y10
-
-mulAvxTwo_9x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X5
+	VPBROADCASTB X5, Y5
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
+mulAvxTwo_10x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	MOVQ    (R14), BP
+	VMOVDQU (BP)(R15*1), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	MOVQ    24(R14), BP
+	VMOVDQU (BP)(R15*1), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	MOVQ    48(R14), BP
+	VMOVDQU (BP)(R15*1), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	MOVQ    72(R14), BP
+	VMOVDQU (BP)(R15*1), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	MOVQ    96(R14), BP
+	VMOVDQU (BP)(R15*1), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 832(CX), Y11
-	VMOVDQU 864(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 896(CX), Y11
-	VMOVDQU 928(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 960(CX), Y11
-	VMOVDQU 992(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1024(CX), Y11
-	VMOVDQU 1056(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1088(CX), Y11
-	VMOVDQU 1120(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1152(CX), Y11
-	VMOVDQU 1184(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1216(CX), Y11
-	VMOVDQU 1248(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 2 to 10 outputs
-	VMOVDQU (DI), Y13
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1280(CX), Y11
-	VMOVDQU 1312(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1344(CX), Y11
-	VMOVDQU 1376(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 1408(CX), Y11
-	VMOVDQU 1440(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 1472(CX), Y11
-	VMOVDQU 1504(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 1536(CX), Y11
-	VMOVDQU 1568(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 1600(CX), Y11
-	VMOVDQU 1632(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1664(CX), Y11
-	VMOVDQU 1696(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 1728(CX), Y11
-	VMOVDQU 1760(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 1792(CX), Y11
-	VMOVDQU 1824(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 1856(CX), Y11
-	VMOVDQU 1888(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 3 to 10 outputs
-	VMOVDQU (R8), Y13
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 1920(CX), Y11
-	VMOVDQU 1952(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 1984(CX), Y11
-	VMOVDQU 2016(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2048(CX), Y11
-	VMOVDQU 2080(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2112(CX), Y11
-	VMOVDQU 2144(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2176(CX), Y11
-	VMOVDQU 2208(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2240(CX), Y11
-	VMOVDQU 2272(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2304(CX), Y11
-	VMOVDQU 2336(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 2368(CX), Y11
-	VMOVDQU 2400(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 2432(CX), Y11
-	VMOVDQU 2464(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 2496(CX), Y11
-	VMOVDQU 2528(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 4 to 10 outputs
-	VMOVDQU (R9), Y13
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (R9), Y8
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 2560(CX), Y11
-	VMOVDQU 2592(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 2624(CX), Y11
-	VMOVDQU 2656(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 2688(CX), Y11
-	VMOVDQU 2720(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 2752(CX), Y11
-	VMOVDQU 2784(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 2816(CX), Y11
-	VMOVDQU 2848(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 2880(CX), Y11
-	VMOVDQU 2912(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 2944(CX), Y11
-	VMOVDQU 2976(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3008(CX), Y11
-	VMOVDQU 3040(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3072(CX), Y11
-	VMOVDQU 3104(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3136(CX), Y11
-	VMOVDQU 3168(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 5 to 10 outputs
-	VMOVDQU (R10), Y13
+	// Load and process 32 bytes from input 5 to 5 outputs
+	VMOVDQU (R10), Y8
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 3200(CX), Y11
-	VMOVDQU 3232(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 3264(CX), Y11
-	VMOVDQU 3296(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 3328(CX), Y11
-	VMOVDQU 3360(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 3392(CX), Y11
-	VMOVDQU 3424(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 3456(CX), Y11
-	VMOVDQU 3488(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 3520(CX), Y11
-	VMOVDQU 3552(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 3584(CX), Y11
-	VMOVDQU 3616(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 3648(CX), Y11
-	VMOVDQU 3680(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 3712(CX), Y11
-	VMOVDQU 3744(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 3776(CX), Y11
-	VMOVDQU 3808(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1600(CX), Y6
+	VMOVDQU 1632(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1664(CX), Y6
+	VMOVDQU 1696(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 1728(CX), Y6
+	VMOVDQU 1760(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 1792(CX), Y6
+	VMOVDQU 1824(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 1856(CX), Y6
+	VMOVDQU 1888(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 6 to 10 outputs
-	VMOVDQU (R11), Y13
+	// Load and process 32 bytes from input 6 to 5 outputs
+	VMOVDQU (R11), Y8
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 3840(CX), Y11
-	VMOVDQU 3872(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 3904(CX), Y11
-	VMOVDQU 3936(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 3968(CX), Y11
-	VMOVDQU 4000(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 4032(CX), Y11
-	VMOVDQU 4064(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 4096(CX), Y11
-	VMOVDQU 4128(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 4160(CX), Y11
-	VMOVDQU 4192(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 4224(CX), Y11
-	VMOVDQU 4256(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 4288(CX), Y11
-	VMOVDQU 4320(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 4352(CX), Y11
-	VMOVDQU 4384(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 4416(CX), Y11
-	VMOVDQU 4448(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1920(CX), Y6
+	VMOVDQU 1952(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 1984(CX), Y6
+	VMOVDQU 2016(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2048(CX), Y6
+	VMOVDQU 2080(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2112(CX), Y6
+	VMOVDQU 2144(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2176(CX), Y6
+	VMOVDQU 2208(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 7 to 10 outputs
-	VMOVDQU (R12), Y13
+	// Load and process 32 bytes from input 7 to 5 outputs
+	VMOVDQU (R12), Y8
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 4480(CX), Y11
-	VMOVDQU 4512(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 4544(CX), Y11
-	VMOVDQU 4576(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 4608(CX), Y11
-	VMOVDQU 4640(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 4672(CX), Y11
-	VMOVDQU 4704(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 4736(CX), Y11
-	VMOVDQU 4768(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 4800(CX), Y11
-	VMOVDQU 4832(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 4864(CX), Y11
-	VMOVDQU 4896(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 4928(CX), Y11
-	VMOVDQU 4960(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 4992(CX), Y11
-	VMOVDQU 5024(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 5056(CX), Y11
-	VMOVDQU 5088(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2240(CX), Y6
+	VMOVDQU 2272(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2304(CX), Y6
+	VMOVDQU 2336(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2368(CX), Y6
+	VMOVDQU 2400(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2432(CX), Y6
+	VMOVDQU 2464(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2496(CX), Y6
+	VMOVDQU 2528(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
+
+	// Load and process 32 bytes from input 8 to 5 outputs
+	VMOVDQU (R13), Y8
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2560(CX), Y6
+	VMOVDQU 2592(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2624(CX), Y6
+	VMOVDQU 2656(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 2688(CX), Y6
+	VMOVDQU 2720(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 2752(CX), Y6
+	VMOVDQU 2784(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 2816(CX), Y6
+	VMOVDQU 2848(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Load and process 32 bytes from input 8 to 10 outputs
-	VMOVDQU (DX), Y13
+	// Load and process 32 bytes from input 9 to 5 outputs
+	VMOVDQU (DX), Y8
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 5120(CX), Y11
-	VMOVDQU 5152(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 5184(CX), Y11
-	VMOVDQU 5216(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 5248(CX), Y11
-	VMOVDQU 5280(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 5312(CX), Y11
-	VMOVDQU 5344(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 5376(CX), Y11
-	VMOVDQU 5408(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 5440(CX), Y11
-	VMOVDQU 5472(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 5504(CX), Y11
-	VMOVDQU 5536(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 5568(CX), Y11
-	VMOVDQU 5600(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 5632(CX), Y11
-	VMOVDQU 5664(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 5696(CX), Y11
-	VMOVDQU 5728(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 2880(CX), Y6
+	VMOVDQU 2912(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y0)
+	VMOVDQU 2944(CX), Y6
+	VMOVDQU 2976(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y1)
+	VMOVDQU 3008(CX), Y6
+	VMOVDQU 3040(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y2)
+	VMOVDQU 3072(CX), Y6
+	VMOVDQU 3104(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y3)
+	VMOVDQU 3136(CX), Y6
+	VMOVDQU 3168(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	XOR3WAY( $0x00, Y6, Y7, Y4)
 
-	// Store 10 outputs
-	MOVQ    (R13), R15
-	VMOVDQU Y0, (R15)(R14*1)
-	MOVQ    24(R13), R15
-	VMOVDQU Y1, (R15)(R14*1)
-	MOVQ    48(R13), R15
-	VMOVDQU Y2, (R15)(R14*1)
-	MOVQ    72(R13), R15
-	VMOVDQU Y3, (R15)(R14*1)
-	MOVQ    96(R13), R15
-	VMOVDQU Y4, (R15)(R14*1)
-	MOVQ    120(R13), R15
-	VMOVDQU Y5, (R15)(R14*1)
-	MOVQ    144(R13), R15
-	VMOVDQU Y6, (R15)(R14*1)
-	MOVQ    168(R13), R15
-	VMOVDQU Y7, (R15)(R14*1)
-	MOVQ    192(R13), R15
-	VMOVDQU Y8, (R15)(R14*1)
-	MOVQ    216(R13), R15
-	VMOVDQU Y9, (R15)(R14*1)
+	// Store 5 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
+	MOVQ    96(R14), BP
+	VMOVDQU Y4, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R14
+	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_9x10_loop
+	JNZ  mulAvxTwo_10x5Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_9x10_end:
+mulAvxTwo_10x5Xor_end:
 	RET
 
-// func mulAvxTwo_10x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x1(SB), NOSPLIT, $0-88
+// func mulAvxTwo_10x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x6(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 24 YMM used
+	// Destination kept on stack
+	// Full registers estimated 131 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x1_end
+	JZ    mulAvxTwo_10x6_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -29922,12 +87303,8 @@ TEXT ·mulAvxTwo_10x1(SB), NOSPLIT, $0-88
 	MOVQ  192(DX), R13
 	MOVQ  216(DX), DX
 	MOVQ  out_base+48(FP), R14
-	MOVQ  (R14), R14
 	MOVQ  start+72(FP), R15
 
-	// Add start offset to output
-	ADDQ R15, R14
-
 	// Add start offset to input
 	ADDQ         R15, BX
 	ADDQ         R15, SI
@@ -29939,1875 +87316,2390 @@ TEXT ·mulAvxTwo_10x1(SB), NOSPLIT, $0-88
 	ADDQ         R15, R12
 	ADDQ         R15, R13
 	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X1
-	VPBROADCASTB X1, Y1
-
-mulAvxTwo_10x1_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
 
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (BX), Y4
+mulAvxTwo_10x6_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU (CX), Y2
-	VMOVDQU 32(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y5
 
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (SI), Y4
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (DI), Y4
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 128(CX), Y2
-	VMOVDQU 160(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (R8), Y4
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 192(CX), Y2
-	VMOVDQU 224(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (R9), Y4
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (R9), Y9
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 256(CX), Y2
-	VMOVDQU 288(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 5 to 1 outputs
-	VMOVDQU (R10), Y4
+	// Load and process 32 bytes from input 5 to 6 outputs
+	VMOVDQU (R10), Y9
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 320(CX), Y2
-	VMOVDQU 352(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 6 to 1 outputs
-	VMOVDQU (R11), Y4
+	// Load and process 32 bytes from input 6 to 6 outputs
+	VMOVDQU (R11), Y9
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 384(CX), Y2
-	VMOVDQU 416(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 7 to 1 outputs
-	VMOVDQU (R12), Y4
+	// Load and process 32 bytes from input 7 to 6 outputs
+	VMOVDQU (R12), Y9
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 448(CX), Y2
-	VMOVDQU 480(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2688(CX), Y7
+	VMOVDQU 2720(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2752(CX), Y7
+	VMOVDQU 2784(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2816(CX), Y7
+	VMOVDQU 2848(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2880(CX), Y7
+	VMOVDQU 2912(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2944(CX), Y7
+	VMOVDQU 2976(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3008(CX), Y7
+	VMOVDQU 3040(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 8 to 1 outputs
-	VMOVDQU (R13), Y4
+	// Load and process 32 bytes from input 8 to 6 outputs
+	VMOVDQU (R13), Y9
 	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 512(CX), Y2
-	VMOVDQU 544(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 3072(CX), Y7
+	VMOVDQU 3104(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 3136(CX), Y7
+	VMOVDQU 3168(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 3200(CX), Y7
+	VMOVDQU 3232(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 3264(CX), Y7
+	VMOVDQU 3296(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 3328(CX), Y7
+	VMOVDQU 3360(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3392(CX), Y7
+	VMOVDQU 3424(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 32 bytes from input 9 to 1 outputs
-	VMOVDQU (DX), Y4
+	// Load and process 32 bytes from input 9 to 6 outputs
+	VMOVDQU (DX), Y9
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y4, Y5
-	VPAND   Y1, Y4, Y4
-	VPAND   Y1, Y5, Y5
-	VMOVDQU 576(CX), Y2
-	VMOVDQU 608(CX), Y3
-	VPSHUFB Y4, Y2, Y2
-	VPSHUFB Y5, Y3, Y3
-	VPXOR   Y2, Y3, Y2
-	VPXOR   Y2, Y0, Y0
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 3456(CX), Y7
+	VMOVDQU 3488(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 3520(CX), Y7
+	VMOVDQU 3552(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 3584(CX), Y7
+	VMOVDQU 3616(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 3648(CX), Y7
+	VMOVDQU 3680(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 3712(CX), Y7
+	VMOVDQU 3744(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3776(CX), Y7
+	VMOVDQU 3808(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Store 1 outputs
-	VMOVDQU Y0, (R14)
-	ADDQ    $0x20, R14
+	// Store 6 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
+	MOVQ    96(R14), BP
+	VMOVDQU Y4, (BP)(R15*1)
+	MOVQ    120(R14), BP
+	VMOVDQU Y5, (BP)(R15*1)
 
 	// Prepare for next loop
+	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x1_loop
+	JNZ  mulAvxTwo_10x6_loop
 	VZEROUPPER
 
-mulAvxTwo_10x1_end:
+mulAvxTwo_10x6_end:
 	RET
 
-// func mulAvxTwo_10x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x1_64(SB), $8-88
-	// Loading no tables to registers
+// func mulGFNI_10x6_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x6_64(SB), $8-88
+	// Loading 24 of 60 tables to registers
 	// Destination kept on stack
-	// Full registers estimated 24 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_10x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), R11
-	MOVQ  192(AX), R12
-	MOVQ  216(AX), AX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
+	// Full registers estimated 68 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x6_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         R14, DX
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, AX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R15
-	SHRQ         $0x06, R15
-
-mulAvxTwo_10x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y6
-	VMOVDQU 32(R8), Y5
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 5 to 1 outputs
-	VMOVDQU (R9), Y6
-	VMOVDQU 32(R9), Y5
-	ADDQ    $0x40, R9
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 6 to 1 outputs
-	VMOVDQU (R10), Y6
-	VMOVDQU 32(R10), Y5
-	ADDQ    $0x40, R10
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 7 to 1 outputs
-	VMOVDQU (R11), Y6
-	VMOVDQU 32(R11), Y5
-	ADDQ    $0x40, R11
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 8 to 1 outputs
-	VMOVDQU (R12), Y6
-	VMOVDQU 32(R12), Y5
-	ADDQ    $0x40, R12
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 512(CX), Y3
-	VMOVDQU 544(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 9 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 576(CX), Y3
-	VMOVDQU 608(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x6_64_loop:
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 6 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 6 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 1 outputs
-	MOVQ    (R13), BP
-	VMOVDQU Y0, (BP)(R14*1)
-	VMOVDQU Y1, 32(BP)(R14*1)
+	// Store 6 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x40, R14
-	DECQ R15
-	JNZ  mulAvxTwo_10x1_64_loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x6_64_loop
 	VZEROUPPER
 
-mulAvxTwo_10x1_64_end:
+mulGFNI_10x6_64_end:
 	RET
 
-// func mulAvxTwo_10x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x2(SB), NOSPLIT, $8-88
-	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 47 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_10x2_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), R12
-	MOVQ  192(DX), R13
-	MOVQ  216(DX), DX
-	MOVQ  out_base+48(FP), R14
-	MOVQ  (R14), R15
-	MOVQ  24(R14), R14
-	MOVQ  start+72(FP), BP
-
-	// Add start offset to output
-	ADDQ BP, R15
-	ADDQ BP, R14
+// func mulGFNI_10x6_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x6_64Xor(SB), $8-88
+	// Loading 24 of 60 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 68 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x6_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	VBROADCASTF32X2 184(CX), Z23
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, R9
-	ADDQ         BP, R10
-	ADDQ         BP, R11
-	ADDQ         BP, R12
-	ADDQ         BP, R13
-	ADDQ         BP, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X2
-	VPBROADCASTB X2, Y2
-
-mulAvxTwo_10x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 1 to 2 outputs
-	VMOVDQU (SI), Y5
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 2 to 2 outputs
-	VMOVDQU (DI), Y5
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 3 to 2 outputs
-	VMOVDQU (R8), Y5
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 4 to 2 outputs
-	VMOVDQU (R9), Y5
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 512(CX), Y3
-	VMOVDQU 544(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 576(CX), Y3
-	VMOVDQU 608(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 5 to 2 outputs
-	VMOVDQU (R10), Y5
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 640(CX), Y3
-	VMOVDQU 672(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 704(CX), Y3
-	VMOVDQU 736(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 6 to 2 outputs
-	VMOVDQU (R11), Y5
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 768(CX), Y3
-	VMOVDQU 800(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 832(CX), Y3
-	VMOVDQU 864(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 7 to 2 outputs
-	VMOVDQU (R12), Y5
-	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 896(CX), Y3
-	VMOVDQU 928(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 960(CX), Y3
-	VMOVDQU 992(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 8 to 2 outputs
-	VMOVDQU (R13), Y5
-	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 1024(CX), Y3
-	VMOVDQU 1056(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 1088(CX), Y3
-	VMOVDQU 1120(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 9 to 2 outputs
-	VMOVDQU (DX), Y5
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 1152(CX), Y3
-	VMOVDQU 1184(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 1216(CX), Y3
-	VMOVDQU 1248(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x6_64Xor_loop:
+	// Load 6 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 (BP)(R15*1), Z24
+	MOVQ      24(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z25
+	MOVQ      48(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z26
+	MOVQ      72(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z27
+	MOVQ      96(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z28
+	MOVQ      120(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z29
+
+	// Load and process 64 bytes from input 0 to 6 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 6 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 6 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 6 outputs
+	VMOVDQU64      (R8), Z30
+	ADDQ           $0x40, R8
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z21, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z22, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z23, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 6 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 6 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 6 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 6 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 6 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 6 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 2 outputs
-	VMOVDQU Y0, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y1, (R14)
-	ADDQ    $0x20, R14
+	// Store 6 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
 	// Prepare for next loop
+	ADDQ $0x40, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x2_loop
+	JNZ  mulGFNI_10x6_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_10x2_end:
+mulGFNI_10x6_64Xor_end:
 	RET
 
-// func mulAvxTwo_10x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x2_64(SB), $8-88
+// func mulAvxTwo_10x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x6Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 47 YMM used
+	// Full registers estimated 131 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), R11
-	MOVQ  192(AX), R12
-	MOVQ  216(AX), AX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
-
-	// Add start offset to input
-	ADDQ         R14, DX
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, AX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X4
-	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R15
-	SHRQ         $0x06, R15
-
-mulAvxTwo_10x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	JZ    mulAvxTwo_10x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  start+72(FP), R15
 
-	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X6
+	VPBROADCASTB X6, Y6
 
-	// Load and process 64 bytes from input 1 to 2 outputs
+mulAvxTwo_10x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	MOVQ    (R14), BP
+	VMOVDQU (BP)(R15*1), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	MOVQ    24(R14), BP
+	VMOVDQU (BP)(R15*1), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	MOVQ    48(R14), BP
+	VMOVDQU (BP)(R15*1), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	MOVQ    72(R14), BP
+	VMOVDQU (BP)(R15*1), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	MOVQ    96(R14), BP
+	VMOVDQU (BP)(R15*1), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	MOVQ    120(R14), BP
+	VMOVDQU (BP)(R15*1), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 2 to 2 outputs
+	// Load and process 32 bytes from input 1 to 6 outputs
 	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
+	ADDQ    $0x20, SI
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 3 to 2 outputs
+	// Load and process 32 bytes from input 2 to 6 outputs
 	VMOVDQU (DI), Y9
-	VMOVDQU 32(DI), Y11
-	ADDQ    $0x40, DI
+	ADDQ    $0x20, DI
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 4 to 2 outputs
+	// Load and process 32 bytes from input 3 to 6 outputs
 	VMOVDQU (R8), Y9
-	VMOVDQU 32(R8), Y11
-	ADDQ    $0x40, R8
+	ADDQ    $0x20, R8
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 5 to 2 outputs
+	// Load and process 32 bytes from input 4 to 6 outputs
 	VMOVDQU (R9), Y9
-	VMOVDQU 32(R9), Y11
-	ADDQ    $0x40, R9
+	ADDQ    $0x20, R9
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 6 to 2 outputs
+	// Load and process 32 bytes from input 5 to 6 outputs
 	VMOVDQU (R10), Y9
-	VMOVDQU 32(R10), Y11
-	ADDQ    $0x40, R10
+	ADDQ    $0x20, R10
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1920(CX), Y7
+	VMOVDQU 1952(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 1984(CX), Y7
+	VMOVDQU 2016(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2048(CX), Y7
+	VMOVDQU 2080(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2112(CX), Y7
+	VMOVDQU 2144(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2176(CX), Y7
+	VMOVDQU 2208(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2240(CX), Y7
+	VMOVDQU 2272(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 7 to 2 outputs
+	// Load and process 32 bytes from input 6 to 6 outputs
 	VMOVDQU (R11), Y9
-	VMOVDQU 32(R11), Y11
-	ADDQ    $0x40, R11
+	ADDQ    $0x20, R11
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 896(CX), Y5
-	VMOVDQU 928(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 960(CX), Y5
-	VMOVDQU 992(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2304(CX), Y7
+	VMOVDQU 2336(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2368(CX), Y7
+	VMOVDQU 2400(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2432(CX), Y7
+	VMOVDQU 2464(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2496(CX), Y7
+	VMOVDQU 2528(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2560(CX), Y7
+	VMOVDQU 2592(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 2624(CX), Y7
+	VMOVDQU 2656(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 8 to 2 outputs
+	// Load and process 32 bytes from input 7 to 6 outputs
 	VMOVDQU (R12), Y9
-	VMOVDQU 32(R12), Y11
-	ADDQ    $0x40, R12
+	ADDQ    $0x20, R12
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 1024(CX), Y5
-	VMOVDQU 1056(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1088(CX), Y5
-	VMOVDQU 1120(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 2688(CX), Y7
+	VMOVDQU 2720(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 2752(CX), Y7
+	VMOVDQU 2784(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 2816(CX), Y7
+	VMOVDQU 2848(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 2880(CX), Y7
+	VMOVDQU 2912(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 2944(CX), Y7
+	VMOVDQU 2976(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3008(CX), Y7
+	VMOVDQU 3040(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Load and process 64 bytes from input 9 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	// Load and process 32 bytes from input 8 to 6 outputs
+	VMOVDQU (R13), Y9
+	ADDQ    $0x20, R13
 	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 1152(CX), Y5
-	VMOVDQU 1184(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1216(CX), Y5
-	VMOVDQU 1248(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 3072(CX), Y7
+	VMOVDQU 3104(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 3136(CX), Y7
+	VMOVDQU 3168(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 3200(CX), Y7
+	VMOVDQU 3232(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 3264(CX), Y7
+	VMOVDQU 3296(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 3328(CX), Y7
+	VMOVDQU 3360(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3392(CX), Y7
+	VMOVDQU 3424(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+
+	// Load and process 32 bytes from input 9 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 3456(CX), Y7
+	VMOVDQU 3488(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y0)
+	VMOVDQU 3520(CX), Y7
+	VMOVDQU 3552(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y1)
+	VMOVDQU 3584(CX), Y7
+	VMOVDQU 3616(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y2)
+	VMOVDQU 3648(CX), Y7
+	VMOVDQU 3680(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+	VMOVDQU 3712(CX), Y7
+	VMOVDQU 3744(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU 3776(CX), Y7
+	VMOVDQU 3808(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
 
-	// Store 2 outputs
-	MOVQ    (R13), BP
-	VMOVDQU Y0, (BP)(R14*1)
-	VMOVDQU Y1, 32(BP)(R14*1)
-	MOVQ    24(R13), BP
-	VMOVDQU Y2, (BP)(R14*1)
-	VMOVDQU Y3, 32(BP)(R14*1)
+	// Store 6 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
+	MOVQ    96(R14), BP
+	VMOVDQU Y4, (BP)(R15*1)
+	MOVQ    120(R14), BP
+	VMOVDQU Y5, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x40, R14
-	DECQ R15
-	JNZ  mulAvxTwo_10x2_64_loop
+	ADDQ $0x20, R15
+	DECQ AX
+	JNZ  mulAvxTwo_10x6Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_10x2_64_end:
+mulAvxTwo_10x6Xor_end:
 	RET
 
-// func mulAvxTwo_10x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x3(SB), NOSPLIT, $8-88
+// func mulAvxTwo_10x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x7(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 68 YMM used
+	// Destination kept on stack
+	// Full registers estimated 152 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x3_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), R11
-	MOVQ  192(AX), R12
-	MOVQ  216(AX), AX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  (R13), R14
-	MOVQ  24(R13), R15
-	MOVQ  48(R13), R13
-	MOVQ  start+72(FP), BP
-
-	// Add start offset to output
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R13
+	JZ    mulAvxTwo_10x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         BP, DX
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, R9
-	ADDQ         BP, R10
-	ADDQ         BP, R11
-	ADDQ         BP, R12
-	ADDQ         BP, AX
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X3
-	VPBROADCASTB X3, Y3
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_10x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-
-	// Load and process 32 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y6
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU (CX), Y4
-	VMOVDQU 32(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 64(CX), Y4
-	VMOVDQU 96(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
 
-	// Load and process 32 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y6
+mulAvxTwo_10x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 192(CX), Y4
-	VMOVDQU 224(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 256(CX), Y4
-	VMOVDQU 288(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 320(CX), Y4
-	VMOVDQU 352(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y6
 
-	// Load and process 32 bytes from input 2 to 3 outputs
-	VMOVDQU (SI), Y6
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 384(CX), Y4
-	VMOVDQU 416(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 448(CX), Y4
-	VMOVDQU 480(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 512(CX), Y4
-	VMOVDQU 544(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 3 to 3 outputs
-	VMOVDQU (DI), Y6
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 576(CX), Y4
-	VMOVDQU 608(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 640(CX), Y4
-	VMOVDQU 672(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 704(CX), Y4
-	VMOVDQU 736(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 4 to 3 outputs
-	VMOVDQU (R8), Y6
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 768(CX), Y4
-	VMOVDQU 800(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 832(CX), Y4
-	VMOVDQU 864(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 896(CX), Y4
-	VMOVDQU 928(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 5 to 3 outputs
-	VMOVDQU (R9), Y6
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 960(CX), Y4
-	VMOVDQU 992(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1024(CX), Y4
-	VMOVDQU 1056(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1088(CX), Y4
-	VMOVDQU 1120(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 6 to 3 outputs
-	VMOVDQU (R10), Y6
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 1152(CX), Y4
-	VMOVDQU 1184(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1216(CX), Y4
-	VMOVDQU 1248(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1280(CX), Y4
-	VMOVDQU 1312(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 7 to 3 outputs
-	VMOVDQU (R11), Y6
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (R11), Y10
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 1344(CX), Y4
-	VMOVDQU 1376(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1408(CX), Y4
-	VMOVDQU 1440(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1472(CX), Y4
-	VMOVDQU 1504(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 8 to 3 outputs
-	VMOVDQU (R12), Y6
+	// Load and process 32 bytes from input 7 to 7 outputs
+	VMOVDQU (R12), Y10
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 1536(CX), Y4
-	VMOVDQU 1568(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1600(CX), Y4
-	VMOVDQU 1632(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1664(CX), Y4
-	VMOVDQU 1696(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
-
-	// Load and process 32 bytes from input 9 to 3 outputs
-	VMOVDQU (AX), Y6
-	ADDQ    $0x20, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 1728(CX), Y4
-	VMOVDQU 1760(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 1792(CX), Y4
-	VMOVDQU 1824(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 1856(CX), Y4
-	VMOVDQU 1888(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3136(CX), Y8
+	VMOVDQU 3168(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3200(CX), Y8
+	VMOVDQU 3232(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3264(CX), Y8
+	VMOVDQU 3296(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3328(CX), Y8
+	VMOVDQU 3360(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3392(CX), Y8
+	VMOVDQU 3424(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3456(CX), Y8
+	VMOVDQU 3488(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3520(CX), Y8
+	VMOVDQU 3552(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Store 3 outputs
-	VMOVDQU Y0, (R14)
-	ADDQ    $0x20, R14
-	VMOVDQU Y1, (R15)
-	ADDQ    $0x20, R15
-	VMOVDQU Y2, (R13)
+	// Load and process 32 bytes from input 8 to 7 outputs
+	VMOVDQU (R13), Y10
 	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3584(CX), Y8
+	VMOVDQU 3616(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3648(CX), Y8
+	VMOVDQU 3680(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3712(CX), Y8
+	VMOVDQU 3744(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3776(CX), Y8
+	VMOVDQU 3808(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3840(CX), Y8
+	VMOVDQU 3872(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3904(CX), Y8
+	VMOVDQU 3936(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3968(CX), Y8
+	VMOVDQU 4000(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Load and process 32 bytes from input 9 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 4032(CX), Y8
+	VMOVDQU 4064(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 4096(CX), Y8
+	VMOVDQU 4128(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 4160(CX), Y8
+	VMOVDQU 4192(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 4224(CX), Y8
+	VMOVDQU 4256(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 4288(CX), Y8
+	VMOVDQU 4320(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 4352(CX), Y8
+	VMOVDQU 4384(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 4416(CX), Y8
+	VMOVDQU 4448(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
+
+	// Store 7 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
+	MOVQ    96(R14), BP
+	VMOVDQU Y4, (BP)(R15*1)
+	MOVQ    120(R14), BP
+	VMOVDQU Y5, (BP)(R15*1)
+	MOVQ    144(R14), BP
+	VMOVDQU Y6, (BP)(R15*1)
 
 	// Prepare for next loop
-	DECQ BP
-	JNZ  mulAvxTwo_10x3_loop
+	ADDQ $0x20, R15
+	DECQ AX
+	JNZ  mulAvxTwo_10x7_loop
 	VZEROUPPER
 
-mulAvxTwo_10x3_end:
+mulAvxTwo_10x7_end:
 	RET
 
-// func mulAvxTwo_10x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x3_64(SB), $8-88
-	// Loading no tables to registers
+// func mulGFNI_10x7_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x7_64(SB), $8-88
+	// Loading 23 of 70 tables to registers
 	// Destination kept on stack
-	// Full registers estimated 68 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_10x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), R9
-	MOVQ  144(AX), R10
-	MOVQ  168(AX), R11
-	MOVQ  192(AX), R12
-	MOVQ  216(AX), AX
-	MOVQ  out_base+48(FP), R13
-	MOVQ  out_base+48(FP), R13
-	MOVQ  start+72(FP), R14
+	// Full registers estimated 79 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x7_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         R14, DX
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, R9
-	ADDQ         R14, R10
-	ADDQ         R14, R11
-	ADDQ         R14, R12
-	ADDQ         R14, AX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X6
-	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R15
-	SHRQ         $0x06, R15
-
-mulAvxTwo_10x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
-	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
-
-	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
-
-	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (SI), Y11
-	VMOVDQU 32(SI), Y13
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 384(CX), Y7
-	VMOVDQU 416(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 448(CX), Y7
-	VMOVDQU 480(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 512(CX), Y7
-	VMOVDQU 544(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
-
-	// Load and process 64 bytes from input 3 to 3 outputs
-	VMOVDQU (DI), Y11
-	VMOVDQU 32(DI), Y13
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 576(CX), Y7
-	VMOVDQU 608(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y7
-	VMOVDQU 672(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 704(CX), Y7
-	VMOVDQU 736(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
-
-	// Load and process 64 bytes from input 4 to 3 outputs
-	VMOVDQU (R8), Y11
-	VMOVDQU 32(R8), Y13
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 768(CX), Y7
-	VMOVDQU 800(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 832(CX), Y7
-	VMOVDQU 864(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 896(CX), Y7
-	VMOVDQU 928(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x7_64_loop:
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 7 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 7 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Load and process 64 bytes from input 5 to 3 outputs
-	VMOVDQU (R9), Y11
-	VMOVDQU 32(R9), Y13
-	ADDQ    $0x40, R9
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 960(CX), Y7
-	VMOVDQU 992(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1024(CX), Y7
-	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1088(CX), Y7
-	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Store 7 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
-	// Load and process 64 bytes from input 6 to 3 outputs
-	VMOVDQU (R10), Y11
-	VMOVDQU 32(R10), Y13
-	ADDQ    $0x40, R10
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 1152(CX), Y7
-	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1216(CX), Y7
-	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y7
-	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x7_64_loop
+	VZEROUPPER
 
-	// Load and process 64 bytes from input 7 to 3 outputs
-	VMOVDQU (R11), Y11
-	VMOVDQU 32(R11), Y13
-	ADDQ    $0x40, R11
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 1344(CX), Y7
-	VMOVDQU 1376(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1408(CX), Y7
-	VMOVDQU 1440(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1472(CX), Y7
-	VMOVDQU 1504(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+mulGFNI_10x7_64_end:
+	RET
 
-	// Load and process 64 bytes from input 8 to 3 outputs
-	VMOVDQU (R12), Y11
-	VMOVDQU 32(R12), Y13
-	ADDQ    $0x40, R12
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 1536(CX), Y7
-	VMOVDQU 1568(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1600(CX), Y7
-	VMOVDQU 1632(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1664(CX), Y7
-	VMOVDQU 1696(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+// func mulGFNI_10x7_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x7_64Xor(SB), $8-88
+	// Loading 23 of 70 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 79 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x7_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	VBROADCASTF32X2 176(CX), Z22
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
 
-	// Load and process 64 bytes from input 9 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y11, Y12
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y6, Y11, Y11
-	VPAND   Y6, Y13, Y13
-	VPAND   Y6, Y12, Y12
-	VPAND   Y6, Y14, Y14
-	VMOVDQU 1728(CX), Y7
-	VMOVDQU 1760(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1792(CX), Y7
-	VMOVDQU 1824(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1856(CX), Y7
-	VMOVDQU 1888(CX), Y8
-	VPSHUFB Y13, Y7, Y9
-	VPSHUFB Y11, Y7, Y7
-	VPSHUFB Y14, Y8, Y10
-	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x7_64Xor_loop:
+	// Load 7 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 (BP)(R15*1), Z23
+	MOVQ      24(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z24
+	MOVQ      48(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z25
+	MOVQ      72(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z26
+	MOVQ      96(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z27
+	MOVQ      120(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z28
+	MOVQ      144(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z29
+
+	// Load and process 64 bytes from input 0 to 7 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 7 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 7 outputs
+	VMOVDQU64      (DI), Z30
+	ADDQ           $0x40, DI
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z20, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 7 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z22, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 7 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 7 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 7 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 7 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 7 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 7 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 3 outputs
-	MOVQ    (R13), BP
-	VMOVDQU Y0, (BP)(R14*1)
-	VMOVDQU Y1, 32(BP)(R14*1)
-	MOVQ    24(R13), BP
-	VMOVDQU Y2, (BP)(R14*1)
-	VMOVDQU Y3, 32(BP)(R14*1)
-	MOVQ    48(R13), BP
-	VMOVDQU Y4, (BP)(R14*1)
-	VMOVDQU Y5, 32(BP)(R14*1)
+	// Store 7 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x40, R14
-	DECQ R15
-	JNZ  mulAvxTwo_10x3_64_loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x7_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_10x3_64_end:
+mulGFNI_10x7_64Xor_end:
 	RET
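
The Xor form of the GFNI kernel above keeps its destinations on the stack and accumulates into data already present in the output shards: the first 23 of the 70 coefficient matrices are broadcast into Z0-Z22 with VBROADCASTF32X2, the seven current output blocks are loaded at the top of each loop iteration, every 64-byte block from each of the ten inputs is multiplied with VGF2P8AFFINEQB (switching to .BCST memory operands once the registers are exhausted) and folded in with VXORPD, and the updated blocks are stored back. The scalar Go sketch below mirrors that accumulation pattern only; gfMul, mulXor, the gfsketch package name and the 0x1d reduction constant are assumptions for illustration, not identifiers from this package, and the real assembly performs the per-byte multiply as an 8x8 bit-matrix affine transform rather than a shift-and-add loop.

package gfsketch

// gfMul multiplies two GF(2^8) elements by shift-and-add, reducing with the
// low byte of an assumed x^8+x^4+x^3+x^2+1 field polynomial (0x11d).
func gfMul(a, b byte) byte {
	const poly = 0x1d
	var p byte
	for b != 0 {
		if b&1 != 0 {
			p ^= a
		}
		carry := a & 0x80
		a <<= 1
		if carry != 0 {
			a ^= poly
		}
		b >>= 1
	}
	return p
}

// mulXor models the 10x7 Xor kernel: for every byte offset in the block,
// out[o] ^= coeffs[o][0]*in[0] ^ ... ^ coeffs[o][9]*in[9] over GF(2^8),
// starting from the value already stored in the output shard.
func mulXor(coeffs [][]byte, in, out [][]byte, start, n int) {
	for off := start; off < start+n; off++ {
		for o := range out {
			acc := out[o][off] // Xor variant: keep the existing output byte
			for i := range in {
				acc ^= gfMul(coeffs[o][i], in[i][off])
			}
			out[o][off] = acc
		}
	}
}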
 
-// func mulAvxTwo_10x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x4(SB), NOSPLIT, $8-88
+// func mulAvxTwo_10x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x7Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 89 YMM used
+	// Full registers estimated 152 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x4_end
+	JZ    mulAvxTwo_10x7Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -31834,327 +89726,445 @@ TEXT ·mulAvxTwo_10x4(SB), NOSPLIT, $8-88
 	ADDQ         R15, R13
 	ADDQ         R15, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_10x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_10x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	MOVQ    (R14), BP
+	VMOVDQU (BP)(R15*1), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	MOVQ    24(R14), BP
+	VMOVDQU (BP)(R15*1), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	MOVQ    48(R14), BP
+	VMOVDQU (BP)(R15*1), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	MOVQ    72(R14), BP
+	VMOVDQU (BP)(R15*1), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	MOVQ    96(R14), BP
+	VMOVDQU (BP)(R15*1), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	MOVQ    120(R14), BP
+	VMOVDQU (BP)(R15*1), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	MOVQ    144(R14), BP
+	VMOVDQU (BP)(R15*1), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (SI), Y7
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (DI), Y7
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 3 to 4 outputs
-	VMOVDQU (R8), Y7
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 768(CX), Y5
-	VMOVDQU 800(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 832(CX), Y5
-	VMOVDQU 864(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 896(CX), Y5
-	VMOVDQU 928(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 960(CX), Y5
-	VMOVDQU 992(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 4 to 4 outputs
-	VMOVDQU (R9), Y7
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (R9), Y10
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1024(CX), Y5
-	VMOVDQU 1056(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1088(CX), Y5
-	VMOVDQU 1120(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1152(CX), Y5
-	VMOVDQU 1184(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1216(CX), Y5
-	VMOVDQU 1248(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 5 to 4 outputs
-	VMOVDQU (R10), Y7
+	// Load and process 32 bytes from input 5 to 7 outputs
+	VMOVDQU (R10), Y10
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1280(CX), Y5
-	VMOVDQU 1312(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1344(CX), Y5
-	VMOVDQU 1376(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1408(CX), Y5
-	VMOVDQU 1440(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1472(CX), Y5
-	VMOVDQU 1504(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2240(CX), Y8
+	VMOVDQU 2272(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2304(CX), Y8
+	VMOVDQU 2336(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2368(CX), Y8
+	VMOVDQU 2400(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2432(CX), Y8
+	VMOVDQU 2464(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2496(CX), Y8
+	VMOVDQU 2528(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 2560(CX), Y8
+	VMOVDQU 2592(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 2624(CX), Y8
+	VMOVDQU 2656(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 6 to 4 outputs
-	VMOVDQU (R11), Y7
+	// Load and process 32 bytes from input 6 to 7 outputs
+	VMOVDQU (R11), Y10
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1536(CX), Y5
-	VMOVDQU 1568(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1600(CX), Y5
-	VMOVDQU 1632(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1664(CX), Y5
-	VMOVDQU 1696(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1728(CX), Y5
-	VMOVDQU 1760(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 2688(CX), Y8
+	VMOVDQU 2720(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 2752(CX), Y8
+	VMOVDQU 2784(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 2816(CX), Y8
+	VMOVDQU 2848(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 2880(CX), Y8
+	VMOVDQU 2912(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 2944(CX), Y8
+	VMOVDQU 2976(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3008(CX), Y8
+	VMOVDQU 3040(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3072(CX), Y8
+	VMOVDQU 3104(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 7 to 4 outputs
-	VMOVDQU (R12), Y7
+	// Load and process 32 bytes from input 7 to 7 outputs
+	VMOVDQU (R12), Y10
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 1792(CX), Y5
-	VMOVDQU 1824(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 1856(CX), Y5
-	VMOVDQU 1888(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 1920(CX), Y5
-	VMOVDQU 1952(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 1984(CX), Y5
-	VMOVDQU 2016(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3136(CX), Y8
+	VMOVDQU 3168(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3200(CX), Y8
+	VMOVDQU 3232(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3264(CX), Y8
+	VMOVDQU 3296(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3328(CX), Y8
+	VMOVDQU 3360(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3392(CX), Y8
+	VMOVDQU 3424(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3456(CX), Y8
+	VMOVDQU 3488(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3520(CX), Y8
+	VMOVDQU 3552(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 8 to 4 outputs
-	VMOVDQU (R13), Y7
+	// Load and process 32 bytes from input 8 to 7 outputs
+	VMOVDQU (R13), Y10
 	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 2048(CX), Y5
-	VMOVDQU 2080(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 2112(CX), Y5
-	VMOVDQU 2144(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 2176(CX), Y5
-	VMOVDQU 2208(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 2240(CX), Y5
-	VMOVDQU 2272(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 3584(CX), Y8
+	VMOVDQU 3616(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 3648(CX), Y8
+	VMOVDQU 3680(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 3712(CX), Y8
+	VMOVDQU 3744(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 3776(CX), Y8
+	VMOVDQU 3808(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 3840(CX), Y8
+	VMOVDQU 3872(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 3904(CX), Y8
+	VMOVDQU 3936(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 3968(CX), Y8
+	VMOVDQU 4000(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Load and process 32 bytes from input 9 to 4 outputs
-	VMOVDQU (DX), Y7
+	// Load and process 32 bytes from input 9 to 7 outputs
+	VMOVDQU (DX), Y10
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 2304(CX), Y5
-	VMOVDQU 2336(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 2368(CX), Y5
-	VMOVDQU 2400(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 2432(CX), Y5
-	VMOVDQU 2464(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 2496(CX), Y5
-	VMOVDQU 2528(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 4032(CX), Y8
+	VMOVDQU 4064(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y0)
+	VMOVDQU 4096(CX), Y8
+	VMOVDQU 4128(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y1)
+	VMOVDQU 4160(CX), Y8
+	VMOVDQU 4192(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y2)
+	VMOVDQU 4224(CX), Y8
+	VMOVDQU 4256(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y3)
+	VMOVDQU 4288(CX), Y8
+	VMOVDQU 4320(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y4)
+	VMOVDQU 4352(CX), Y8
+	VMOVDQU 4384(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y5)
+	VMOVDQU 4416(CX), Y8
+	VMOVDQU 4448(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	XOR3WAY( $0x00, Y8, Y9, Y6)
 
-	// Store 4 outputs
+	// Store 7 outputs
 	MOVQ    (R14), BP
 	VMOVDQU Y0, (BP)(R15*1)
 	MOVQ    24(R14), BP
@@ -32163,27 +90173,33 @@ mulAvxTwo_10x4_loop:
 	VMOVDQU Y2, (BP)(R15*1)
 	MOVQ    72(R14), BP
 	VMOVDQU Y3, (BP)(R15*1)
+	MOVQ    96(R14), BP
+	VMOVDQU Y4, (BP)(R15*1)
+	MOVQ    120(R14), BP
+	VMOVDQU Y5, (BP)(R15*1)
+	MOVQ    144(R14), BP
+	VMOVDQU Y6, (BP)(R15*1)
 
 	// Prepare for next loop
 	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x4_loop
+	JNZ  mulAvxTwo_10x7Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_10x4_end:
+mulAvxTwo_10x7Xor_end:
 	RET
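
mulAvxTwo_10x7Xor reaches the same result with AVX2 lookups: each coefficient's product table is stored as a 32-byte low-nibble table and a 32-byte high-nibble table, VPSRLQ/VPAND split every input byte into its two nibbles, VPSHUFB performs 32 parallel 4-bit lookups, and the XOR3WAY macro XORs both halves into the destination register in one step (a three-input XOR, which is why AVX512F and AVX512VL now appear in the Requires line). A scalar sketch of the nibble-table trick follows; it extends the assumed gfsketch package from the previous sketch, and buildNibbleTables/mulAddNibble are illustrative names only.

// buildNibbleTables splits multiplication by a fixed coefficient c into two
// 16-entry tables: low[x] = c*x and high[x] = c*(x<<4), so that
// c*b == low[b&0x0f] ^ high[b>>4] for any byte b.
func buildNibbleTables(c byte) (low, high [16]byte) {
	for x := byte(0); x < 16; x++ {
		low[x] = gfMul(c, x)
		high[x] = gfMul(c, x<<4)
	}
	return
}

// mulAddNibble models one "input i to output o" step of the AVX2 kernels:
// dst[j] ^= c*src[j], which the assembly does 32 bytes at a time with a
// VPSHUFB lookup into each half table.
func mulAddNibble(low, high [16]byte, dst, src []byte) {
	for j, b := range src {
		dst[j] ^= low[b&0x0f] ^ high[b>>4]
	}
}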
 
-// func mulAvxTwo_10x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x5(SB), NOSPLIT, $8-88
+// func mulAvxTwo_10x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x8(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 110 YMM used
+	// Full registers estimated 173 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x5_end
+	JZ    mulAvxTwo_10x8_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -32210,388 +90226,481 @@ TEXT ·mulAvxTwo_10x5(SB), NOSPLIT, $8-88
 	ADDQ         R15, R13
 	ADDQ         R15, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X5
-	VPBROADCASTB X5, Y5
-
-mulAvxTwo_10x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
 
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (BX), Y8
+mulAvxTwo_10x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
-	VMOVDQU 96(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 128(CX), Y6
-	VMOVDQU 160(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 256(CX), Y6
-	VMOVDQU 288(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
 
-	// Load and process 32 bytes from input 1 to 5 outputs
-	VMOVDQU (SI), Y8
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 320(CX), Y6
-	VMOVDQU 352(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 384(CX), Y6
-	VMOVDQU 416(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 448(CX), Y6
-	VMOVDQU 480(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 512(CX), Y6
-	VMOVDQU 544(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 576(CX), Y6
-	VMOVDQU 608(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 2 to 5 outputs
-	VMOVDQU (DI), Y8
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 640(CX), Y6
-	VMOVDQU 672(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 704(CX), Y6
-	VMOVDQU 736(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 768(CX), Y6
-	VMOVDQU 800(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 832(CX), Y6
-	VMOVDQU 864(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 896(CX), Y6
-	VMOVDQU 928(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 3 to 5 outputs
-	VMOVDQU (R8), Y8
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 960(CX), Y6
-	VMOVDQU 992(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1024(CX), Y6
-	VMOVDQU 1056(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1088(CX), Y6
-	VMOVDQU 1120(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1152(CX), Y6
-	VMOVDQU 1184(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1216(CX), Y6
-	VMOVDQU 1248(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 4 to 5 outputs
-	VMOVDQU (R9), Y8
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1280(CX), Y6
-	VMOVDQU 1312(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1344(CX), Y6
-	VMOVDQU 1376(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1408(CX), Y6
-	VMOVDQU 1440(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1472(CX), Y6
-	VMOVDQU 1504(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1536(CX), Y6
-	VMOVDQU 1568(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 5 to 5 outputs
-	VMOVDQU (R10), Y8
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1600(CX), Y6
-	VMOVDQU 1632(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1664(CX), Y6
-	VMOVDQU 1696(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 1728(CX), Y6
-	VMOVDQU 1760(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 1792(CX), Y6
-	VMOVDQU 1824(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 1856(CX), Y6
-	VMOVDQU 1888(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 6 to 5 outputs
-	VMOVDQU (R11), Y8
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (R11), Y11
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 1920(CX), Y6
-	VMOVDQU 1952(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 1984(CX), Y6
-	VMOVDQU 2016(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2048(CX), Y6
-	VMOVDQU 2080(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2112(CX), Y6
-	VMOVDQU 2144(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2176(CX), Y6
-	VMOVDQU 2208(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 7 to 5 outputs
-	VMOVDQU (R12), Y8
+	// Load and process 32 bytes from input 7 to 8 outputs
+	VMOVDQU (R12), Y11
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 2240(CX), Y6
-	VMOVDQU 2272(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 2304(CX), Y6
-	VMOVDQU 2336(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2368(CX), Y6
-	VMOVDQU 2400(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2432(CX), Y6
-	VMOVDQU 2464(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2496(CX), Y6
-	VMOVDQU 2528(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3584(CX), Y9
+	VMOVDQU 3616(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3648(CX), Y9
+	VMOVDQU 3680(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3712(CX), Y9
+	VMOVDQU 3744(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3776(CX), Y9
+	VMOVDQU 3808(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3840(CX), Y9
+	VMOVDQU 3872(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3904(CX), Y9
+	VMOVDQU 3936(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3968(CX), Y9
+	VMOVDQU 4000(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4032(CX), Y9
+	VMOVDQU 4064(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 8 to 5 outputs
-	VMOVDQU (R13), Y8
+	// Load and process 32 bytes from input 8 to 8 outputs
+	VMOVDQU (R13), Y11
 	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 2560(CX), Y6
-	VMOVDQU 2592(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 2624(CX), Y6
-	VMOVDQU 2656(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 2688(CX), Y6
-	VMOVDQU 2720(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 2752(CX), Y6
-	VMOVDQU 2784(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 2816(CX), Y6
-	VMOVDQU 2848(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 4096(CX), Y9
+	VMOVDQU 4128(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 4160(CX), Y9
+	VMOVDQU 4192(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 4224(CX), Y9
+	VMOVDQU 4256(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 4288(CX), Y9
+	VMOVDQU 4320(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 4352(CX), Y9
+	VMOVDQU 4384(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 4416(CX), Y9
+	VMOVDQU 4448(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 4480(CX), Y9
+	VMOVDQU 4512(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4544(CX), Y9
+	VMOVDQU 4576(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 9 to 5 outputs
-	VMOVDQU (DX), Y8
+	// Load and process 32 bytes from input 9 to 8 outputs
+	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 2880(CX), Y6
-	VMOVDQU 2912(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 2944(CX), Y6
-	VMOVDQU 2976(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 3008(CX), Y6
-	VMOVDQU 3040(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 3072(CX), Y6
-	VMOVDQU 3104(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 3136(CX), Y6
-	VMOVDQU 3168(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 4608(CX), Y9
+	VMOVDQU 4640(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 4672(CX), Y9
+	VMOVDQU 4704(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 4736(CX), Y9
+	VMOVDQU 4768(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 4800(CX), Y9
+	VMOVDQU 4832(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 4864(CX), Y9
+	VMOVDQU 4896(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 4928(CX), Y9
+	VMOVDQU 4960(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 4992(CX), Y9
+	VMOVDQU 5024(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 5056(CX), Y9
+	VMOVDQU 5088(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Store 5 outputs
+	// Store 8 outputs
 	MOVQ    (R14), BP
 	VMOVDQU Y0, (BP)(R15*1)
 	MOVQ    24(R14), BP
@@ -32602,529 +90711,619 @@ mulAvxTwo_10x5_loop:
 	VMOVDQU Y3, (BP)(R15*1)
 	MOVQ    96(R14), BP
 	VMOVDQU Y4, (BP)(R15*1)
+	MOVQ    120(R14), BP
+	VMOVDQU Y5, (BP)(R15*1)
+	MOVQ    144(R14), BP
+	VMOVDQU Y6, (BP)(R15*1)
+	MOVQ    168(R14), BP
+	VMOVDQU Y7, (BP)(R15*1)
 
 	// Prepare for next loop
 	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x5_loop
+	JNZ  mulAvxTwo_10x8_loop
 	VZEROUPPER
 
-mulAvxTwo_10x5_end:
+mulAvxTwo_10x8_end:
 	RET
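
mulAvxTwo_10x8 is the overwrite (non-Xor) counterpart: the lookups for input 0 initialize Y0-Y7 directly with a plain VPXOR instead of being folded into previously loaded output data, and only inputs 1 through 9 accumulate via XOR3WAY; the mulGFNI_10x8_64 kernel that begins below does the same by letting VGF2P8AFFINEQB write straight into Z22-Z29 for input 0. In the scalar model this is a one-line change to mulXor from the first sketch; mulInit below is again an illustrative name within the assumed gfsketch package.

// mulInit models the non-Xor kernels: input 0 seeds each output byte, so the
// previous contents of the output shards are overwritten rather than
// accumulated; the remaining inputs are XORed in as before.
func mulInit(coeffs [][]byte, in, out [][]byte, start, n int) {
	for off := start; off < start+n; off++ {
		for o := range out {
			acc := gfMul(coeffs[o][0], in[0][off]) // overwrite, not accumulate
			for i := 1; i < len(in); i++ {
				acc ^= gfMul(coeffs[o][i], in[i][off])
			}
			out[o][off] = acc
		}
	}
}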
 
-// func mulAvxTwo_10x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x6(SB), NOSPLIT, $8-88
-	// Loading no tables to registers
+// func mulGFNI_10x8_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x8_64(SB), $8-88
+	// Loading 22 of 80 tables to registers
 	// Destination kept on stack
-	// Full registers estimated 131 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_10x6_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), R10
-	MOVQ  144(DX), R11
-	MOVQ  168(DX), R12
-	MOVQ  192(DX), R13
-	MOVQ  216(DX), DX
-	MOVQ  out_base+48(FP), R14
-	MOVQ  start+72(FP), R15
+	// Full registers estimated 90 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x8_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DI
-	ADDQ         R15, R8
-	ADDQ         R15, R9
-	ADDQ         R15, R10
-	ADDQ         R15, R11
-	ADDQ         R15, R12
-	ADDQ         R15, R13
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X6
-	VPBROADCASTB X6, Y6
-
-mulAvxTwo_10x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
-	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (BX), Y9
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU (CX), Y7
-	VMOVDQU 32(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 64(CX), Y7
-	VMOVDQU 96(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 128(CX), Y7
-	VMOVDQU 160(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 192(CX), Y7
-	VMOVDQU 224(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 256(CX), Y7
-	VMOVDQU 288(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 320(CX), Y7
-	VMOVDQU 352(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 1 to 6 outputs
-	VMOVDQU (SI), Y9
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 384(CX), Y7
-	VMOVDQU 416(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 448(CX), Y7
-	VMOVDQU 480(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 512(CX), Y7
-	VMOVDQU 544(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 576(CX), Y7
-	VMOVDQU 608(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 640(CX), Y7
-	VMOVDQU 672(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 704(CX), Y7
-	VMOVDQU 736(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 2 to 6 outputs
-	VMOVDQU (DI), Y9
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 768(CX), Y7
-	VMOVDQU 800(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 832(CX), Y7
-	VMOVDQU 864(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 896(CX), Y7
-	VMOVDQU 928(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 960(CX), Y7
-	VMOVDQU 992(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1024(CX), Y7
-	VMOVDQU 1056(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1088(CX), Y7
-	VMOVDQU 1120(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 3 to 6 outputs
-	VMOVDQU (R8), Y9
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1152(CX), Y7
-	VMOVDQU 1184(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1216(CX), Y7
-	VMOVDQU 1248(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1280(CX), Y7
-	VMOVDQU 1312(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1344(CX), Y7
-	VMOVDQU 1376(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1408(CX), Y7
-	VMOVDQU 1440(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1472(CX), Y7
-	VMOVDQU 1504(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 4 to 6 outputs
-	VMOVDQU (R9), Y9
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1536(CX), Y7
-	VMOVDQU 1568(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1600(CX), Y7
-	VMOVDQU 1632(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 1664(CX), Y7
-	VMOVDQU 1696(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 1728(CX), Y7
-	VMOVDQU 1760(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 1792(CX), Y7
-	VMOVDQU 1824(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 1856(CX), Y7
-	VMOVDQU 1888(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 5 to 6 outputs
-	VMOVDQU (R10), Y9
-	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 1920(CX), Y7
-	VMOVDQU 1952(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 1984(CX), Y7
-	VMOVDQU 2016(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2048(CX), Y7
-	VMOVDQU 2080(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2112(CX), Y7
-	VMOVDQU 2144(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2176(CX), Y7
-	VMOVDQU 2208(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2240(CX), Y7
-	VMOVDQU 2272(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 6 to 6 outputs
-	VMOVDQU (R11), Y9
-	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 2304(CX), Y7
-	VMOVDQU 2336(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 2368(CX), Y7
-	VMOVDQU 2400(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2432(CX), Y7
-	VMOVDQU 2464(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2496(CX), Y7
-	VMOVDQU 2528(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2560(CX), Y7
-	VMOVDQU 2592(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 2624(CX), Y7
-	VMOVDQU 2656(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 7 to 6 outputs
-	VMOVDQU (R12), Y9
-	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 2688(CX), Y7
-	VMOVDQU 2720(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 2752(CX), Y7
-	VMOVDQU 2784(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 2816(CX), Y7
-	VMOVDQU 2848(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 2880(CX), Y7
-	VMOVDQU 2912(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 2944(CX), Y7
-	VMOVDQU 2976(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 3008(CX), Y7
-	VMOVDQU 3040(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 8 to 6 outputs
-	VMOVDQU (R13), Y9
-	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 3072(CX), Y7
-	VMOVDQU 3104(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 3136(CX), Y7
-	VMOVDQU 3168(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 3200(CX), Y7
-	VMOVDQU 3232(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 3264(CX), Y7
-	VMOVDQU 3296(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 3328(CX), Y7
-	VMOVDQU 3360(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 3392(CX), Y7
-	VMOVDQU 3424(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
-
-	// Load and process 32 bytes from input 9 to 6 outputs
-	VMOVDQU (DX), Y9
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
-	VPAND   Y6, Y10, Y10
-	VMOVDQU 3456(CX), Y7
-	VMOVDQU 3488(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
-	VMOVDQU 3520(CX), Y7
-	VMOVDQU 3552(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 3584(CX), Y7
-	VMOVDQU 3616(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
-	VMOVDQU 3648(CX), Y7
-	VMOVDQU 3680(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
-	VMOVDQU 3712(CX), Y7
-	VMOVDQU 3744(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
-	VMOVDQU 3776(CX), Y7
-	VMOVDQU 3808(CX), Y8
-	VPSHUFB Y9, Y7, Y7
-	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x8_64_loop:
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 8 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 8 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
 
-	// Store 6 outputs
-	MOVQ    (R14), BP
-	VMOVDQU Y0, (BP)(R15*1)
-	MOVQ    24(R14), BP
-	VMOVDQU Y1, (BP)(R15*1)
-	MOVQ    48(R14), BP
-	VMOVDQU Y2, (BP)(R15*1)
-	MOVQ    72(R14), BP
-	VMOVDQU Y3, (BP)(R15*1)
-	MOVQ    96(R14), BP
-	VMOVDQU Y4, (BP)(R15*1)
-	MOVQ    120(R14), BP
-	VMOVDQU Y5, (BP)(R15*1)
+	// Store 8 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z22, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      168(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R15
+	ADDQ $0x40, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x6_loop
+	JNZ  mulGFNI_10x8_64_loop
 	VZEROUPPER
 
-mulAvxTwo_10x6_end:
+mulGFNI_10x8_64_end:
 	RET
 
-// func mulAvxTwo_10x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x7(SB), NOSPLIT, $8-88
+// func mulGFNI_10x8_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x8_64Xor(SB), $8-88
+	// Loading 22 of 80 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 90 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x8_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	VBROADCASTF32X2 168(CX), Z21
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x8_64Xor_loop:
+	// Load 8 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 (BP)(R15*1), Z22
+	MOVQ      24(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z23
+	MOVQ      48(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z24
+	MOVQ      72(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z25
+	MOVQ      96(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z26
+	MOVQ      120(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z27
+	MOVQ      144(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z28
+	MOVQ      168(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z29
+
+	// Load and process 64 bytes from input 0 to 8 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 8 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 8 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z16, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z17, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB      $0x00, Z21, Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 8 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 8 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 8 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 8 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 8 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 8 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 8 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 8 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z22, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      168(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x8_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_10x8_64Xor_end:
+	RET
+
+// func mulAvxTwo_10x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x8Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 152 YMM used
+	// Full registers estimated 173 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x7_end
+	JZ    mulAvxTwo_10x8Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -33151,510 +91350,1085 @@ TEXT ·mulAvxTwo_10x7(SB), NOSPLIT, $8-88
 	ADDQ         R15, R13
 	ADDQ         R15, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X7
-	VPBROADCASTB X7, Y7
-
-mulAvxTwo_10x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
 
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (BX), Y10
+mulAvxTwo_10x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU (CX), Y8
-	VMOVDQU 32(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	MOVQ    (R14), BP
+	VMOVDQU (BP)(R15*1), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 64(CX), Y8
-	VMOVDQU 96(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	MOVQ    24(R14), BP
+	VMOVDQU (BP)(R15*1), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 128(CX), Y8
-	VMOVDQU 160(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	MOVQ    48(R14), BP
+	VMOVDQU (BP)(R15*1), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 192(CX), Y8
-	VMOVDQU 224(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	MOVQ    72(R14), BP
+	VMOVDQU (BP)(R15*1), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	MOVQ    96(R14), BP
+	VMOVDQU (BP)(R15*1), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 320(CX), Y8
-	VMOVDQU 352(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	MOVQ    120(R14), BP
+	VMOVDQU (BP)(R15*1), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 384(CX), Y8
-	VMOVDQU 416(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	MOVQ    144(R14), BP
+	VMOVDQU (BP)(R15*1), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	MOVQ    168(R14), BP
+	VMOVDQU (BP)(R15*1), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 1 to 7 outputs
-	VMOVDQU (SI), Y10
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 448(CX), Y8
-	VMOVDQU 480(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 512(CX), Y8
-	VMOVDQU 544(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 576(CX), Y8
-	VMOVDQU 608(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 640(CX), Y8
-	VMOVDQU 672(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 704(CX), Y8
-	VMOVDQU 736(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 768(CX), Y8
-	VMOVDQU 800(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 832(CX), Y8
-	VMOVDQU 864(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 2 to 7 outputs
-	VMOVDQU (DI), Y10
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 896(CX), Y8
-	VMOVDQU 928(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 960(CX), Y8
-	VMOVDQU 992(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1024(CX), Y8
-	VMOVDQU 1056(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1088(CX), Y8
-	VMOVDQU 1120(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1152(CX), Y8
-	VMOVDQU 1184(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1216(CX), Y8
-	VMOVDQU 1248(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1280(CX), Y8
-	VMOVDQU 1312(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 3 to 7 outputs
-	VMOVDQU (R8), Y10
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (R8), Y11
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1344(CX), Y8
-	VMOVDQU 1376(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1408(CX), Y8
-	VMOVDQU 1440(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1472(CX), Y8
-	VMOVDQU 1504(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1536(CX), Y8
-	VMOVDQU 1568(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 1600(CX), Y8
-	VMOVDQU 1632(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 1664(CX), Y8
-	VMOVDQU 1696(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 1728(CX), Y8
-	VMOVDQU 1760(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 4 to 7 outputs
-	VMOVDQU (R9), Y10
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (R9), Y11
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 1792(CX), Y8
-	VMOVDQU 1824(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 1856(CX), Y8
-	VMOVDQU 1888(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 1920(CX), Y8
-	VMOVDQU 1952(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 1984(CX), Y8
-	VMOVDQU 2016(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2048(CX), Y8
-	VMOVDQU 2080(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2112(CX), Y8
-	VMOVDQU 2144(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2176(CX), Y8
-	VMOVDQU 2208(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 5 to 7 outputs
-	VMOVDQU (R10), Y10
+	// Load and process 32 bytes from input 5 to 8 outputs
+	VMOVDQU (R10), Y11
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2240(CX), Y8
-	VMOVDQU 2272(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2560(CX), Y9
+	VMOVDQU 2592(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2304(CX), Y8
-	VMOVDQU 2336(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 2624(CX), Y9
+	VMOVDQU 2656(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2368(CX), Y8
-	VMOVDQU 2400(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 2688(CX), Y9
+	VMOVDQU 2720(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2432(CX), Y8
-	VMOVDQU 2464(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 2752(CX), Y9
+	VMOVDQU 2784(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2496(CX), Y8
-	VMOVDQU 2528(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 2816(CX), Y9
+	VMOVDQU 2848(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 2560(CX), Y8
-	VMOVDQU 2592(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 2880(CX), Y9
+	VMOVDQU 2912(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 2624(CX), Y8
-	VMOVDQU 2656(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 2944(CX), Y9
+	VMOVDQU 2976(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3008(CX), Y9
+	VMOVDQU 3040(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 6 to 7 outputs
-	VMOVDQU (R11), Y10
+	// Load and process 32 bytes from input 6 to 8 outputs
+	VMOVDQU (R11), Y11
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 2688(CX), Y8
-	VMOVDQU 2720(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3072(CX), Y9
+	VMOVDQU 3104(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 2752(CX), Y8
-	VMOVDQU 2784(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3136(CX), Y9
+	VMOVDQU 3168(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 2816(CX), Y8
-	VMOVDQU 2848(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3200(CX), Y9
+	VMOVDQU 3232(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 2880(CX), Y8
-	VMOVDQU 2912(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3264(CX), Y9
+	VMOVDQU 3296(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 2944(CX), Y8
-	VMOVDQU 2976(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3328(CX), Y9
+	VMOVDQU 3360(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3008(CX), Y8
-	VMOVDQU 3040(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3392(CX), Y9
+	VMOVDQU 3424(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3072(CX), Y8
-	VMOVDQU 3104(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3456(CX), Y9
+	VMOVDQU 3488(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 3520(CX), Y9
+	VMOVDQU 3552(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 7 to 7 outputs
-	VMOVDQU (R12), Y10
+	// Load and process 32 bytes from input 7 to 8 outputs
+	VMOVDQU (R12), Y11
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 3136(CX), Y8
-	VMOVDQU 3168(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 3584(CX), Y9
+	VMOVDQU 3616(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 3648(CX), Y9
+	VMOVDQU 3680(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 3712(CX), Y9
+	VMOVDQU 3744(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 3776(CX), Y9
+	VMOVDQU 3808(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 3840(CX), Y9
+	VMOVDQU 3872(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 3904(CX), Y9
+	VMOVDQU 3936(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 3968(CX), Y9
+	VMOVDQU 4000(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4032(CX), Y9
+	VMOVDQU 4064(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Load and process 32 bytes from input 8 to 8 outputs
+	VMOVDQU (R13), Y11
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 4096(CX), Y9
+	VMOVDQU 4128(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 4160(CX), Y9
+	VMOVDQU 4192(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 3200(CX), Y8
-	VMOVDQU 3232(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 4224(CX), Y9
+	VMOVDQU 4256(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 3264(CX), Y8
-	VMOVDQU 3296(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 4288(CX), Y9
+	VMOVDQU 4320(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 3328(CX), Y8
-	VMOVDQU 3360(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 4352(CX), Y9
+	VMOVDQU 4384(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 3392(CX), Y8
-	VMOVDQU 3424(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 4416(CX), Y9
+	VMOVDQU 4448(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3456(CX), Y8
-	VMOVDQU 3488(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 4480(CX), Y9
+	VMOVDQU 4512(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3520(CX), Y8
-	VMOVDQU 3552(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 4544(CX), Y9
+	VMOVDQU 4576(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
 
-	// Load and process 32 bytes from input 8 to 7 outputs
-	VMOVDQU (R13), Y10
-	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 3584(CX), Y8
-	VMOVDQU 3616(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	// Load and process 32 bytes from input 9 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 4608(CX), Y9
+	VMOVDQU 4640(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 3648(CX), Y8
-	VMOVDQU 3680(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y0)
+	VMOVDQU 4672(CX), Y9
+	VMOVDQU 4704(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 3712(CX), Y8
-	VMOVDQU 3744(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y1)
+	VMOVDQU 4736(CX), Y9
+	VMOVDQU 4768(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 3776(CX), Y8
-	VMOVDQU 3808(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y2)
+	VMOVDQU 4800(CX), Y9
+	VMOVDQU 4832(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 3840(CX), Y8
-	VMOVDQU 3872(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y3)
+	VMOVDQU 4864(CX), Y9
+	VMOVDQU 4896(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 3904(CX), Y8
-	VMOVDQU 3936(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y4)
+	VMOVDQU 4928(CX), Y9
+	VMOVDQU 4960(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 3968(CX), Y8
-	VMOVDQU 4000(CX), Y9
-	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+	VMOVDQU 4992(CX), Y9
+	VMOVDQU 5024(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y6)
+	VMOVDQU 5056(CX), Y9
+	VMOVDQU 5088(CX), Y10
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSHUFB Y12, Y10, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+
+	// Store 8 outputs
+	MOVQ    (R14), BP
+	VMOVDQU Y0, (BP)(R15*1)
+	MOVQ    24(R14), BP
+	VMOVDQU Y1, (BP)(R15*1)
+	MOVQ    48(R14), BP
+	VMOVDQU Y2, (BP)(R15*1)
+	MOVQ    72(R14), BP
+	VMOVDQU Y3, (BP)(R15*1)
+	MOVQ    96(R14), BP
+	VMOVDQU Y4, (BP)(R15*1)
+	MOVQ    120(R14), BP
+	VMOVDQU Y5, (BP)(R15*1)
+	MOVQ    144(R14), BP
+	VMOVDQU Y6, (BP)(R15*1)
+	MOVQ    168(R14), BP
+	VMOVDQU Y7, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R15
+	DECQ AX
+	JNZ  mulAvxTwo_10x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_10x8Xor_end:
+	RET
+
+// func mulAvxTwo_10x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x9(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 194 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_10x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), R10
+	MOVQ  144(DX), R11
+	MOVQ  168(DX), R12
+	MOVQ  192(DX), R13
+	MOVQ  216(DX), DX
+	MOVQ  out_base+48(FP), R14
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, R9
+	ADDQ         R15, R10
+	ADDQ         R15, R11
+	ADDQ         R15, R12
+	ADDQ         R15, R13
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_10x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
+	ADDQ    $0x20, R9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
+	ADDQ    $0x20, R10
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (R11), Y12
+	ADDQ    $0x20, R11
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 7 to 9 outputs
+	VMOVDQU (R12), Y12
+	ADDQ    $0x20, R12
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4032(CX), Y10
+	VMOVDQU 4064(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4096(CX), Y10
+	VMOVDQU 4128(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4160(CX), Y10
+	VMOVDQU 4192(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4224(CX), Y10
+	VMOVDQU 4256(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4288(CX), Y10
+	VMOVDQU 4320(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4352(CX), Y10
+	VMOVDQU 4384(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4416(CX), Y10
+	VMOVDQU 4448(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 4480(CX), Y10
+	VMOVDQU 4512(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 4544(CX), Y10
+	VMOVDQU 4576(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
+
+	// Load and process 32 bytes from input 8 to 9 outputs
+	VMOVDQU (R13), Y12
+	ADDQ    $0x20, R13
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4608(CX), Y10
+	VMOVDQU 4640(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4672(CX), Y10
+	VMOVDQU 4704(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4736(CX), Y10
+	VMOVDQU 4768(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4800(CX), Y10
+	VMOVDQU 4832(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4864(CX), Y10
+	VMOVDQU 4896(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4928(CX), Y10
+	VMOVDQU 4960(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4992(CX), Y10
+	VMOVDQU 5024(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 5056(CX), Y10
+	VMOVDQU 5088(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 5120(CX), Y10
+	VMOVDQU 5152(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 9 to 7 outputs
-	VMOVDQU (DX), Y10
+	// Load and process 32 bytes from input 9 to 9 outputs
+	VMOVDQU (DX), Y12
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU 4032(CX), Y8
-	VMOVDQU 4064(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 4096(CX), Y8
-	VMOVDQU 4128(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 4160(CX), Y8
-	VMOVDQU 4192(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 4224(CX), Y8
-	VMOVDQU 4256(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 4288(CX), Y8
-	VMOVDQU 4320(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 4352(CX), Y8
-	VMOVDQU 4384(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 4416(CX), Y8
-	VMOVDQU 4448(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 5184(CX), Y10
+	VMOVDQU 5216(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 5248(CX), Y10
+	VMOVDQU 5280(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 5312(CX), Y10
+	VMOVDQU 5344(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 5376(CX), Y10
+	VMOVDQU 5408(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 5440(CX), Y10
+	VMOVDQU 5472(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 5504(CX), Y10
+	VMOVDQU 5536(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 5568(CX), Y10
+	VMOVDQU 5600(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 5632(CX), Y10
+	VMOVDQU 5664(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 5696(CX), Y10
+	VMOVDQU 5728(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Store 7 outputs
+	// Store 9 outputs
 	MOVQ    (R14), BP
 	VMOVDQU Y0, (BP)(R15*1)
 	MOVQ    24(R14), BP
@@ -33669,27 +92443,660 @@ mulAvxTwo_10x7_loop:
 	VMOVDQU Y5, (BP)(R15*1)
 	MOVQ    144(R14), BP
 	VMOVDQU Y6, (BP)(R15*1)
+	MOVQ    168(R14), BP
+	VMOVDQU Y7, (BP)(R15*1)
+	MOVQ    192(R14), BP
+	VMOVDQU Y8, (BP)(R15*1)
 
 	// Prepare for next loop
 	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x7_loop
+	JNZ  mulAvxTwo_10x9_loop
 	VZEROUPPER
 
-mulAvxTwo_10x7_end:
+mulAvxTwo_10x9_end:
 	RET
 
-// func mulAvxTwo_10x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x8(SB), NOSPLIT, $8-88
+// func mulGFNI_10x9_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x9_64(SB), $8-88
+	// Loading 21 of 90 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 101 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x9_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x9_64_loop:
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 9 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 9 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 648(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 656(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 664(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 672(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 680(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 688(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 696(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 704(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 712(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z21, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z22, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      168(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      192(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x9_64_loop
+	VZEROUPPER
+
+mulGFNI_10x9_64_end:
+	RET
+
+// func mulGFNI_10x9_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x9_64Xor(SB), $8-88
+	// Loading 21 of 90 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 101 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x9_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	VBROADCASTF32X2 160(CX), Z20
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x9_64Xor_loop:
+	// Load 9 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 (BP)(R15*1), Z21
+	MOVQ      24(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z22
+	MOVQ      48(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z23
+	MOVQ      72(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z24
+	MOVQ      96(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z25
+	MOVQ      120(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z26
+	MOVQ      144(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z27
+	MOVQ      168(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z28
+	MOVQ      192(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z29
+
+	// Load and process 64 bytes from input 0 to 9 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 9 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 9 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB      $0x00, Z18, Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB      $0x00, Z19, Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB      $0x00, Z20, Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 9 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 9 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 9 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 9 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 9 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 9 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 9 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 648(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 656(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 664(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 672(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 680(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 688(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 696(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 704(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 712(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 9 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z21, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z22, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      168(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      192(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x9_64Xor_loop
+	VZEROUPPER
+
+mulGFNI_10x9_64Xor_end:
+	RET
+
+// func mulAvxTwo_10x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x9Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 173 YMM used
+	// Full registers estimated 194 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x8_end
+	JZ    mulAvxTwo_10x9Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -33716,571 +93123,549 @@ TEXT ·mulAvxTwo_10x8(SB), NOSPLIT, $8-88
 	ADDQ         R15, R13
 	ADDQ         R15, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X8
-	VPBROADCASTB X8, Y8
-
-mulAvxTwo_10x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	MOVQ         BP, X9
+	VPBROADCASTB X9, Y9
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_10x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	MOVQ    (R14), BP
+	VMOVDQU (BP)(R15*1), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	MOVQ    24(R14), BP
+	VMOVDQU (BP)(R15*1), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	MOVQ    48(R14), BP
+	VMOVDQU (BP)(R15*1), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	MOVQ    72(R14), BP
+	VMOVDQU (BP)(R15*1), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	MOVQ    96(R14), BP
+	VMOVDQU (BP)(R15*1), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	MOVQ    120(R14), BP
+	VMOVDQU (BP)(R15*1), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	MOVQ    144(R14), BP
+	VMOVDQU (BP)(R15*1), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	MOVQ    168(R14), BP
+	VMOVDQU (BP)(R15*1), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	MOVQ    192(R14), BP
+	VMOVDQU (BP)(R15*1), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (SI), Y11
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y9
-	VMOVDQU 672(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 704(CX), Y9
-	VMOVDQU 736(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 768(CX), Y9
-	VMOVDQU 800(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 832(CX), Y9
-	VMOVDQU 864(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 896(CX), Y9
-	VMOVDQU 928(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 960(CX), Y9
-	VMOVDQU 992(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 2 to 8 outputs
-	VMOVDQU (DI), Y11
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1024(CX), Y9
-	VMOVDQU 1056(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1088(CX), Y9
-	VMOVDQU 1120(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1152(CX), Y9
-	VMOVDQU 1184(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1216(CX), Y9
-	VMOVDQU 1248(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1280(CX), Y9
-	VMOVDQU 1312(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1344(CX), Y9
-	VMOVDQU 1376(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1408(CX), Y9
-	VMOVDQU 1440(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1472(CX), Y9
-	VMOVDQU 1504(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 3 to 8 outputs
-	VMOVDQU (R8), Y11
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 1536(CX), Y9
-	VMOVDQU 1568(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 1600(CX), Y9
-	VMOVDQU 1632(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 1664(CX), Y9
-	VMOVDQU 1696(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 1728(CX), Y9
-	VMOVDQU 1760(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 1792(CX), Y9
-	VMOVDQU 1824(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 1856(CX), Y9
-	VMOVDQU 1888(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 1920(CX), Y9
-	VMOVDQU 1952(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 1984(CX), Y9
-	VMOVDQU 2016(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 4 to 8 outputs
-	VMOVDQU (R9), Y11
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (R9), Y12
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2048(CX), Y9
-	VMOVDQU 2080(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2112(CX), Y9
-	VMOVDQU 2144(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2176(CX), Y9
-	VMOVDQU 2208(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2240(CX), Y9
-	VMOVDQU 2272(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2304(CX), Y9
-	VMOVDQU 2336(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2368(CX), Y9
-	VMOVDQU 2400(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2432(CX), Y9
-	VMOVDQU 2464(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 2496(CX), Y9
-	VMOVDQU 2528(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 5 to 8 outputs
-	VMOVDQU (R10), Y11
+	// Load and process 32 bytes from input 5 to 9 outputs
+	VMOVDQU (R10), Y12
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 2560(CX), Y9
-	VMOVDQU 2592(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2880(CX), Y10
+	VMOVDQU 2912(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 2624(CX), Y9
-	VMOVDQU 2656(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 2944(CX), Y10
+	VMOVDQU 2976(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 2688(CX), Y9
-	VMOVDQU 2720(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3008(CX), Y10
+	VMOVDQU 3040(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 2752(CX), Y9
-	VMOVDQU 2784(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3072(CX), Y10
+	VMOVDQU 3104(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 2816(CX), Y9
-	VMOVDQU 2848(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3136(CX), Y10
+	VMOVDQU 3168(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 2880(CX), Y9
-	VMOVDQU 2912(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3200(CX), Y10
+	VMOVDQU 3232(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 2944(CX), Y9
-	VMOVDQU 2976(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3264(CX), Y10
+	VMOVDQU 3296(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3008(CX), Y9
-	VMOVDQU 3040(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3328(CX), Y10
+	VMOVDQU 3360(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3392(CX), Y10
+	VMOVDQU 3424(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 6 to 8 outputs
-	VMOVDQU (R11), Y11
+	// Load and process 32 bytes from input 6 to 9 outputs
+	VMOVDQU (R11), Y12
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 3072(CX), Y9
-	VMOVDQU 3104(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 3456(CX), Y10
+	VMOVDQU 3488(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 3136(CX), Y9
-	VMOVDQU 3168(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 3520(CX), Y10
+	VMOVDQU 3552(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 3200(CX), Y9
-	VMOVDQU 3232(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 3584(CX), Y10
+	VMOVDQU 3616(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 3264(CX), Y9
-	VMOVDQU 3296(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 3648(CX), Y10
+	VMOVDQU 3680(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 3328(CX), Y9
-	VMOVDQU 3360(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 3712(CX), Y10
+	VMOVDQU 3744(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 3392(CX), Y9
-	VMOVDQU 3424(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 3776(CX), Y10
+	VMOVDQU 3808(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 3456(CX), Y9
-	VMOVDQU 3488(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 3840(CX), Y10
+	VMOVDQU 3872(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 3520(CX), Y9
-	VMOVDQU 3552(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 3904(CX), Y10
+	VMOVDQU 3936(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 3968(CX), Y10
+	VMOVDQU 4000(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 7 to 8 outputs
-	VMOVDQU (R12), Y11
+	// Load and process 32 bytes from input 7 to 9 outputs
+	VMOVDQU (R12), Y12
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 3584(CX), Y9
-	VMOVDQU 3616(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4032(CX), Y10
+	VMOVDQU 4064(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 3648(CX), Y9
-	VMOVDQU 3680(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4096(CX), Y10
+	VMOVDQU 4128(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 3712(CX), Y9
-	VMOVDQU 3744(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4160(CX), Y10
+	VMOVDQU 4192(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 3776(CX), Y9
-	VMOVDQU 3808(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4224(CX), Y10
+	VMOVDQU 4256(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 3840(CX), Y9
-	VMOVDQU 3872(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4288(CX), Y10
+	VMOVDQU 4320(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 3904(CX), Y9
-	VMOVDQU 3936(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4352(CX), Y10
+	VMOVDQU 4384(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 3968(CX), Y9
-	VMOVDQU 4000(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4416(CX), Y10
+	VMOVDQU 4448(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 4032(CX), Y9
-	VMOVDQU 4064(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 4480(CX), Y10
+	VMOVDQU 4512(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 4544(CX), Y10
+	VMOVDQU 4576(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 8 to 8 outputs
-	VMOVDQU (R13), Y11
+	// Load and process 32 bytes from input 8 to 9 outputs
+	VMOVDQU (R13), Y12
 	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 4096(CX), Y9
-	VMOVDQU 4128(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 4608(CX), Y10
+	VMOVDQU 4640(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 4160(CX), Y9
-	VMOVDQU 4192(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 4672(CX), Y10
+	VMOVDQU 4704(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 4224(CX), Y9
-	VMOVDQU 4256(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 4736(CX), Y10
+	VMOVDQU 4768(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 4288(CX), Y9
-	VMOVDQU 4320(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 4800(CX), Y10
+	VMOVDQU 4832(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 4352(CX), Y9
-	VMOVDQU 4384(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 4864(CX), Y10
+	VMOVDQU 4896(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 4416(CX), Y9
-	VMOVDQU 4448(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 4928(CX), Y10
+	VMOVDQU 4960(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 4480(CX), Y9
-	VMOVDQU 4512(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 4992(CX), Y10
+	VMOVDQU 5024(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 4544(CX), Y9
-	VMOVDQU 4576(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 5056(CX), Y10
+	VMOVDQU 5088(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 5120(CX), Y10
+	VMOVDQU 5152(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Load and process 32 bytes from input 9 to 8 outputs
-	VMOVDQU (DX), Y11
+	// Load and process 32 bytes from input 9 to 9 outputs
+	VMOVDQU (DX), Y12
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 4608(CX), Y9
-	VMOVDQU 4640(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 5184(CX), Y10
+	VMOVDQU 5216(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 4672(CX), Y9
-	VMOVDQU 4704(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y0)
+	VMOVDQU 5248(CX), Y10
+	VMOVDQU 5280(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 4736(CX), Y9
-	VMOVDQU 4768(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y1)
+	VMOVDQU 5312(CX), Y10
+	VMOVDQU 5344(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 4800(CX), Y9
-	VMOVDQU 4832(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y2)
+	VMOVDQU 5376(CX), Y10
+	VMOVDQU 5408(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 4864(CX), Y9
-	VMOVDQU 4896(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y3)
+	VMOVDQU 5440(CX), Y10
+	VMOVDQU 5472(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 4928(CX), Y9
-	VMOVDQU 4960(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y4)
+	VMOVDQU 5504(CX), Y10
+	VMOVDQU 5536(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 4992(CX), Y9
-	VMOVDQU 5024(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y5)
+	VMOVDQU 5568(CX), Y10
+	VMOVDQU 5600(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 5056(CX), Y9
-	VMOVDQU 5088(CX), Y10
-	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y6)
+	VMOVDQU 5632(CX), Y10
+	VMOVDQU 5664(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y7)
+	VMOVDQU 5696(CX), Y10
+	VMOVDQU 5728(CX), Y11
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPSHUFB Y13, Y11, Y11
+	XOR3WAY( $0x00, Y10, Y11, Y8)
 
-	// Store 8 outputs
+	// Store 9 outputs
 	MOVQ    (R14), BP
 	VMOVDQU Y0, (BP)(R15*1)
 	MOVQ    24(R14), BP
@@ -34297,27 +93682,29 @@ mulAvxTwo_10x8_loop:
 	VMOVDQU Y6, (BP)(R15*1)
 	MOVQ    168(R14), BP
 	VMOVDQU Y7, (BP)(R15*1)
+	MOVQ    192(R14), BP
+	VMOVDQU Y8, (BP)(R15*1)
 
 	// Prepare for next loop
 	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x8_loop
+	JNZ  mulAvxTwo_10x9Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_10x8_end:
+mulAvxTwo_10x9Xor_end:
 	RET
 
-// func mulAvxTwo_10x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x9(SB), NOSPLIT, $8-88
+// func mulAvxTwo_10x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x10(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 194 YMM used
+	// Full registers estimated 215 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x9_end
+	JZ    mulAvxTwo_10x10_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -34344,632 +93731,581 @@ TEXT ·mulAvxTwo_10x9(SB), NOSPLIT, $8-88
 	ADDQ         R15, R13
 	ADDQ         R15, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X9
-	VPBROADCASTB X9, Y9
-
-mulAvxTwo_10x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	MOVQ         BP, X10
+	VPBROADCASTB X10, Y10
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
+mulAvxTwo_10x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
 
-	// Load and process 32 bytes from input 1 to 9 outputs
-	VMOVDQU (SI), Y12
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 576(CX), Y10
-	VMOVDQU 608(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 640(CX), Y10
-	VMOVDQU 672(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 704(CX), Y10
-	VMOVDQU 736(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 768(CX), Y10
-	VMOVDQU 800(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 832(CX), Y10
-	VMOVDQU 864(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 896(CX), Y10
-	VMOVDQU 928(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 960(CX), Y10
-	VMOVDQU 992(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1024(CX), Y10
-	VMOVDQU 1056(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1088(CX), Y10
-	VMOVDQU 1120(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 2 to 9 outputs
-	VMOVDQU (DI), Y12
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1152(CX), Y10
-	VMOVDQU 1184(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1216(CX), Y10
-	VMOVDQU 1248(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1280(CX), Y10
-	VMOVDQU 1312(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1344(CX), Y10
-	VMOVDQU 1376(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1408(CX), Y10
-	VMOVDQU 1440(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 1472(CX), Y10
-	VMOVDQU 1504(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 1536(CX), Y10
-	VMOVDQU 1568(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 1600(CX), Y10
-	VMOVDQU 1632(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 1664(CX), Y10
-	VMOVDQU 1696(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 3 to 9 outputs
-	VMOVDQU (R8), Y12
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 1728(CX), Y10
-	VMOVDQU 1760(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 1792(CX), Y10
-	VMOVDQU 1824(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 1856(CX), Y10
-	VMOVDQU 1888(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 1920(CX), Y10
-	VMOVDQU 1952(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 1984(CX), Y10
-	VMOVDQU 2016(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2048(CX), Y10
-	VMOVDQU 2080(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2112(CX), Y10
-	VMOVDQU 2144(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2176(CX), Y10
-	VMOVDQU 2208(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2240(CX), Y10
-	VMOVDQU 2272(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 4 to 9 outputs
-	VMOVDQU (R9), Y12
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (R9), Y13
 	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2304(CX), Y10
-	VMOVDQU 2336(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2368(CX), Y10
-	VMOVDQU 2400(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 2432(CX), Y10
-	VMOVDQU 2464(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 2496(CX), Y10
-	VMOVDQU 2528(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 2560(CX), Y10
-	VMOVDQU 2592(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 2624(CX), Y10
-	VMOVDQU 2656(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 2688(CX), Y10
-	VMOVDQU 2720(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 2752(CX), Y10
-	VMOVDQU 2784(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 2816(CX), Y10
-	VMOVDQU 2848(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 5 to 9 outputs
-	VMOVDQU (R10), Y12
+	// Load and process 32 bytes from input 5 to 10 outputs
+	VMOVDQU (R10), Y13
 	ADDQ    $0x20, R10
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 2880(CX), Y10
-	VMOVDQU 2912(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3200(CX), Y11
+	VMOVDQU 3232(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 2944(CX), Y10
-	VMOVDQU 2976(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3264(CX), Y11
+	VMOVDQU 3296(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3008(CX), Y10
-	VMOVDQU 3040(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3328(CX), Y11
+	VMOVDQU 3360(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3072(CX), Y10
-	VMOVDQU 3104(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 3392(CX), Y11
+	VMOVDQU 3424(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3136(CX), Y10
-	VMOVDQU 3168(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 3456(CX), Y11
+	VMOVDQU 3488(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3200(CX), Y10
-	VMOVDQU 3232(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 3520(CX), Y11
+	VMOVDQU 3552(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3264(CX), Y10
-	VMOVDQU 3296(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 3584(CX), Y11
+	VMOVDQU 3616(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3328(CX), Y10
-	VMOVDQU 3360(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 3648(CX), Y11
+	VMOVDQU 3680(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3392(CX), Y10
-	VMOVDQU 3424(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 3712(CX), Y11
+	VMOVDQU 3744(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 3776(CX), Y11
+	VMOVDQU 3808(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 6 to 9 outputs
-	VMOVDQU (R11), Y12
+	// Load and process 32 bytes from input 6 to 10 outputs
+	VMOVDQU (R11), Y13
 	ADDQ    $0x20, R11
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 3456(CX), Y10
-	VMOVDQU 3488(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 3840(CX), Y11
+	VMOVDQU 3872(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 3520(CX), Y10
-	VMOVDQU 3552(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 3904(CX), Y11
+	VMOVDQU 3936(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 3584(CX), Y10
-	VMOVDQU 3616(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 3968(CX), Y11
+	VMOVDQU 4000(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 3648(CX), Y10
-	VMOVDQU 3680(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4032(CX), Y11
+	VMOVDQU 4064(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 3712(CX), Y10
-	VMOVDQU 3744(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4096(CX), Y11
+	VMOVDQU 4128(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 3776(CX), Y10
-	VMOVDQU 3808(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4160(CX), Y11
+	VMOVDQU 4192(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 3840(CX), Y10
-	VMOVDQU 3872(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4224(CX), Y11
+	VMOVDQU 4256(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 3904(CX), Y10
-	VMOVDQU 3936(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4288(CX), Y11
+	VMOVDQU 4320(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 3968(CX), Y10
-	VMOVDQU 4000(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4352(CX), Y11
+	VMOVDQU 4384(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 4416(CX), Y11
+	VMOVDQU 4448(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 7 to 9 outputs
-	VMOVDQU (R12), Y12
+	// Load and process 32 bytes from input 7 to 10 outputs
+	VMOVDQU (R12), Y13
 	ADDQ    $0x20, R12
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 4032(CX), Y10
-	VMOVDQU 4064(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 4480(CX), Y11
+	VMOVDQU 4512(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 4096(CX), Y10
-	VMOVDQU 4128(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 4544(CX), Y11
+	VMOVDQU 4576(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 4160(CX), Y10
-	VMOVDQU 4192(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 4608(CX), Y11
+	VMOVDQU 4640(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 4224(CX), Y10
-	VMOVDQU 4256(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 4672(CX), Y11
+	VMOVDQU 4704(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 4288(CX), Y10
-	VMOVDQU 4320(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 4736(CX), Y11
+	VMOVDQU 4768(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 4352(CX), Y10
-	VMOVDQU 4384(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 4800(CX), Y11
+	VMOVDQU 4832(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 4416(CX), Y10
-	VMOVDQU 4448(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 4864(CX), Y11
+	VMOVDQU 4896(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 4480(CX), Y10
-	VMOVDQU 4512(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 4928(CX), Y11
+	VMOVDQU 4960(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 4544(CX), Y10
-	VMOVDQU 4576(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 4992(CX), Y11
+	VMOVDQU 5024(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5056(CX), Y11
+	VMOVDQU 5088(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 8 to 9 outputs
-	VMOVDQU (R13), Y12
+	// Load and process 32 bytes from input 8 to 10 outputs
+	VMOVDQU (R13), Y13
 	ADDQ    $0x20, R13
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 4608(CX), Y10
-	VMOVDQU 4640(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 5120(CX), Y11
+	VMOVDQU 5152(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 4672(CX), Y10
-	VMOVDQU 4704(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 5184(CX), Y11
+	VMOVDQU 5216(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 4736(CX), Y10
-	VMOVDQU 4768(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 5248(CX), Y11
+	VMOVDQU 5280(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 4800(CX), Y10
-	VMOVDQU 4832(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 5312(CX), Y11
+	VMOVDQU 5344(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 4864(CX), Y10
-	VMOVDQU 4896(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 5376(CX), Y11
+	VMOVDQU 5408(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 4928(CX), Y10
-	VMOVDQU 4960(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 5440(CX), Y11
+	VMOVDQU 5472(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 4992(CX), Y10
-	VMOVDQU 5024(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 5504(CX), Y11
+	VMOVDQU 5536(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 5056(CX), Y10
-	VMOVDQU 5088(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 5568(CX), Y11
+	VMOVDQU 5600(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 5120(CX), Y10
-	VMOVDQU 5152(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 5632(CX), Y11
+	VMOVDQU 5664(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 5696(CX), Y11
+	VMOVDQU 5728(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Load and process 32 bytes from input 9 to 9 outputs
-	VMOVDQU (DX), Y12
+	// Load and process 32 bytes from input 9 to 10 outputs
+	VMOVDQU (DX), Y13
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 5184(CX), Y10
-	VMOVDQU 5216(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 5760(CX), Y11
+	VMOVDQU 5792(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 5248(CX), Y10
-	VMOVDQU 5280(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	VMOVDQU 5824(CX), Y11
+	VMOVDQU 5856(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 5312(CX), Y10
-	VMOVDQU 5344(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	VMOVDQU 5888(CX), Y11
+	VMOVDQU 5920(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 5376(CX), Y10
-	VMOVDQU 5408(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	VMOVDQU 5952(CX), Y11
+	VMOVDQU 5984(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 5440(CX), Y10
-	VMOVDQU 5472(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	VMOVDQU 6016(CX), Y11
+	VMOVDQU 6048(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 5504(CX), Y10
-	VMOVDQU 5536(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VMOVDQU 6080(CX), Y11
+	VMOVDQU 6112(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 5568(CX), Y10
-	VMOVDQU 5600(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	VMOVDQU 6144(CX), Y11
+	VMOVDQU 6176(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 5632(CX), Y10
-	VMOVDQU 5664(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU 6208(CX), Y11
+	VMOVDQU 6240(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 5696(CX), Y10
-	VMOVDQU 5728(CX), Y11
-	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	VMOVDQU 6272(CX), Y11
+	VMOVDQU 6304(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU 6336(CX), Y11
+	VMOVDQU 6368(CX), Y12
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPSHUFB Y14, Y12, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
-	// Store 9 outputs
+	// Store 10 outputs
 	MOVQ    (R14), BP
 	VMOVDQU Y0, (BP)(R15*1)
 	MOVQ    24(R14), BP
@@ -34988,19 +94324,693 @@ mulAvxTwo_10x9_loop:
 	VMOVDQU Y7, (BP)(R15*1)
 	MOVQ    192(R14), BP
 	VMOVDQU Y8, (BP)(R15*1)
+	MOVQ    216(R14), BP
+	VMOVDQU Y9, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R15
+	DECQ AX
+	JNZ  mulAvxTwo_10x10_loop
+	VZEROUPPER
+
+mulAvxTwo_10x10_end:
+	RET
+
+// func mulGFNI_10x10_64(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x10_64(SB), $8-88
+	// Loading 20 of 100 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 112 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x10_64_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x10_64_loop:
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 10 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 10 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 648(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 656(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 664(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 672(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 680(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 688(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 696(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 704(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 712(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 720(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 728(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 736(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 744(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 752(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 760(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 768(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 776(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 784(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 792(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z20, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z21, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z22, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      168(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      192(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      216(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
+
+	// Prepare for next loop
+	ADDQ $0x40, R15
+	DECQ AX
+	JNZ  mulGFNI_10x10_64_loop
+	VZEROUPPER
+
+mulGFNI_10x10_64_end:
+	RET
+
+// func mulGFNI_10x10_64Xor(matrix []uint64, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·mulGFNI_10x10_64Xor(SB), $8-88
+	// Loading 20 of 100 tables to registers
+	// Destination kept on stack
+	// Full registers estimated 112 YMM used
+	MOVQ            n+80(FP), AX
+	MOVQ            matrix_base+0(FP), CX
+	SHRQ            $0x06, AX
+	TESTQ           AX, AX
+	JZ              mulGFNI_10x10_64Xor_end
+	VBROADCASTF32X2 (CX), Z0
+	VBROADCASTF32X2 8(CX), Z1
+	VBROADCASTF32X2 16(CX), Z2
+	VBROADCASTF32X2 24(CX), Z3
+	VBROADCASTF32X2 32(CX), Z4
+	VBROADCASTF32X2 40(CX), Z5
+	VBROADCASTF32X2 48(CX), Z6
+	VBROADCASTF32X2 56(CX), Z7
+	VBROADCASTF32X2 64(CX), Z8
+	VBROADCASTF32X2 72(CX), Z9
+	VBROADCASTF32X2 80(CX), Z10
+	VBROADCASTF32X2 88(CX), Z11
+	VBROADCASTF32X2 96(CX), Z12
+	VBROADCASTF32X2 104(CX), Z13
+	VBROADCASTF32X2 112(CX), Z14
+	VBROADCASTF32X2 120(CX), Z15
+	VBROADCASTF32X2 128(CX), Z16
+	VBROADCASTF32X2 136(CX), Z17
+	VBROADCASTF32X2 144(CX), Z18
+	VBROADCASTF32X2 152(CX), Z19
+	MOVQ            in_base+24(FP), DX
+	MOVQ            (DX), BX
+	MOVQ            24(DX), SI
+	MOVQ            48(DX), DI
+	MOVQ            72(DX), R8
+	MOVQ            96(DX), R9
+	MOVQ            120(DX), R10
+	MOVQ            144(DX), R11
+	MOVQ            168(DX), R12
+	MOVQ            192(DX), R13
+	MOVQ            216(DX), DX
+	MOVQ            out_base+48(FP), R14
+	MOVQ            out_base+48(FP), R14
+	MOVQ            start+72(FP), R15
+
+	// Add start offset to input
+	ADDQ R15, BX
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, DX
+
+mulGFNI_10x10_64Xor_loop:
+	// Load 10 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 (BP)(R15*1), Z20
+	MOVQ      24(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z21
+	MOVQ      48(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z22
+	MOVQ      72(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z23
+	MOVQ      96(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z24
+	MOVQ      120(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z25
+	MOVQ      144(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z26
+	MOVQ      168(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z27
+	MOVQ      192(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z28
+	MOVQ      216(R14), BP
+	VMOVDQU64 (BP)(R15*1), Z29
+
+	// Load and process 64 bytes from input 0 to 10 outputs
+	VMOVDQU64      (BX), Z30
+	ADDQ           $0x40, BX
+	VGF2P8AFFINEQB $0x00, Z0, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z1, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z2, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z3, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z4, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z5, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z6, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z7, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z8, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z9, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 1 to 10 outputs
+	VMOVDQU64      (SI), Z30
+	ADDQ           $0x40, SI
+	VGF2P8AFFINEQB $0x00, Z10, Z30, Z31
+	VXORPD         Z20, Z31, Z20
+	VGF2P8AFFINEQB $0x00, Z11, Z30, Z31
+	VXORPD         Z21, Z31, Z21
+	VGF2P8AFFINEQB $0x00, Z12, Z30, Z31
+	VXORPD         Z22, Z31, Z22
+	VGF2P8AFFINEQB $0x00, Z13, Z30, Z31
+	VXORPD         Z23, Z31, Z23
+	VGF2P8AFFINEQB $0x00, Z14, Z30, Z31
+	VXORPD         Z24, Z31, Z24
+	VGF2P8AFFINEQB $0x00, Z15, Z30, Z31
+	VXORPD         Z25, Z31, Z25
+	VGF2P8AFFINEQB $0x00, Z16, Z30, Z31
+	VXORPD         Z26, Z31, Z26
+	VGF2P8AFFINEQB $0x00, Z17, Z30, Z31
+	VXORPD         Z27, Z31, Z27
+	VGF2P8AFFINEQB $0x00, Z18, Z30, Z31
+	VXORPD         Z28, Z31, Z28
+	VGF2P8AFFINEQB $0x00, Z19, Z30, Z31
+	VXORPD         Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 2 to 10 outputs
+	VMOVDQU64           (DI), Z30
+	ADDQ                $0x40, DI
+	VGF2P8AFFINEQB.BCST $0x00, 160(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 168(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 176(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 184(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 192(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 200(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 208(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 216(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 224(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 232(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 3 to 10 outputs
+	VMOVDQU64           (R8), Z30
+	ADDQ                $0x40, R8
+	VGF2P8AFFINEQB.BCST $0x00, 240(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 248(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 256(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 264(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 272(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 280(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 288(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 296(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 304(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 312(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 4 to 10 outputs
+	VMOVDQU64           (R9), Z30
+	ADDQ                $0x40, R9
+	VGF2P8AFFINEQB.BCST $0x00, 320(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 328(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 336(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 344(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 352(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 360(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 368(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 376(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 384(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 392(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 5 to 10 outputs
+	VMOVDQU64           (R10), Z30
+	ADDQ                $0x40, R10
+	VGF2P8AFFINEQB.BCST $0x00, 400(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 408(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 416(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 424(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 432(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 440(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 448(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 456(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 464(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 472(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 6 to 10 outputs
+	VMOVDQU64           (R11), Z30
+	ADDQ                $0x40, R11
+	VGF2P8AFFINEQB.BCST $0x00, 480(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 488(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 496(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 504(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 512(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 520(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 528(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 536(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 544(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 552(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 7 to 10 outputs
+	VMOVDQU64           (R12), Z30
+	ADDQ                $0x40, R12
+	VGF2P8AFFINEQB.BCST $0x00, 560(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 568(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 576(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 584(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 592(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 600(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 608(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 616(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 624(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 632(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 8 to 10 outputs
+	VMOVDQU64           (R13), Z30
+	ADDQ                $0x40, R13
+	VGF2P8AFFINEQB.BCST $0x00, 640(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 648(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 656(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 664(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 672(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 680(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 688(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 696(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 704(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 712(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Load and process 64 bytes from input 9 to 10 outputs
+	VMOVDQU64           (DX), Z30
+	ADDQ                $0x40, DX
+	VGF2P8AFFINEQB.BCST $0x00, 720(CX), Z30, Z31
+	VXORPD              Z20, Z31, Z20
+	VGF2P8AFFINEQB.BCST $0x00, 728(CX), Z30, Z31
+	VXORPD              Z21, Z31, Z21
+	VGF2P8AFFINEQB.BCST $0x00, 736(CX), Z30, Z31
+	VXORPD              Z22, Z31, Z22
+	VGF2P8AFFINEQB.BCST $0x00, 744(CX), Z30, Z31
+	VXORPD              Z23, Z31, Z23
+	VGF2P8AFFINEQB.BCST $0x00, 752(CX), Z30, Z31
+	VXORPD              Z24, Z31, Z24
+	VGF2P8AFFINEQB.BCST $0x00, 760(CX), Z30, Z31
+	VXORPD              Z25, Z31, Z25
+	VGF2P8AFFINEQB.BCST $0x00, 768(CX), Z30, Z31
+	VXORPD              Z26, Z31, Z26
+	VGF2P8AFFINEQB.BCST $0x00, 776(CX), Z30, Z31
+	VXORPD              Z27, Z31, Z27
+	VGF2P8AFFINEQB.BCST $0x00, 784(CX), Z30, Z31
+	VXORPD              Z28, Z31, Z28
+	VGF2P8AFFINEQB.BCST $0x00, 792(CX), Z30, Z31
+	VXORPD              Z29, Z31, Z29
+
+	// Store 10 outputs
+	MOVQ      (R14), BP
+	VMOVDQU64 Z20, (BP)(R15*1)
+	MOVQ      24(R14), BP
+	VMOVDQU64 Z21, (BP)(R15*1)
+	MOVQ      48(R14), BP
+	VMOVDQU64 Z22, (BP)(R15*1)
+	MOVQ      72(R14), BP
+	VMOVDQU64 Z23, (BP)(R15*1)
+	MOVQ      96(R14), BP
+	VMOVDQU64 Z24, (BP)(R15*1)
+	MOVQ      120(R14), BP
+	VMOVDQU64 Z25, (BP)(R15*1)
+	MOVQ      144(R14), BP
+	VMOVDQU64 Z26, (BP)(R15*1)
+	MOVQ      168(R14), BP
+	VMOVDQU64 Z27, (BP)(R15*1)
+	MOVQ      192(R14), BP
+	VMOVDQU64 Z28, (BP)(R15*1)
+	MOVQ      216(R14), BP
+	VMOVDQU64 Z29, (BP)(R15*1)
 
 	// Prepare for next loop
-	ADDQ $0x20, R15
+	ADDQ $0x40, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x9_loop
+	JNZ  mulGFNI_10x10_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_10x9_end:
+mulGFNI_10x10_64Xor_end:
 	RET
 
-// func mulAvxTwo_10x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_10x10(SB), NOSPLIT, $8-88
+// func mulAvxTwo_10x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·mulAvxTwo_10x10Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept on stack
 	// Full registers estimated 215 YMM used
@@ -35008,7 +95018,7 @@ TEXT ·mulAvxTwo_10x10(SB), NOSPLIT, $8-88
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_10x10_end
+	JZ    mulAvxTwo_10x10Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -35038,85 +95048,83 @@ TEXT ·mulAvxTwo_10x10(SB), NOSPLIT, $8-88
 	MOVQ         BP, X10
 	VPBROADCASTB X10, Y10
 
-mulAvxTwo_10x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
-
+mulAvxTwo_10x10Xor_loop:
 	// Load and process 32 bytes from input 0 to 10 outputs
 	VMOVDQU (BX), Y13
 	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y10, Y13, Y13
 	VPAND   Y10, Y14, Y14
+	MOVQ    (R14), BP
+	VMOVDQU (BP)(R15*1), Y0
 	VMOVDQU (CX), Y11
 	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
+	MOVQ    24(R14), BP
+	VMOVDQU (BP)(R15*1), Y1
 	VMOVDQU 64(CX), Y11
 	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
+	MOVQ    48(R14), BP
+	VMOVDQU (BP)(R15*1), Y2
 	VMOVDQU 128(CX), Y11
 	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
+	MOVQ    72(R14), BP
+	VMOVDQU (BP)(R15*1), Y3
 	VMOVDQU 192(CX), Y11
 	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+	MOVQ    96(R14), BP
+	VMOVDQU (BP)(R15*1), Y4
 	VMOVDQU 256(CX), Y11
 	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	MOVQ    120(R14), BP
+	VMOVDQU (BP)(R15*1), Y5
 	VMOVDQU 320(CX), Y11
 	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+	MOVQ    144(R14), BP
+	VMOVDQU (BP)(R15*1), Y6
 	VMOVDQU 384(CX), Y11
 	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	MOVQ    168(R14), BP
+	VMOVDQU (BP)(R15*1), Y7
 	VMOVDQU 448(CX), Y11
 	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+	MOVQ    192(R14), BP
+	VMOVDQU (BP)(R15*1), Y8
 	VMOVDQU 512(CX), Y11
 	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	MOVQ    216(R14), BP
+	VMOVDQU (BP)(R15*1), Y9
 	VMOVDQU 576(CX), Y11
 	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 1 to 10 outputs
 	VMOVDQU (SI), Y13
@@ -35128,62 +95136,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 672(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 704(CX), Y11
 	VMOVDQU 736(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 768(CX), Y11
 	VMOVDQU 800(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 832(CX), Y11
 	VMOVDQU 864(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 896(CX), Y11
 	VMOVDQU 928(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 960(CX), Y11
 	VMOVDQU 992(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 1024(CX), Y11
 	VMOVDQU 1056(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 1088(CX), Y11
 	VMOVDQU 1120(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 1152(CX), Y11
 	VMOVDQU 1184(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 1216(CX), Y11
 	VMOVDQU 1248(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 2 to 10 outputs
 	VMOVDQU (DI), Y13
@@ -35195,62 +95193,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 1312(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 1344(CX), Y11
 	VMOVDQU 1376(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 1408(CX), Y11
 	VMOVDQU 1440(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 1472(CX), Y11
 	VMOVDQU 1504(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 1536(CX), Y11
 	VMOVDQU 1568(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 1600(CX), Y11
 	VMOVDQU 1632(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 1664(CX), Y11
 	VMOVDQU 1696(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 1728(CX), Y11
 	VMOVDQU 1760(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 1792(CX), Y11
 	VMOVDQU 1824(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 1856(CX), Y11
 	VMOVDQU 1888(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 3 to 10 outputs
 	VMOVDQU (R8), Y13
@@ -35262,62 +95250,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 1952(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 1984(CX), Y11
 	VMOVDQU 2016(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 2048(CX), Y11
 	VMOVDQU 2080(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 2112(CX), Y11
 	VMOVDQU 2144(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 2176(CX), Y11
 	VMOVDQU 2208(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 2240(CX), Y11
 	VMOVDQU 2272(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 2304(CX), Y11
 	VMOVDQU 2336(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 2368(CX), Y11
 	VMOVDQU 2400(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 2432(CX), Y11
 	VMOVDQU 2464(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 2496(CX), Y11
 	VMOVDQU 2528(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 4 to 10 outputs
 	VMOVDQU (R9), Y13
@@ -35329,62 +95307,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 2592(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 2624(CX), Y11
 	VMOVDQU 2656(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 2688(CX), Y11
 	VMOVDQU 2720(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 2752(CX), Y11
 	VMOVDQU 2784(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 2816(CX), Y11
 	VMOVDQU 2848(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 2880(CX), Y11
 	VMOVDQU 2912(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 2944(CX), Y11
 	VMOVDQU 2976(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 3008(CX), Y11
 	VMOVDQU 3040(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 3072(CX), Y11
 	VMOVDQU 3104(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 3136(CX), Y11
 	VMOVDQU 3168(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 5 to 10 outputs
 	VMOVDQU (R10), Y13
@@ -35396,62 +95364,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 3232(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 3264(CX), Y11
 	VMOVDQU 3296(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 3328(CX), Y11
 	VMOVDQU 3360(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 3392(CX), Y11
 	VMOVDQU 3424(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 3456(CX), Y11
 	VMOVDQU 3488(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 3520(CX), Y11
 	VMOVDQU 3552(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 3584(CX), Y11
 	VMOVDQU 3616(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 3648(CX), Y11
 	VMOVDQU 3680(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 3712(CX), Y11
 	VMOVDQU 3744(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 3776(CX), Y11
 	VMOVDQU 3808(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 6 to 10 outputs
 	VMOVDQU (R11), Y13
@@ -35463,62 +95421,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 3872(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 3904(CX), Y11
 	VMOVDQU 3936(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 3968(CX), Y11
 	VMOVDQU 4000(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 4032(CX), Y11
 	VMOVDQU 4064(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 4096(CX), Y11
 	VMOVDQU 4128(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 4160(CX), Y11
 	VMOVDQU 4192(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 4224(CX), Y11
 	VMOVDQU 4256(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 4288(CX), Y11
 	VMOVDQU 4320(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 4352(CX), Y11
 	VMOVDQU 4384(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 4416(CX), Y11
 	VMOVDQU 4448(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 7 to 10 outputs
 	VMOVDQU (R12), Y13
@@ -35530,62 +95478,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 4512(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 4544(CX), Y11
 	VMOVDQU 4576(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 4608(CX), Y11
 	VMOVDQU 4640(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 4672(CX), Y11
 	VMOVDQU 4704(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 4736(CX), Y11
 	VMOVDQU 4768(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 4800(CX), Y11
 	VMOVDQU 4832(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 4864(CX), Y11
 	VMOVDQU 4896(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 4928(CX), Y11
 	VMOVDQU 4960(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 4992(CX), Y11
 	VMOVDQU 5024(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 5056(CX), Y11
 	VMOVDQU 5088(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 8 to 10 outputs
 	VMOVDQU (R13), Y13
@@ -35597,62 +95535,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 5152(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 5184(CX), Y11
 	VMOVDQU 5216(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 5248(CX), Y11
 	VMOVDQU 5280(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 5312(CX), Y11
 	VMOVDQU 5344(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 5376(CX), Y11
 	VMOVDQU 5408(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 5440(CX), Y11
 	VMOVDQU 5472(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 5504(CX), Y11
 	VMOVDQU 5536(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 5568(CX), Y11
 	VMOVDQU 5600(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 5632(CX), Y11
 	VMOVDQU 5664(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 5696(CX), Y11
 	VMOVDQU 5728(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Load and process 32 bytes from input 9 to 10 outputs
 	VMOVDQU (DX), Y13
@@ -35664,62 +95592,52 @@ mulAvxTwo_10x10_loop:
 	VMOVDQU 5792(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	XOR3WAY( $0x00, Y11, Y12, Y0)
 	VMOVDQU 5824(CX), Y11
 	VMOVDQU 5856(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	XOR3WAY( $0x00, Y11, Y12, Y1)
 	VMOVDQU 5888(CX), Y11
 	VMOVDQU 5920(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	XOR3WAY( $0x00, Y11, Y12, Y2)
 	VMOVDQU 5952(CX), Y11
 	VMOVDQU 5984(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	XOR3WAY( $0x00, Y11, Y12, Y3)
 	VMOVDQU 6016(CX), Y11
 	VMOVDQU 6048(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	XOR3WAY( $0x00, Y11, Y12, Y4)
 	VMOVDQU 6080(CX), Y11
 	VMOVDQU 6112(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	XOR3WAY( $0x00, Y11, Y12, Y5)
 	VMOVDQU 6144(CX), Y11
 	VMOVDQU 6176(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	XOR3WAY( $0x00, Y11, Y12, Y6)
 	VMOVDQU 6208(CX), Y11
 	VMOVDQU 6240(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	XOR3WAY( $0x00, Y11, Y12, Y7)
 	VMOVDQU 6272(CX), Y11
 	VMOVDQU 6304(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	XOR3WAY( $0x00, Y11, Y12, Y8)
 	VMOVDQU 6336(CX), Y11
 	VMOVDQU 6368(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	XOR3WAY( $0x00, Y11, Y12, Y9)
 
 	// Store 10 outputs
 	MOVQ    (R14), BP
@@ -35746,8 +95664,6446 @@ mulAvxTwo_10x10_loop:
 	// Prepare for next loop
 	ADDQ $0x20, R15
 	DECQ AX
-	JNZ  mulAvxTwo_10x10_loop
+	JNZ  mulAvxTwo_10x10Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_10x10_end:
+mulAvxTwo_10x10Xor_end:
+	RET
+
+// func ifftDIT2_avx2(x []byte, y []byte, table *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT2_avx2(SB), NOSPLIT, $0-56
+	MOVQ           table+48(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 64(AX), Y1
+	VBROADCASTI128 16(AX), Y2
+	VBROADCASTI128 80(AX), Y3
+	VBROADCASTI128 32(AX), Y4
+	VBROADCASTI128 96(AX), Y5
+	VBROADCASTI128 48(AX), Y6
+	VBROADCASTI128 112(AX), Y7
+	MOVQ           x_len+8(FP), AX
+	MOVQ           x_base+0(FP), CX
+	MOVQ           y_base+24(FP), DX
+	MOVQ           $0x0000000f, BX
+	MOVQ           BX, X8
+	VPBROADCASTB   X8, Y8
+
+loop:
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y12
+	VPXOR   Y11, Y9, Y11
+	VPXOR   Y12, Y10, Y12
+	VMOVDQU Y11, (DX)
+	VMOVDQU Y12, 32(DX)
+	VPSRLQ  $0x04, Y11, Y13
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y13, Y13
+	VPSHUFB Y11, Y0, Y14
+	VPSHUFB Y11, Y1, Y11
+	VPSHUFB Y13, Y2, Y15
+	VPSHUFB Y13, Y3, Y13
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y11, Y13, Y11
+	VPAND   Y12, Y8, Y13
+	VPSRLQ  $0x04, Y12, Y12
+	VPAND   Y8, Y12, Y12
+	VPSHUFB Y13, Y4, Y15
+	VPSHUFB Y13, Y5, Y13
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y11, Y13, Y11
+	VPSHUFB Y12, Y6, Y15
+	VPSHUFB Y12, Y7, Y13
+	XOR3WAY( $0x00, Y14, Y15, Y9)
+	XOR3WAY( $0x00, Y11, Y13, Y10)
+	VMOVDQU Y9, (CX)
+	VMOVDQU Y10, 32(CX)
+	ADDQ    $0x40, CX
+	ADDQ    $0x40, DX
+	SUBQ    $0x40, AX
+	JNZ     loop
+	VZEROUPPER
+	RET
+
+// func fftDIT2_avx2(x []byte, y []byte, table *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT2_avx2(SB), NOSPLIT, $0-56
+	MOVQ           table+48(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 64(AX), Y1
+	VBROADCASTI128 16(AX), Y2
+	VBROADCASTI128 80(AX), Y3
+	VBROADCASTI128 32(AX), Y4
+	VBROADCASTI128 96(AX), Y5
+	VBROADCASTI128 48(AX), Y6
+	VBROADCASTI128 112(AX), Y7
+	MOVQ           x_len+8(FP), AX
+	MOVQ           x_base+0(FP), CX
+	MOVQ           y_base+24(FP), DX
+	MOVQ           $0x0000000f, BX
+	MOVQ           BX, X8
+	VPBROADCASTB   X8, Y8
+
+loop:
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y12
+	VPSRLQ  $0x04, Y11, Y13
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y13, Y13
+	VPSHUFB Y11, Y0, Y14
+	VPSHUFB Y11, Y1, Y11
+	VPSHUFB Y13, Y2, Y15
+	VPSHUFB Y13, Y3, Y13
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y11, Y13, Y11
+	VPAND   Y12, Y8, Y13
+	VPSRLQ  $0x04, Y12, Y12
+	VPAND   Y8, Y12, Y12
+	VPSHUFB Y13, Y4, Y15
+	VPSHUFB Y13, Y5, Y13
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y11, Y13, Y11
+	VPSHUFB Y12, Y6, Y15
+	VPSHUFB Y12, Y7, Y13
+	XOR3WAY( $0x00, Y14, Y15, Y9)
+	XOR3WAY( $0x00, Y11, Y13, Y10)
+	VMOVDQU Y9, (CX)
+	VMOVDQU Y10, 32(CX)
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y12
+	VPXOR   Y11, Y9, Y11
+	VPXOR   Y12, Y10, Y12
+	VMOVDQU Y11, (DX)
+	VMOVDQU Y12, 32(DX)
+	ADDQ    $0x40, CX
+	ADDQ    $0x40, DX
+	SUBQ    $0x40, AX
+	JNZ     loop
+	VZEROUPPER
+	RET
+
+// func mulgf16_avx2(x []byte, y []byte, table *[128]uint8)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulgf16_avx2(SB), NOSPLIT, $0-56
+	MOVQ           table+48(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 64(AX), Y1
+	VBROADCASTI128 16(AX), Y2
+	VBROADCASTI128 80(AX), Y3
+	VBROADCASTI128 32(AX), Y4
+	VBROADCASTI128 96(AX), Y5
+	VBROADCASTI128 48(AX), Y6
+	VBROADCASTI128 112(AX), Y7
+	MOVQ           x_len+8(FP), AX
+	MOVQ           x_base+0(FP), CX
+	MOVQ           y_base+24(FP), DX
+	MOVQ           $0x0000000f, BX
+	MOVQ           BX, X8
+	VPBROADCASTB   X8, Y8
+
+loop:
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y10
+	VPSRLQ  $0x04, Y9, Y11
+	VPAND   Y8, Y9, Y9
+	VPAND   Y8, Y11, Y11
+	VPSHUFB Y9, Y0, Y12
+	VPSHUFB Y9, Y1, Y9
+	VPSHUFB Y11, Y2, Y13
+	VPSHUFB Y11, Y3, Y11
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y9, Y11, Y9
+	VPAND   Y10, Y8, Y11
+	VPSRLQ  $0x04, Y10, Y10
+	VPAND   Y8, Y10, Y10
+	VPSHUFB Y11, Y4, Y13
+	VPSHUFB Y11, Y5, Y11
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y9, Y11, Y9
+	VPSHUFB Y10, Y6, Y13
+	VPSHUFB Y10, Y7, Y11
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y9, Y11, Y9
+	VMOVDQU Y12, (CX)
+	VMOVDQU Y9, 32(CX)
+	ADDQ    $0x40, CX
+	ADDQ    $0x40, DX
+	SUBQ    $0x40, AX
+	JNZ     loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT4_avx512_0(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), DX
+	VBROADCASTI128 (DX), Y1
+	VBROADCASTI128 64(DX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(DX), Y1
+	VBROADCASTI128 80(DX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(DX), Y1
+	VBROADCASTI128 96(DX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(DX), Y1
+	VBROADCASTI128 112(DX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), DX
+	MOVQ           8(DX), BX
+	XORQ           SI, SI
+	MOVQ           (DX)(SI*1), DI
+	ADDQ           AX, SI
+	MOVQ           (DX)(SI*1), R8
+	ADDQ           AX, SI
+	MOVQ           (DX)(SI*1), R9
+	ADDQ           AX, SI
+	MOVQ           (DX)(SI*1), AX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VPSRLQ         $0x04, Y3, Y6
+	VPAND          Y0, Y3, Y5
+	VPAND          Y0, Y6, Y6
+	VPSHUFB        Y5, Y24, Y7
+	VPSHUFB        Y5, Y25, Y5
+	VPSHUFB        Y6, Y26, Y8
+	VPSHUFB        Y6, Y27, Y6
+	VPXOR          Y7, Y8, Y7
+	VPXOR          Y5, Y6, Y5
+	VPAND          Y4, Y0, Y6
+	VPSRLQ         $0x04, Y4, Y8
+	VPAND          Y0, Y8, Y8
+	VPSHUFB        Y6, Y28, Y9
+	VPSHUFB        Y6, Y29, Y6
+	VPXOR          Y7, Y9, Y7
+	VPXOR          Y5, Y6, Y5
+	VPSHUFB        Y8, Y30, Y9
+	VPSHUFB        Y8, Y31, Y6
+	VPTERNLOGD     $0x96, Y7, Y9, Y1
+	VPTERNLOGD     $0x96, Y5, Y6, Y2
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (AX), Y7
+	VMOVDQU        32(AX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	VPTERNLOGD     $0x96, Y11, Y13, Y5
+	VPTERNLOGD     $0x96, Y9, Y10, Y6
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VPSHUFB        Y9, Y16, Y11
+	VPSHUFB        Y9, Y17, Y9
+	VPSHUFB        Y10, Y18, Y12
+	VPSHUFB        Y10, Y19, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VPSHUFB        Y10, Y20, Y13
+	VPSHUFB        Y10, Y21, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VPSHUFB        Y12, Y22, Y13
+	VPSHUFB        Y12, Y23, Y10
+	VPTERNLOGD     $0x96, Y11, Y13, Y1
+	VPTERNLOGD     $0x96, Y9, Y10, Y2
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VPSHUFB        Y9, Y16, Y11
+	VPSHUFB        Y9, Y17, Y9
+	VPSHUFB        Y10, Y18, Y12
+	VPSHUFB        Y10, Y19, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VPSHUFB        Y10, Y20, Y13
+	VPSHUFB        Y10, Y21, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VPSHUFB        Y12, Y22, Y13
+	VPSHUFB        Y12, Y23, Y10
+	VPTERNLOGD     $0x96, Y11, Y13, Y3
+	VPTERNLOGD     $0x96, Y9, Y10, Y4
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (AX)
+	VMOVDQU        Y8, 32(AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT4_avx512_0(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), DX
+	VBROADCASTI128 (DX), Y1
+	VBROADCASTI128 64(DX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(DX), Y1
+	VBROADCASTI128 80(DX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(DX), Y1
+	VBROADCASTI128 96(DX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(DX), Y1
+	VBROADCASTI128 112(DX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), DX
+	MOVQ           8(DX), BX
+	XORQ           SI, SI
+	MOVQ           (DX)(SI*1), DI
+	ADDQ           AX, SI
+	MOVQ           (DX)(SI*1), R8
+	ADDQ           AX, SI
+	MOVQ           (DX)(SI*1), R9
+	ADDQ           AX, SI
+	MOVQ           (DX)(SI*1), AX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VMOVDQU        (AX), Y7
+	VMOVDQU        32(AX), Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VPSHUFB        Y9, Y16, Y11
+	VPSHUFB        Y9, Y17, Y9
+	VPSHUFB        Y10, Y18, Y12
+	VPSHUFB        Y10, Y19, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VPSHUFB        Y10, Y20, Y13
+	VPSHUFB        Y10, Y21, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VPSHUFB        Y12, Y22, Y13
+	VPSHUFB        Y12, Y23, Y10
+	VPTERNLOGD     $0x96, Y11, Y13, Y1
+	VPTERNLOGD     $0x96, Y9, Y10, Y2
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VPSHUFB        Y9, Y16, Y11
+	VPSHUFB        Y9, Y17, Y9
+	VPSHUFB        Y10, Y18, Y12
+	VPSHUFB        Y10, Y19, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VPSHUFB        Y10, Y20, Y13
+	VPSHUFB        Y10, Y21, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VPSHUFB        Y12, Y22, Y13
+	VPSHUFB        Y12, Y23, Y10
+	VPTERNLOGD     $0x96, Y11, Y13, Y3
+	VPTERNLOGD     $0x96, Y9, Y10, Y4
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y3, Y10
+	VPAND          Y0, Y3, Y9
+	VPAND          Y0, Y10, Y10
+	VPSHUFB        Y9, Y24, Y11
+	VPSHUFB        Y9, Y25, Y9
+	VPSHUFB        Y10, Y26, Y12
+	VPSHUFB        Y10, Y27, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y4, Y0, Y10
+	VPSRLQ         $0x04, Y4, Y12
+	VPAND          Y0, Y12, Y12
+	VPSHUFB        Y10, Y28, Y13
+	VPSHUFB        Y10, Y29, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VPSHUFB        Y12, Y30, Y13
+	VPSHUFB        Y12, Y31, Y10
+	VPTERNLOGD     $0x96, Y11, Y13, Y1
+	VPTERNLOGD     $0x96, Y9, Y10, Y2
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VPSRLQ         $0x04, Y7, Y2
+	VPAND          Y0, Y7, Y1
+	VPAND          Y0, Y2, Y2
+	VBROADCASTI128 (CX), Y3
+	VBROADCASTI128 64(CX), Y4
+	VPSHUFB        Y1, Y3, Y3
+	VPSHUFB        Y1, Y4, Y1
+	VBROADCASTI128 16(CX), Y4
+	VBROADCASTI128 80(CX), Y9
+	VPSHUFB        Y2, Y4, Y4
+	VPSHUFB        Y2, Y9, Y2
+	VPXOR          Y3, Y4, Y3
+	VPXOR          Y1, Y2, Y1
+	VPAND          Y8, Y0, Y2
+	VPSRLQ         $0x04, Y8, Y4
+	VPAND          Y0, Y4, Y4
+	VBROADCASTI128 32(CX), Y9
+	VBROADCASTI128 96(CX), Y10
+	VPSHUFB        Y2, Y9, Y9
+	VPSHUFB        Y2, Y10, Y2
+	VPXOR          Y3, Y9, Y3
+	VPXOR          Y1, Y2, Y1
+	VBROADCASTI128 48(CX), Y9
+	VBROADCASTI128 112(CX), Y2
+	VPSHUFB        Y4, Y9, Y9
+	VPSHUFB        Y4, Y2, Y2
+	VPTERNLOGD     $0x96, Y3, Y9, Y5
+	VPTERNLOGD     $0x96, Y1, Y2, Y6
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (AX)
+	VMOVDQU        Y8, 32(AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT4_avx512_1(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), AX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (CX), Y1
+	VBROADCASTI128 64(CX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(CX), Y1
+	VBROADCASTI128 80(CX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(CX), Y1
+	VBROADCASTI128 96(CX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(CX), Y1
+	VBROADCASTI128 112(CX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y24, Y11
+	VPSHUFB    Y9, Y25, Y9
+	VPSHUFB    Y10, Y26, Y12
+	VPSHUFB    Y10, Y27, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y28, Y13
+	VPSHUFB    Y10, Y29, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y30, Y13
+	VPSHUFB    Y12, Y31, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y5
+	VPTERNLOGD $0x96, Y9, Y10, Y6
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPSRLQ     $0x04, Y5, Y10
+	VPAND      Y0, Y5, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y6, Y0, Y10
+	VPSRLQ     $0x04, Y6, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y3
+	VPTERNLOGD $0x96, Y9, Y10, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT4_avx512_1(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), DX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (CX), Y1
+	VBROADCASTI128 64(CX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(CX), Y1
+	VBROADCASTI128 80(CX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(CX), Y1
+	VBROADCASTI128 96(CX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(CX), Y1
+	VBROADCASTI128 112(CX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPSRLQ     $0x04, Y3, Y10
+	VPAND      Y0, Y3, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y4, Y0, Y10
+	VPSRLQ     $0x04, Y4, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VPSRLQ     $0x04, Y7, Y2
+	VPAND      Y0, Y7, Y1
+	VPAND      Y0, Y2, Y2
+	VPSHUFB    Y1, Y24, Y3
+	VPSHUFB    Y1, Y25, Y1
+	VPSHUFB    Y2, Y26, Y4
+	VPSHUFB    Y2, Y27, Y2
+	VPXOR      Y3, Y4, Y3
+	VPXOR      Y1, Y2, Y1
+	VPAND      Y8, Y0, Y2
+	VPSRLQ     $0x04, Y8, Y4
+	VPAND      Y0, Y4, Y4
+	VPSHUFB    Y2, Y28, Y9
+	VPSHUFB    Y2, Y29, Y2
+	VPXOR      Y3, Y9, Y3
+	VPXOR      Y1, Y2, Y1
+	VPSHUFB    Y4, Y30, Y9
+	VPSHUFB    Y4, Y31, Y2
+	VPTERNLOGD $0x96, Y3, Y9, Y5
+	VPTERNLOGD $0x96, Y1, Y2, Y6
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT4_avx512_2(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (CX), Y1
+	VBROADCASTI128 64(CX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(CX), Y1
+	VBROADCASTI128 80(CX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(CX), Y1
+	VBROADCASTI128 96(CX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(CX), Y1
+	VBROADCASTI128 112(CX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VPSRLQ     $0x04, Y3, Y6
+	VPAND      Y0, Y3, Y5
+	VPAND      Y0, Y6, Y6
+	VPSHUFB    Y5, Y24, Y7
+	VPSHUFB    Y5, Y25, Y5
+	VPSHUFB    Y6, Y26, Y8
+	VPSHUFB    Y6, Y27, Y6
+	VPXOR      Y7, Y8, Y7
+	VPXOR      Y5, Y6, Y5
+	VPAND      Y4, Y0, Y6
+	VPSRLQ     $0x04, Y4, Y8
+	VPAND      Y0, Y8, Y8
+	VPSHUFB    Y6, Y28, Y9
+	VPSHUFB    Y6, Y29, Y6
+	VPXOR      Y7, Y9, Y7
+	VPXOR      Y5, Y6, Y5
+	VPSHUFB    Y8, Y30, Y9
+	VPSHUFB    Y8, Y31, Y6
+	VPTERNLOGD $0x96, Y7, Y9, Y1
+	VPTERNLOGD $0x96, Y5, Y6, Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPSRLQ     $0x04, Y5, Y10
+	VPAND      Y0, Y5, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y6, Y0, Y10
+	VPSRLQ     $0x04, Y6, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y3
+	VPTERNLOGD $0x96, Y9, Y10, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT4_avx512_2(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), AX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (CX), Y1
+	VBROADCASTI128 64(CX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(CX), Y1
+	VBROADCASTI128 80(CX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(CX), Y1
+	VBROADCASTI128 96(CX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(CX), Y1
+	VBROADCASTI128 112(CX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPSRLQ     $0x04, Y5, Y10
+	VPAND      Y0, Y5, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y6, Y0, Y10
+	VPSRLQ     $0x04, Y6, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y3
+	VPTERNLOGD $0x96, Y9, Y10, Y4
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VPSRLQ     $0x04, Y7, Y2
+	VPAND      Y0, Y7, Y1
+	VPAND      Y0, Y2, Y2
+	VPSHUFB    Y1, Y24, Y3
+	VPSHUFB    Y1, Y25, Y1
+	VPSHUFB    Y2, Y26, Y4
+	VPSHUFB    Y2, Y27, Y2
+	VPXOR      Y3, Y4, Y3
+	VPXOR      Y1, Y2, Y1
+	VPAND      Y8, Y0, Y2
+	VPSRLQ     $0x04, Y8, Y4
+	VPAND      Y0, Y4, Y4
+	VPSHUFB    Y2, Y28, Y9
+	VPSHUFB    Y2, Y29, Y2
+	VPXOR      Y3, Y9, Y3
+	VPXOR      Y1, Y2, Y1
+	VPSHUFB    Y4, Y30, Y9
+	VPSHUFB    Y4, Y31, Y2
+	VPTERNLOGD $0x96, Y3, Y9, Y5
+	VPTERNLOGD $0x96, Y1, Y2, Y6
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT4_avx512_3(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), AX
+	MOVQ           table02+48(FP), AX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPSRLQ     $0x04, Y5, Y10
+	VPAND      Y0, Y5, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y6, Y0, Y10
+	VPSRLQ     $0x04, Y6, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y3
+	VPTERNLOGD $0x96, Y9, Y10, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT4_avx512_3(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), AX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VPSRLQ     $0x04, Y7, Y2
+	VPAND      Y0, Y7, Y1
+	VPAND      Y0, Y2, Y2
+	VPSHUFB    Y1, Y16, Y3
+	VPSHUFB    Y1, Y17, Y1
+	VPSHUFB    Y2, Y18, Y4
+	VPSHUFB    Y2, Y19, Y2
+	VPXOR      Y3, Y4, Y3
+	VPXOR      Y1, Y2, Y1
+	VPAND      Y8, Y0, Y2
+	VPSRLQ     $0x04, Y8, Y4
+	VPAND      Y0, Y4, Y4
+	VPSHUFB    Y2, Y20, Y9
+	VPSHUFB    Y2, Y21, Y2
+	VPXOR      Y3, Y9, Y3
+	VPXOR      Y1, Y2, Y1
+	VPSHUFB    Y4, Y22, Y9
+	VPSHUFB    Y4, Y23, Y2
+	VPTERNLOGD $0x96, Y3, Y9, Y5
+	VPTERNLOGD $0x96, Y1, Y2, Y6
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT4_avx512_4(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), DX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (CX), Y1
+	VBROADCASTI128 64(CX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(CX), Y1
+	VBROADCASTI128 80(CX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(CX), Y1
+	VBROADCASTI128 96(CX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(CX), Y1
+	VBROADCASTI128 112(CX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VPSRLQ     $0x04, Y3, Y6
+	VPAND      Y0, Y3, Y5
+	VPAND      Y0, Y6, Y6
+	VPSHUFB    Y5, Y16, Y7
+	VPSHUFB    Y5, Y17, Y5
+	VPSHUFB    Y6, Y18, Y8
+	VPSHUFB    Y6, Y19, Y6
+	VPXOR      Y7, Y8, Y7
+	VPXOR      Y5, Y6, Y5
+	VPAND      Y4, Y0, Y6
+	VPSRLQ     $0x04, Y4, Y8
+	VPAND      Y0, Y8, Y8
+	VPSHUFB    Y6, Y20, Y9
+	VPSHUFB    Y6, Y21, Y6
+	VPXOR      Y7, Y9, Y7
+	VPXOR      Y5, Y6, Y5
+	VPSHUFB    Y8, Y22, Y9
+	VPSHUFB    Y8, Y23, Y6
+	VPTERNLOGD $0x96, Y7, Y9, Y1
+	VPTERNLOGD $0x96, Y5, Y6, Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y24, Y11
+	VPSHUFB    Y9, Y25, Y9
+	VPSHUFB    Y10, Y26, Y12
+	VPSHUFB    Y10, Y27, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y28, Y13
+	VPSHUFB    Y10, Y29, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y30, Y13
+	VPSHUFB    Y12, Y31, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y5
+	VPTERNLOGD $0x96, Y9, Y10, Y6
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT4_avx512_4(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (CX), Y1
+	VBROADCASTI128 64(CX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(CX), Y1
+	VBROADCASTI128 80(CX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(CX), Y1
+	VBROADCASTI128 96(CX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(CX), Y1
+	VBROADCASTI128 112(CX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z24
+	VMOVAPS        Z0, Z25
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z26
+	VMOVAPS        Z0, Z27
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z28
+	VMOVAPS        Z0, Z29
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z30
+	VMOVAPS        Z0, Z31
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPSRLQ     $0x04, Y5, Y10
+	VPAND      Y0, Y5, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y6, Y0, Y10
+	VPSRLQ     $0x04, Y6, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y3
+	VPTERNLOGD $0x96, Y9, Y10, Y4
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPSRLQ     $0x04, Y3, Y10
+	VPAND      Y0, Y3, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y24, Y11
+	VPSHUFB    Y9, Y25, Y9
+	VPSHUFB    Y10, Y26, Y12
+	VPSHUFB    Y10, Y27, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y4, Y0, Y10
+	VPSRLQ     $0x04, Y4, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y28, Y13
+	VPSHUFB    Y10, Y29, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y30, Y13
+	VPSHUFB    Y12, Y31, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT4_avx512_5(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), AX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y5
+	VPTERNLOGD $0x96, Y9, Y10, Y6
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT4_avx512_5(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPSRLQ     $0x04, Y3, Y10
+	VPAND      Y0, Y3, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y4, Y0, Y10
+	VPSRLQ     $0x04, Y4, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT4_avx512_6(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), CX
+	MOVQ           table02+48(FP), CX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VPSRLQ     $0x04, Y3, Y6
+	VPAND      Y0, Y3, Y5
+	VPAND      Y0, Y6, Y6
+	VPSHUFB    Y5, Y16, Y7
+	VPSHUFB    Y5, Y17, Y5
+	VPSHUFB    Y6, Y18, Y8
+	VPSHUFB    Y6, Y19, Y6
+	VPXOR      Y7, Y8, Y7
+	VPXOR      Y5, Y6, Y5
+	VPAND      Y4, Y0, Y6
+	VPSRLQ     $0x04, Y4, Y8
+	VPAND      Y0, Y8, Y8
+	VPSHUFB    Y6, Y20, Y9
+	VPSHUFB    Y6, Y21, Y6
+	VPXOR      Y7, Y9, Y7
+	VPXOR      Y5, Y6, Y5
+	VPSHUFB    Y8, Y22, Y9
+	VPSHUFB    Y8, Y23, Y6
+	VPTERNLOGD $0x96, Y7, Y9, Y1
+	VPTERNLOGD $0x96, Y5, Y6, Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx512_6(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ           table01+32(FP), AX
+	MOVQ           table23+40(FP), AX
+	MOVQ           table02+48(FP), AX
+	VBROADCASTI128 (AX), Y1
+	VBROADCASTI128 64(AX), Y0
+	VMOVAPS        Z1, Z16
+	VMOVAPS        Z0, Z17
+	VBROADCASTI128 16(AX), Y1
+	VBROADCASTI128 80(AX), Y0
+	VMOVAPS        Z1, Z18
+	VMOVAPS        Z0, Z19
+	VBROADCASTI128 32(AX), Y1
+	VBROADCASTI128 96(AX), Y0
+	VMOVAPS        Z1, Z20
+	VMOVAPS        Z0, Z21
+	VBROADCASTI128 48(AX), Y1
+	VBROADCASTI128 112(AX), Y0
+	VMOVAPS        Z1, Z22
+	VMOVAPS        Z0, Z23
+	MOVQ           $0x0000000f, AX
+	MOVQ           AX, X0
+	VPBROADCASTB   X0, Y0
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+
+loop:
+	VMOVDQU    (SI), Y1
+	VMOVDQU    32(SI), Y2
+	VMOVDQU    (R8), Y5
+	VMOVDQU    32(R8), Y6
+	VMOVDQU    (DI), Y3
+	VMOVDQU    32(DI), Y4
+	VMOVDQU    (AX), Y7
+	VMOVDQU    32(AX), Y8
+	VPSRLQ     $0x04, Y5, Y10
+	VPAND      Y0, Y5, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y6, Y0, Y10
+	VPSRLQ     $0x04, Y6, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y1
+	VPTERNLOGD $0x96, Y9, Y10, Y2
+	VPSRLQ     $0x04, Y7, Y10
+	VPAND      Y0, Y7, Y9
+	VPAND      Y0, Y10, Y10
+	VPSHUFB    Y9, Y16, Y11
+	VPSHUFB    Y9, Y17, Y9
+	VPSHUFB    Y10, Y18, Y12
+	VPSHUFB    Y10, Y19, Y10
+	VPXOR      Y11, Y12, Y11
+	VPXOR      Y9, Y10, Y9
+	VPAND      Y8, Y0, Y10
+	VPSRLQ     $0x04, Y8, Y12
+	VPAND      Y0, Y12, Y12
+	VPSHUFB    Y10, Y20, Y13
+	VPSHUFB    Y10, Y21, Y10
+	VPXOR      Y11, Y13, Y11
+	VPXOR      Y9, Y10, Y9
+	VPSHUFB    Y12, Y22, Y13
+	VPSHUFB    Y12, Y23, Y10
+	VPTERNLOGD $0x96, Y11, Y13, Y3
+	VPTERNLOGD $0x96, Y9, Y10, Y4
+	VPXOR      Y1, Y5, Y5
+	VPXOR      Y2, Y6, Y6
+	VPXOR      Y3, Y7, Y7
+	VPXOR      Y4, Y8, Y8
+	VPXOR      Y1, Y3, Y3
+	VPXOR      Y2, Y4, Y4
+	VMOVDQU    Y1, (SI)
+	VMOVDQU    Y2, 32(SI)
+	ADDQ       $0x40, SI
+	VMOVDQU    Y3, (DI)
+	VMOVDQU    Y4, 32(DI)
+	ADDQ       $0x40, DI
+	VPXOR      Y5, Y7, Y7
+	VPXOR      Y6, Y8, Y8
+	VMOVDQU    Y5, (R8)
+	VMOVDQU    Y6, 32(R8)
+	ADDQ       $0x40, R8
+	VMOVDQU    Y7, (AX)
+	VMOVDQU    Y8, 32(AX)
+	ADDQ       $0x40, AX
+	SUBQ       $0x40, DX
+	JNZ        loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx512_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, SSE2
+TEXT Β·ifftDIT4_avx512_7(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), AX
+	MOVQ         $0x0000000f, AX
+	MOVQ         AX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), AX
+	MOVQ         work_base+0(FP), CX
+	MOVQ         8(CX), DX
+	XORQ         BX, BX
+	MOVQ         (CX)(BX*1), SI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), DI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), R8
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), AX
+
+loop:
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+	VPXOR   Y0, Y2, Y2
+	VPXOR   Y1, Y3, Y3
+	VMOVDQU (R8), Y4
+	VMOVDQU 32(R8), Y5
+	VMOVDQU (AX), Y6
+	VMOVDQU 32(AX), Y7
+	VPXOR   Y4, Y6, Y6
+	VPXOR   Y5, Y7, Y7
+	VPXOR   Y0, Y4, Y4
+	VPXOR   Y1, Y5, Y5
+	VPXOR   Y2, Y6, Y6
+	VPXOR   Y3, Y7, Y7
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y6, (AX)
+	VMOVDQU Y7, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JNZ     loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx512_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, SSE2
+TEXT Β·fftDIT4_avx512_7(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), AX
+	MOVQ         $0x0000000f, AX
+	MOVQ         AX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), AX
+	MOVQ         work_base+0(FP), CX
+	MOVQ         8(CX), DX
+	XORQ         BX, BX
+	MOVQ         (CX)(BX*1), SI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), DI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), R8
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), AX
+
+loop:
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (R8), Y4
+	VMOVDQU 32(R8), Y5
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+	VMOVDQU (AX), Y6
+	VMOVDQU 32(AX), Y7
+	VPXOR   Y0, Y4, Y4
+	VPXOR   Y1, Y5, Y5
+	VPXOR   Y2, Y6, Y6
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y0, Y2, Y2
+	VPXOR   Y1, Y3, Y3
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VPXOR   Y4, Y6, Y6
+	VPXOR   Y5, Y7, Y7
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y6, (AX)
+	VMOVDQU Y7, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JNZ     loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·ifftDIT4_avx2_0(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), DX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), BX
+	MOVQ         work_base+0(FP), SI
+	MOVQ         8(SI), DI
+	XORQ         R8, R8
+	MOVQ         (SI)(R8*1), R9
+	ADDQ         BX, R8
+	MOVQ         (SI)(R8*1), R10
+	ADDQ         BX, R8
+	MOVQ         (SI)(R8*1), R11
+	ADDQ         BX, R8
+	MOVQ         (SI)(R8*1), BX
+
+loop:
+	VMOVDQU        (R9), Y1
+	VMOVDQU        32(R9), Y2
+	VMOVDQU        (R10), Y3
+	VMOVDQU        32(R10), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VPSRLQ         $0x04, Y3, Y6
+	VPAND          Y0, Y3, Y5
+	VPAND          Y0, Y6, Y6
+	VBROADCASTI128 (AX), Y7
+	VBROADCASTI128 64(AX), Y8
+	VPSHUFB        Y5, Y7, Y7
+	VPSHUFB        Y5, Y8, Y5
+	VBROADCASTI128 16(AX), Y8
+	VBROADCASTI128 80(AX), Y9
+	VPSHUFB        Y6, Y8, Y8
+	VPSHUFB        Y6, Y9, Y6
+	VPXOR          Y7, Y8, Y7
+	VPXOR          Y5, Y6, Y5
+	VPAND          Y4, Y0, Y6
+	VPSRLQ         $0x04, Y4, Y8
+	VPAND          Y0, Y8, Y8
+	VBROADCASTI128 32(AX), Y9
+	VBROADCASTI128 96(AX), Y10
+	VPSHUFB        Y6, Y9, Y9
+	VPSHUFB        Y6, Y10, Y6
+	VPXOR          Y7, Y9, Y7
+	VPXOR          Y5, Y6, Y5
+	VBROADCASTI128 48(AX), Y9
+	VBROADCASTI128 112(AX), Y6
+	VPSHUFB        Y8, Y9, Y9
+	VPSHUFB        Y8, Y6, Y6
+	XOR3WAY(     $0x00, Y7, Y9, Y1)
+	XOR3WAY(     $0x00, Y5, Y6, Y2)
+	VMOVDQU        (R11), Y5
+	VMOVDQU        32(R11), Y6
+	VMOVDQU        (BX), Y7
+	VMOVDQU        32(BX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y5)
+	XOR3WAY(     $0x00, Y9, Y10, Y6)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (DX), Y11
+	VBROADCASTI128 64(DX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(DX), Y12
+	VBROADCASTI128 80(DX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(DX), Y13
+	VBROADCASTI128 96(DX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(DX), Y13
+	VBROADCASTI128 112(DX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (DX), Y11
+	VBROADCASTI128 64(DX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(DX), Y12
+	VBROADCASTI128 80(DX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(DX), Y13
+	VBROADCASTI128 96(DX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(DX), Y13
+	VBROADCASTI128 112(DX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VMOVDQU        Y1, (R9)
+	VMOVDQU        Y2, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y3, (R10)
+	VMOVDQU        Y4, 32(R10)
+	ADDQ           $0x40, R10
+	VMOVDQU        Y5, (R11)
+	VMOVDQU        Y6, 32(R11)
+	ADDQ           $0x40, R11
+	VMOVDQU        Y7, (BX)
+	VMOVDQU        Y8, 32(BX)
+	ADDQ           $0x40, BX
+	SUBQ           $0x40, DI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_0(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx2_0(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), DX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), BX
+	MOVQ         work_base+0(FP), SI
+	MOVQ         8(SI), DI
+	XORQ         R8, R8
+	MOVQ         (SI)(R8*1), R9
+	ADDQ         BX, R8
+	MOVQ         (SI)(R8*1), R10
+	ADDQ         BX, R8
+	MOVQ         (SI)(R8*1), R11
+	ADDQ         BX, R8
+	MOVQ         (SI)(R8*1), BX
+
+loop:
+	VMOVDQU        (R9), Y1
+	VMOVDQU        32(R9), Y2
+	VMOVDQU        (R11), Y5
+	VMOVDQU        32(R11), Y6
+	VMOVDQU        (R10), Y3
+	VMOVDQU        32(R10), Y4
+	VMOVDQU        (BX), Y7
+	VMOVDQU        32(BX), Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (DX), Y11
+	VBROADCASTI128 64(DX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(DX), Y12
+	VBROADCASTI128 80(DX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(DX), Y13
+	VBROADCASTI128 96(DX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(DX), Y13
+	VBROADCASTI128 112(DX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (DX), Y11
+	VBROADCASTI128 64(DX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(DX), Y12
+	VBROADCASTI128 80(DX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(DX), Y13
+	VBROADCASTI128 96(DX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(DX), Y13
+	VBROADCASTI128 112(DX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y3, Y10
+	VPAND          Y0, Y3, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y4, Y0, Y10
+	VPSRLQ         $0x04, Y4, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (R9)
+	VMOVDQU        Y2, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y3, (R10)
+	VMOVDQU        Y4, 32(R10)
+	ADDQ           $0x40, R10
+	VPSRLQ         $0x04, Y7, Y2
+	VPAND          Y0, Y7, Y1
+	VPAND          Y0, Y2, Y2
+	VBROADCASTI128 (CX), Y3
+	VBROADCASTI128 64(CX), Y4
+	VPSHUFB        Y1, Y3, Y3
+	VPSHUFB        Y1, Y4, Y1
+	VBROADCASTI128 16(CX), Y4
+	VBROADCASTI128 80(CX), Y9
+	VPSHUFB        Y2, Y4, Y4
+	VPSHUFB        Y2, Y9, Y2
+	VPXOR          Y3, Y4, Y3
+	VPXOR          Y1, Y2, Y1
+	VPAND          Y8, Y0, Y2
+	VPSRLQ         $0x04, Y8, Y4
+	VPAND          Y0, Y4, Y4
+	VBROADCASTI128 32(CX), Y9
+	VBROADCASTI128 96(CX), Y10
+	VPSHUFB        Y2, Y9, Y9
+	VPSHUFB        Y2, Y10, Y2
+	VPXOR          Y3, Y9, Y3
+	VPXOR          Y1, Y2, Y1
+	VBROADCASTI128 48(CX), Y9
+	VBROADCASTI128 112(CX), Y2
+	VPSHUFB        Y4, Y9, Y9
+	VPSHUFB        Y4, Y2, Y2
+	XOR3WAY(     $0x00, Y3, Y9, Y5)
+	XOR3WAY(     $0x00, Y1, Y2, Y6)
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R11)
+	VMOVDQU        Y6, 32(R11)
+	ADDQ           $0x40, R11
+	VMOVDQU        Y7, (BX)
+	VMOVDQU        Y8, 32(BX)
+	ADDQ           $0x40, BX
+	SUBQ           $0x40, DI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·ifftDIT4_avx2_1(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, DX
+	MOVQ         DX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), DX
+	MOVQ         work_base+0(FP), BX
+	MOVQ         8(BX), SI
+	XORQ         DI, DI
+	MOVQ         (BX)(DI*1), R8
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R9
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R10
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), DX
+
+loop:
+	VMOVDQU        (R8), Y1
+	VMOVDQU        32(R8), Y2
+	VMOVDQU        (R9), Y3
+	VMOVDQU        32(R9), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        (R10), Y5
+	VMOVDQU        32(R10), Y6
+	VMOVDQU        (DX), Y7
+	VMOVDQU        32(DX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y5)
+	XOR3WAY(     $0x00, Y9, Y10, Y6)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VMOVDQU        Y1, (R8)
+	VMOVDQU        Y2, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y3, (R9)
+	VMOVDQU        Y4, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y5, (R10)
+	VMOVDQU        Y6, 32(R10)
+	ADDQ           $0x40, R10
+	VMOVDQU        Y7, (DX)
+	VMOVDQU        Y8, 32(DX)
+	ADDQ           $0x40, DX
+	SUBQ           $0x40, SI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_1(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx2_1(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), DX
+	MOVQ         $0x0000000f, DX
+	MOVQ         DX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), DX
+	MOVQ         work_base+0(FP), BX
+	MOVQ         8(BX), SI
+	XORQ         DI, DI
+	MOVQ         (BX)(DI*1), R8
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R9
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R10
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), DX
+
+loop:
+	VMOVDQU        (R8), Y1
+	VMOVDQU        32(R8), Y2
+	VMOVDQU        (R10), Y5
+	VMOVDQU        32(R10), Y6
+	VMOVDQU        (R9), Y3
+	VMOVDQU        32(R9), Y4
+	VMOVDQU        (DX), Y7
+	VMOVDQU        32(DX), Y8
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y3, Y10
+	VPAND          Y0, Y3, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y4, Y0, Y10
+	VPSRLQ         $0x04, Y4, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (R8)
+	VMOVDQU        Y2, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y3, (R9)
+	VMOVDQU        Y4, 32(R9)
+	ADDQ           $0x40, R9
+	VPSRLQ         $0x04, Y7, Y2
+	VPAND          Y0, Y7, Y1
+	VPAND          Y0, Y2, Y2
+	VBROADCASTI128 (CX), Y3
+	VBROADCASTI128 64(CX), Y4
+	VPSHUFB        Y1, Y3, Y3
+	VPSHUFB        Y1, Y4, Y1
+	VBROADCASTI128 16(CX), Y4
+	VBROADCASTI128 80(CX), Y9
+	VPSHUFB        Y2, Y4, Y4
+	VPSHUFB        Y2, Y9, Y2
+	VPXOR          Y3, Y4, Y3
+	VPXOR          Y1, Y2, Y1
+	VPAND          Y8, Y0, Y2
+	VPSRLQ         $0x04, Y8, Y4
+	VPAND          Y0, Y4, Y4
+	VBROADCASTI128 32(CX), Y9
+	VBROADCASTI128 96(CX), Y10
+	VPSHUFB        Y2, Y9, Y9
+	VPSHUFB        Y2, Y10, Y2
+	VPXOR          Y3, Y9, Y3
+	VPXOR          Y1, Y2, Y1
+	VBROADCASTI128 48(CX), Y9
+	VBROADCASTI128 112(CX), Y2
+	VPSHUFB        Y4, Y9, Y9
+	VPSHUFB        Y4, Y2, Y2
+	XOR3WAY(     $0x00, Y3, Y9, Y5)
+	XOR3WAY(     $0x00, Y1, Y2, Y6)
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R10)
+	VMOVDQU        Y6, 32(R10)
+	ADDQ           $0x40, R10
+	VMOVDQU        Y7, (DX)
+	VMOVDQU        Y8, 32(DX)
+	ADDQ           $0x40, DX
+	SUBQ           $0x40, SI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·ifftDIT4_avx2_2(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, DX
+	MOVQ         DX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), DX
+	MOVQ         work_base+0(FP), BX
+	MOVQ         8(BX), SI
+	XORQ         DI, DI
+	MOVQ         (BX)(DI*1), R8
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R9
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R10
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), DX
+
+loop:
+	VMOVDQU        (R8), Y1
+	VMOVDQU        32(R8), Y2
+	VMOVDQU        (R9), Y3
+	VMOVDQU        32(R9), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VPSRLQ         $0x04, Y3, Y6
+	VPAND          Y0, Y3, Y5
+	VPAND          Y0, Y6, Y6
+	VBROADCASTI128 (AX), Y7
+	VBROADCASTI128 64(AX), Y8
+	VPSHUFB        Y5, Y7, Y7
+	VPSHUFB        Y5, Y8, Y5
+	VBROADCASTI128 16(AX), Y8
+	VBROADCASTI128 80(AX), Y9
+	VPSHUFB        Y6, Y8, Y8
+	VPSHUFB        Y6, Y9, Y6
+	VPXOR          Y7, Y8, Y7
+	VPXOR          Y5, Y6, Y5
+	VPAND          Y4, Y0, Y6
+	VPSRLQ         $0x04, Y4, Y8
+	VPAND          Y0, Y8, Y8
+	VBROADCASTI128 32(AX), Y9
+	VBROADCASTI128 96(AX), Y10
+	VPSHUFB        Y6, Y9, Y9
+	VPSHUFB        Y6, Y10, Y6
+	VPXOR          Y7, Y9, Y7
+	VPXOR          Y5, Y6, Y5
+	VBROADCASTI128 48(AX), Y9
+	VBROADCASTI128 112(AX), Y6
+	VPSHUFB        Y8, Y9, Y9
+	VPSHUFB        Y8, Y6, Y6
+	XOR3WAY(     $0x00, Y7, Y9, Y1)
+	XOR3WAY(     $0x00, Y5, Y6, Y2)
+	VMOVDQU        (R10), Y5
+	VMOVDQU        32(R10), Y6
+	VMOVDQU        (DX), Y7
+	VMOVDQU        32(DX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VMOVDQU        Y1, (R8)
+	VMOVDQU        Y2, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y3, (R9)
+	VMOVDQU        Y4, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y5, (R10)
+	VMOVDQU        Y6, 32(R10)
+	ADDQ           $0x40, R10
+	VMOVDQU        Y7, (DX)
+	VMOVDQU        Y8, 32(DX)
+	ADDQ           $0x40, DX
+	SUBQ           $0x40, SI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_2(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx2_2(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, DX
+	MOVQ         DX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), DX
+	MOVQ         work_base+0(FP), BX
+	MOVQ         8(BX), SI
+	XORQ         DI, DI
+	MOVQ         (BX)(DI*1), R8
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R9
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R10
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), DX
+
+loop:
+	VMOVDQU        (R8), Y1
+	VMOVDQU        32(R8), Y2
+	VMOVDQU        (R10), Y5
+	VMOVDQU        32(R10), Y6
+	VMOVDQU        (R9), Y3
+	VMOVDQU        32(R9), Y4
+	VMOVDQU        (DX), Y7
+	VMOVDQU        32(DX), Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (R8)
+	VMOVDQU        Y2, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y3, (R9)
+	VMOVDQU        Y4, 32(R9)
+	ADDQ           $0x40, R9
+	VPSRLQ         $0x04, Y7, Y2
+	VPAND          Y0, Y7, Y1
+	VPAND          Y0, Y2, Y2
+	VBROADCASTI128 (AX), Y3
+	VBROADCASTI128 64(AX), Y4
+	VPSHUFB        Y1, Y3, Y3
+	VPSHUFB        Y1, Y4, Y1
+	VBROADCASTI128 16(AX), Y4
+	VBROADCASTI128 80(AX), Y9
+	VPSHUFB        Y2, Y4, Y4
+	VPSHUFB        Y2, Y9, Y2
+	VPXOR          Y3, Y4, Y3
+	VPXOR          Y1, Y2, Y1
+	VPAND          Y8, Y0, Y2
+	VPSRLQ         $0x04, Y8, Y4
+	VPAND          Y0, Y4, Y4
+	VBROADCASTI128 32(AX), Y9
+	VBROADCASTI128 96(AX), Y10
+	VPSHUFB        Y2, Y9, Y9
+	VPSHUFB        Y2, Y10, Y2
+	VPXOR          Y3, Y9, Y3
+	VPXOR          Y1, Y2, Y1
+	VBROADCASTI128 48(AX), Y9
+	VBROADCASTI128 112(AX), Y2
+	VPSHUFB        Y4, Y9, Y9
+	VPSHUFB        Y4, Y2, Y2
+	XOR3WAY(     $0x00, Y3, Y9, Y5)
+	XOR3WAY(     $0x00, Y1, Y2, Y6)
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R10)
+	VMOVDQU        Y6, 32(R10)
+	ADDQ           $0x40, R10
+	VMOVDQU        Y7, (DX)
+	VMOVDQU        Y8, 32(DX)
+	ADDQ           $0x40, DX
+	SUBQ           $0x40, SI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·ifftDIT4_avx2_3(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), AX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), CX
+	MOVQ         work_base+0(FP), DX
+	MOVQ         8(DX), BX
+	XORQ         SI, SI
+	MOVQ         (DX)(SI*1), DI
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R8
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R9
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), CX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (CX), Y7
+	VMOVDQU        32(CX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (CX)
+	VMOVDQU        Y8, 32(CX)
+	ADDQ           $0x40, CX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_3(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx2_3(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), CX
+	MOVQ         work_base+0(FP), DX
+	MOVQ         8(DX), BX
+	XORQ         SI, SI
+	MOVQ         (DX)(SI*1), DI
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R8
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R9
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), CX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VMOVDQU        (CX), Y7
+	VMOVDQU        32(CX), Y8
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VPSRLQ         $0x04, Y7, Y2
+	VPAND          Y0, Y7, Y1
+	VPAND          Y0, Y2, Y2
+	VBROADCASTI128 (AX), Y3
+	VBROADCASTI128 64(AX), Y4
+	VPSHUFB        Y1, Y3, Y3
+	VPSHUFB        Y1, Y4, Y1
+	VBROADCASTI128 16(AX), Y4
+	VBROADCASTI128 80(AX), Y9
+	VPSHUFB        Y2, Y4, Y4
+	VPSHUFB        Y2, Y9, Y2
+	VPXOR          Y3, Y4, Y3
+	VPXOR          Y1, Y2, Y1
+	VPAND          Y8, Y0, Y2
+	VPSRLQ         $0x04, Y8, Y4
+	VPAND          Y0, Y4, Y4
+	VBROADCASTI128 32(AX), Y9
+	VBROADCASTI128 96(AX), Y10
+	VPSHUFB        Y2, Y9, Y9
+	VPSHUFB        Y2, Y10, Y2
+	VPXOR          Y3, Y9, Y3
+	VPXOR          Y1, Y2, Y1
+	VBROADCASTI128 48(AX), Y9
+	VBROADCASTI128 112(AX), Y2
+	VPSHUFB        Y4, Y9, Y9
+	VPSHUFB        Y4, Y2, Y2
+	XOR3WAY(     $0x00, Y3, Y9, Y5)
+	XOR3WAY(     $0x00, Y1, Y2, Y6)
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (CX)
+	VMOVDQU        Y8, 32(CX)
+	ADDQ           $0x40, CX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·ifftDIT4_avx2_4(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), DX
+	MOVQ         $0x0000000f, DX
+	MOVQ         DX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), DX
+	MOVQ         work_base+0(FP), BX
+	MOVQ         8(BX), SI
+	XORQ         DI, DI
+	MOVQ         (BX)(DI*1), R8
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R9
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R10
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), DX
+
+loop:
+	VMOVDQU        (R8), Y1
+	VMOVDQU        32(R8), Y2
+	VMOVDQU        (R9), Y3
+	VMOVDQU        32(R9), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VPSRLQ         $0x04, Y3, Y6
+	VPAND          Y0, Y3, Y5
+	VPAND          Y0, Y6, Y6
+	VBROADCASTI128 (AX), Y7
+	VBROADCASTI128 64(AX), Y8
+	VPSHUFB        Y5, Y7, Y7
+	VPSHUFB        Y5, Y8, Y5
+	VBROADCASTI128 16(AX), Y8
+	VBROADCASTI128 80(AX), Y9
+	VPSHUFB        Y6, Y8, Y8
+	VPSHUFB        Y6, Y9, Y6
+	VPXOR          Y7, Y8, Y7
+	VPXOR          Y5, Y6, Y5
+	VPAND          Y4, Y0, Y6
+	VPSRLQ         $0x04, Y4, Y8
+	VPAND          Y0, Y8, Y8
+	VBROADCASTI128 32(AX), Y9
+	VBROADCASTI128 96(AX), Y10
+	VPSHUFB        Y6, Y9, Y9
+	VPSHUFB        Y6, Y10, Y6
+	VPXOR          Y7, Y9, Y7
+	VPXOR          Y5, Y6, Y5
+	VBROADCASTI128 48(AX), Y9
+	VBROADCASTI128 112(AX), Y6
+	VPSHUFB        Y8, Y9, Y9
+	VPSHUFB        Y8, Y6, Y6
+	XOR3WAY(     $0x00, Y7, Y9, Y1)
+	XOR3WAY(     $0x00, Y5, Y6, Y2)
+	VMOVDQU        (R10), Y5
+	VMOVDQU        32(R10), Y6
+	VMOVDQU        (DX), Y7
+	VMOVDQU        32(DX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y5)
+	XOR3WAY(     $0x00, Y9, Y10, Y6)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VMOVDQU        Y1, (R8)
+	VMOVDQU        Y2, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y3, (R9)
+	VMOVDQU        Y4, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y5, (R10)
+	VMOVDQU        Y6, 32(R10)
+	ADDQ           $0x40, R10
+	VMOVDQU        Y7, (DX)
+	VMOVDQU        Y8, 32(DX)
+	ADDQ           $0x40, DX
+	SUBQ           $0x40, SI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_4(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx2_4(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, DX
+	MOVQ         DX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), DX
+	MOVQ         work_base+0(FP), BX
+	MOVQ         8(BX), SI
+	XORQ         DI, DI
+	MOVQ         (BX)(DI*1), R8
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R9
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), R10
+	ADDQ         DX, DI
+	MOVQ         (BX)(DI*1), DX
+
+loop:
+	VMOVDQU        (R8), Y1
+	VMOVDQU        32(R8), Y2
+	VMOVDQU        (R10), Y5
+	VMOVDQU        32(R10), Y6
+	VMOVDQU        (R9), Y3
+	VMOVDQU        32(R9), Y4
+	VMOVDQU        (DX), Y7
+	VMOVDQU        32(DX), Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (CX), Y11
+	VBROADCASTI128 64(CX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(CX), Y12
+	VBROADCASTI128 80(CX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(CX), Y13
+	VBROADCASTI128 96(CX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(CX), Y13
+	VBROADCASTI128 112(CX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y3, Y10
+	VPAND          Y0, Y3, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y4, Y0, Y10
+	VPSRLQ         $0x04, Y4, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (R8)
+	VMOVDQU        Y2, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y3, (R9)
+	VMOVDQU        Y4, 32(R9)
+	ADDQ           $0x40, R9
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R10)
+	VMOVDQU        Y6, 32(R10)
+	ADDQ           $0x40, R10
+	VMOVDQU        Y7, (DX)
+	VMOVDQU        Y8, 32(DX)
+	ADDQ           $0x40, DX
+	SUBQ           $0x40, SI
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·ifftDIT4_avx2_5(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), CX
+	MOVQ         work_base+0(FP), DX
+	MOVQ         8(DX), BX
+	XORQ         SI, SI
+	MOVQ         (DX)(SI*1), DI
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R8
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R9
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), CX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (CX), Y7
+	VMOVDQU        32(CX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y5)
+	XOR3WAY(     $0x00, Y9, Y10, Y6)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (CX)
+	VMOVDQU        Y8, 32(CX)
+	ADDQ           $0x40, CX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_5(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx2_5(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), CX
+	MOVQ         work_base+0(FP), DX
+	MOVQ         8(DX), BX
+	XORQ         SI, SI
+	MOVQ         (DX)(SI*1), DI
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R8
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R9
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), CX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VMOVDQU        (CX), Y7
+	VMOVDQU        32(CX), Y8
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPSRLQ         $0x04, Y3, Y10
+	VPAND          Y0, Y3, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y4, Y0, Y10
+	VPSRLQ         $0x04, Y4, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (CX)
+	VMOVDQU        Y8, 32(CX)
+	ADDQ           $0x40, CX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·ifftDIT4_avx2_6(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), CX
+	MOVQ         table02+48(FP), CX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), CX
+	MOVQ         work_base+0(FP), DX
+	MOVQ         8(DX), BX
+	XORQ         SI, SI
+	MOVQ         (DX)(SI*1), DI
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R8
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R9
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), CX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VPSRLQ         $0x04, Y3, Y6
+	VPAND          Y0, Y3, Y5
+	VPAND          Y0, Y6, Y6
+	VBROADCASTI128 (AX), Y7
+	VBROADCASTI128 64(AX), Y8
+	VPSHUFB        Y5, Y7, Y7
+	VPSHUFB        Y5, Y8, Y5
+	VBROADCASTI128 16(AX), Y8
+	VBROADCASTI128 80(AX), Y9
+	VPSHUFB        Y6, Y8, Y8
+	VPSHUFB        Y6, Y9, Y6
+	VPXOR          Y7, Y8, Y7
+	VPXOR          Y5, Y6, Y5
+	VPAND          Y4, Y0, Y6
+	VPSRLQ         $0x04, Y4, Y8
+	VPAND          Y0, Y8, Y8
+	VBROADCASTI128 32(AX), Y9
+	VBROADCASTI128 96(AX), Y10
+	VPSHUFB        Y6, Y9, Y9
+	VPSHUFB        Y6, Y10, Y6
+	VPXOR          Y7, Y9, Y7
+	VPXOR          Y5, Y6, Y5
+	VBROADCASTI128 48(AX), Y9
+	VBROADCASTI128 112(AX), Y6
+	VPSHUFB        Y8, Y9, Y9
+	VPSHUFB        Y8, Y6, Y6
+	XOR3WAY(     $0x00, Y7, Y9, Y1)
+	XOR3WAY(     $0x00, Y5, Y6, Y2)
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (CX), Y7
+	VMOVDQU        32(CX), Y8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (CX)
+	VMOVDQU        Y8, 32(CX)
+	ADDQ           $0x40, CX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_6(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT Β·fftDIT4_avx2_6(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), AX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), CX
+	MOVQ         work_base+0(FP), DX
+	MOVQ         8(DX), BX
+	XORQ         SI, SI
+	MOVQ         (DX)(SI*1), DI
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R8
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), R9
+	ADDQ         CX, SI
+	MOVQ         (DX)(SI*1), CX
+
+loop:
+	VMOVDQU        (DI), Y1
+	VMOVDQU        32(DI), Y2
+	VMOVDQU        (R9), Y5
+	VMOVDQU        32(R9), Y6
+	VMOVDQU        (R8), Y3
+	VMOVDQU        32(R8), Y4
+	VMOVDQU        (CX), Y7
+	VMOVDQU        32(CX), Y8
+	VPSRLQ         $0x04, Y5, Y10
+	VPAND          Y0, Y5, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y6, Y0, Y10
+	VPSRLQ         $0x04, Y6, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y1)
+	XOR3WAY(     $0x00, Y9, Y10, Y2)
+	VPSRLQ         $0x04, Y7, Y10
+	VPAND          Y0, Y7, Y9
+	VPAND          Y0, Y10, Y10
+	VBROADCASTI128 (AX), Y11
+	VBROADCASTI128 64(AX), Y12
+	VPSHUFB        Y9, Y11, Y11
+	VPSHUFB        Y9, Y12, Y9
+	VBROADCASTI128 16(AX), Y12
+	VBROADCASTI128 80(AX), Y13
+	VPSHUFB        Y10, Y12, Y12
+	VPSHUFB        Y10, Y13, Y10
+	VPXOR          Y11, Y12, Y11
+	VPXOR          Y9, Y10, Y9
+	VPAND          Y8, Y0, Y10
+	VPSRLQ         $0x04, Y8, Y12
+	VPAND          Y0, Y12, Y12
+	VBROADCASTI128 32(AX), Y13
+	VBROADCASTI128 96(AX), Y14
+	VPSHUFB        Y10, Y13, Y13
+	VPSHUFB        Y10, Y14, Y10
+	VPXOR          Y11, Y13, Y11
+	VPXOR          Y9, Y10, Y9
+	VBROADCASTI128 48(AX), Y13
+	VBROADCASTI128 112(AX), Y10
+	VPSHUFB        Y12, Y13, Y13
+	VPSHUFB        Y12, Y10, Y10
+	XOR3WAY(     $0x00, Y11, Y13, Y3)
+	XOR3WAY(     $0x00, Y9, Y10, Y4)
+	VPXOR          Y1, Y5, Y5
+	VPXOR          Y2, Y6, Y6
+	VPXOR          Y3, Y7, Y7
+	VPXOR          Y4, Y8, Y8
+	VPXOR          Y1, Y3, Y3
+	VPXOR          Y2, Y4, Y4
+	VMOVDQU        Y1, (DI)
+	VMOVDQU        Y2, 32(DI)
+	ADDQ           $0x40, DI
+	VMOVDQU        Y3, (R8)
+	VMOVDQU        Y4, 32(R8)
+	ADDQ           $0x40, R8
+	VPXOR          Y5, Y7, Y7
+	VPXOR          Y6, Y8, Y8
+	VMOVDQU        Y5, (R9)
+	VMOVDQU        Y6, 32(R9)
+	ADDQ           $0x40, R9
+	VMOVDQU        Y7, (CX)
+	VMOVDQU        Y8, 32(CX)
+	ADDQ           $0x40, CX
+	SUBQ           $0x40, BX
+	JNZ            loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT4_avx2_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, SSE2
+TEXT Β·ifftDIT4_avx2_7(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), AX
+	MOVQ         $0x0000000f, AX
+	MOVQ         AX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), AX
+	MOVQ         work_base+0(FP), CX
+	MOVQ         8(CX), DX
+	XORQ         BX, BX
+	MOVQ         (CX)(BX*1), SI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), DI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), R8
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), AX
+
+loop:
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+	VPXOR   Y0, Y2, Y2
+	VPXOR   Y1, Y3, Y3
+	VMOVDQU (R8), Y4
+	VMOVDQU 32(R8), Y5
+	VMOVDQU (AX), Y6
+	VMOVDQU 32(AX), Y7
+	VPXOR   Y4, Y6, Y6
+	VPXOR   Y5, Y7, Y7
+	VPXOR   Y0, Y4, Y4
+	VPXOR   Y1, Y5, Y5
+	VPXOR   Y2, Y6, Y6
+	VPXOR   Y3, Y7, Y7
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y6, (AX)
+	VMOVDQU Y7, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JNZ     loop
+	VZEROUPPER
+	RET
+
+// func fftDIT4_avx2_7(work [][]byte, dist int, table01 *[128]uint8, table23 *[128]uint8, table02 *[128]uint8)
+// Requires: AVX, AVX2, SSE2
+TEXT Β·fftDIT4_avx2_7(SB), NOSPLIT, $0-56
+	// dist must be multiplied by 24 (size of slice header)
+	MOVQ         table01+32(FP), AX
+	MOVQ         table23+40(FP), AX
+	MOVQ         table02+48(FP), AX
+	MOVQ         $0x0000000f, AX
+	MOVQ         AX, X0
+	VPBROADCASTB X0, Y0
+	MOVQ         dist+24(FP), AX
+	MOVQ         work_base+0(FP), CX
+	MOVQ         8(CX), DX
+	XORQ         BX, BX
+	MOVQ         (CX)(BX*1), SI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), DI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), R8
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), AX
+
+loop:
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (R8), Y4
+	VMOVDQU 32(R8), Y5
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+	VMOVDQU (AX), Y6
+	VMOVDQU 32(AX), Y7
+	VPXOR   Y0, Y4, Y4
+	VPXOR   Y1, Y5, Y5
+	VPXOR   Y2, Y6, Y6
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y0, Y2, Y2
+	VPXOR   Y1, Y3, Y3
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VPXOR   Y4, Y6, Y6
+	VPXOR   Y5, Y7, Y7
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y6, (AX)
+	VMOVDQU Y7, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JNZ     loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT2_ssse3(x []byte, y []byte, table *[128]uint8)
+// Requires: SSE, SSE2, SSSE3
+TEXT ·ifftDIT2_ssse3(SB), NOSPLIT, $0-56
+	MOVQ   table+48(FP), AX
+	MOVUPS (AX), X0
+	MOVUPS 64(AX), X1
+	MOVUPS 16(AX), X2
+	MOVUPS 80(AX), X3
+	MOVUPS 32(AX), X4
+	MOVUPS 96(AX), X5
+	XORPS  X6, X6
+	MOVQ   $0x0000000f, CX
+	MOVQ   CX, X7
+	PSHUFB X6, X7
+	MOVQ   x_len+8(FP), CX
+	MOVQ   x_base+0(FP), DX
+	MOVQ   y_base+24(FP), BX
+
+loop:
+	MOVUPS (DX), X6
+	MOVUPS 32(DX), X8
+	MOVUPS (BX), X9
+	MOVUPS 32(BX), X10
+	PXOR   X6, X9
+	PXOR   X8, X10
+	MOVUPS X9, (BX)
+	MOVUPS X10, 32(BX)
+	MOVAPS X9, X11
+	PSRLQ  $0x04, X11
+	MOVAPS X9, X9
+	PAND   X7, X9
+	PAND   X7, X11
+	MOVUPS X0, X12
+	MOVUPS X1, X13
+	PSHUFB X9, X12
+	PSHUFB X9, X13
+	MOVUPS X2, X9
+	MOVUPS X3, X14
+	PSHUFB X11, X9
+	PSHUFB X11, X14
+	PXOR   X9, X12
+	PXOR   X14, X13
+	MOVAPS X10, X9
+	MOVAPS X10, X10
+	PAND   X7, X9
+	PSRLQ  $0x04, X10
+	PAND   X7, X10
+	MOVUPS X4, X11
+	MOVUPS X5, X14
+	PSHUFB X9, X11
+	PSHUFB X9, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	MOVUPS 48(AX), X11
+	MOVUPS 112(AX), X14
+	PSHUFB X10, X11
+	PSHUFB X10, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	PXOR   X12, X6
+	PXOR   X13, X8
+	MOVUPS X6, (DX)
+	MOVUPS X8, 32(DX)
+	MOVUPS 16(DX), X6
+	MOVUPS 48(DX), X8
+	MOVUPS 16(BX), X9
+	MOVUPS 48(BX), X10
+	PXOR   X6, X9
+	PXOR   X8, X10
+	MOVUPS X9, 16(BX)
+	MOVUPS X10, 48(BX)
+	MOVAPS X9, X11
+	PSRLQ  $0x04, X11
+	MOVAPS X9, X9
+	PAND   X7, X9
+	PAND   X7, X11
+	MOVUPS X0, X12
+	MOVUPS X1, X13
+	PSHUFB X9, X12
+	PSHUFB X9, X13
+	MOVUPS X2, X9
+	MOVUPS X3, X14
+	PSHUFB X11, X9
+	PSHUFB X11, X14
+	PXOR   X9, X12
+	PXOR   X14, X13
+	MOVAPS X10, X9
+	MOVAPS X10, X10
+	PAND   X7, X9
+	PSRLQ  $0x04, X10
+	PAND   X7, X10
+	MOVUPS X4, X11
+	MOVUPS X5, X14
+	PSHUFB X9, X11
+	PSHUFB X9, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	MOVUPS 48(AX), X11
+	MOVUPS 112(AX), X14
+	PSHUFB X10, X11
+	PSHUFB X10, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	PXOR   X12, X6
+	PXOR   X13, X8
+	MOVUPS X6, 16(DX)
+	MOVUPS X8, 48(DX)
+	ADDQ   $0x40, DX
+	ADDQ   $0x40, BX
+	SUBQ   $0x40, CX
+	JNZ    loop
+	RET
+
+// func fftDIT2_ssse3(x []byte, y []byte, table *[128]uint8)
+// Requires: SSE, SSE2, SSSE3
+TEXT ·fftDIT2_ssse3(SB), NOSPLIT, $0-56
+	MOVQ   table+48(FP), AX
+	MOVUPS (AX), X0
+	MOVUPS 64(AX), X1
+	MOVUPS 16(AX), X2
+	MOVUPS 80(AX), X3
+	MOVUPS 32(AX), X4
+	MOVUPS 96(AX), X5
+	XORPS  X6, X6
+	MOVQ   $0x0000000f, CX
+	MOVQ   CX, X7
+	PSHUFB X6, X7
+	MOVQ   x_len+8(FP), CX
+	MOVQ   x_base+0(FP), DX
+	MOVQ   y_base+24(FP), BX
+
+loop:
+	MOVUPS (BX), X9
+	MOVUPS 32(BX), X10
+	MOVAPS X9, X8
+	PSRLQ  $0x04, X8
+	MOVAPS X9, X6
+	PAND   X7, X6
+	PAND   X7, X8
+	MOVUPS X0, X11
+	MOVUPS X1, X12
+	PSHUFB X6, X11
+	PSHUFB X6, X12
+	MOVUPS X2, X6
+	MOVUPS X3, X13
+	PSHUFB X8, X6
+	PSHUFB X8, X13
+	PXOR   X6, X11
+	PXOR   X13, X12
+	MOVAPS X10, X6
+	MOVAPS X10, X8
+	PAND   X7, X6
+	PSRLQ  $0x04, X8
+	PAND   X7, X8
+	MOVUPS X4, X13
+	MOVUPS X5, X14
+	PSHUFB X6, X13
+	PSHUFB X6, X14
+	PXOR   X13, X11
+	PXOR   X14, X12
+	MOVUPS 48(AX), X13
+	MOVUPS 112(AX), X14
+	PSHUFB X8, X13
+	PSHUFB X8, X14
+	PXOR   X13, X11
+	PXOR   X14, X12
+	MOVUPS (DX), X6
+	MOVUPS 32(DX), X8
+	PXOR   X11, X6
+	PXOR   X12, X8
+	MOVUPS X6, (DX)
+	MOVUPS X8, 32(DX)
+	PXOR   X6, X9
+	PXOR   X8, X10
+	MOVUPS X9, (BX)
+	MOVUPS X10, 32(BX)
+	MOVUPS 16(BX), X9
+	MOVUPS 48(BX), X10
+	MOVAPS X9, X8
+	PSRLQ  $0x04, X8
+	MOVAPS X9, X6
+	PAND   X7, X6
+	PAND   X7, X8
+	MOVUPS X0, X11
+	MOVUPS X1, X12
+	PSHUFB X6, X11
+	PSHUFB X6, X12
+	MOVUPS X2, X6
+	MOVUPS X3, X13
+	PSHUFB X8, X6
+	PSHUFB X8, X13
+	PXOR   X6, X11
+	PXOR   X13, X12
+	MOVAPS X10, X6
+	MOVAPS X10, X8
+	PAND   X7, X6
+	PSRLQ  $0x04, X8
+	PAND   X7, X8
+	MOVUPS X4, X13
+	MOVUPS X5, X14
+	PSHUFB X6, X13
+	PSHUFB X6, X14
+	PXOR   X13, X11
+	PXOR   X14, X12
+	MOVUPS 48(AX), X13
+	MOVUPS 112(AX), X14
+	PSHUFB X8, X13
+	PSHUFB X8, X14
+	PXOR   X13, X11
+	PXOR   X14, X12
+	MOVUPS 16(DX), X6
+	MOVUPS 48(DX), X8
+	PXOR   X11, X6
+	PXOR   X12, X8
+	MOVUPS X6, 16(DX)
+	MOVUPS X8, 48(DX)
+	PXOR   X6, X9
+	PXOR   X8, X10
+	MOVUPS X9, 16(BX)
+	MOVUPS X10, 48(BX)
+	ADDQ   $0x40, DX
+	ADDQ   $0x40, BX
+	SUBQ   $0x40, CX
+	JNZ    loop
+	RET
+
+// func mulgf16_ssse3(x []byte, y []byte, table *[128]uint8)
+// Requires: SSE, SSE2, SSSE3
+TEXT ·mulgf16_ssse3(SB), NOSPLIT, $0-56
+	MOVQ   table+48(FP), AX
+	MOVUPS (AX), X0
+	MOVUPS 64(AX), X1
+	MOVUPS 16(AX), X2
+	MOVUPS 80(AX), X3
+	MOVUPS 32(AX), X4
+	MOVUPS 96(AX), X5
+	MOVUPS 48(AX), X6
+	MOVUPS 112(AX), X7
+	MOVQ   x_len+8(FP), AX
+	MOVQ   x_base+0(FP), CX
+	MOVQ   y_base+24(FP), DX
+	XORPS  X8, X8
+	MOVQ   $0x0000000f, BX
+	MOVQ   BX, X9
+	PSHUFB X8, X9
+
+loop:
+	MOVUPS (DX), X8
+	MOVUPS 32(DX), X10
+	MOVAPS X8, X11
+	PSRLQ  $0x04, X11
+	MOVAPS X8, X8
+	PAND   X9, X8
+	PAND   X9, X11
+	MOVUPS X0, X12
+	MOVUPS X1, X13
+	PSHUFB X8, X12
+	PSHUFB X8, X13
+	MOVUPS X2, X8
+	MOVUPS X3, X14
+	PSHUFB X11, X8
+	PSHUFB X11, X14
+	PXOR   X8, X12
+	PXOR   X14, X13
+	MOVAPS X10, X8
+	MOVAPS X10, X10
+	PAND   X9, X8
+	PSRLQ  $0x04, X10
+	PAND   X9, X10
+	MOVUPS X4, X11
+	MOVUPS X5, X14
+	PSHUFB X8, X11
+	PSHUFB X8, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	MOVUPS X6, X11
+	MOVUPS X7, X14
+	PSHUFB X10, X11
+	PSHUFB X10, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	MOVUPS X12, (CX)
+	MOVUPS X13, 32(CX)
+	MOVUPS 16(DX), X8
+	MOVUPS 48(DX), X10
+	MOVAPS X8, X11
+	PSRLQ  $0x04, X11
+	MOVAPS X8, X8
+	PAND   X9, X8
+	PAND   X9, X11
+	MOVUPS X0, X12
+	MOVUPS X1, X13
+	PSHUFB X8, X12
+	PSHUFB X8, X13
+	MOVUPS X2, X8
+	MOVUPS X3, X14
+	PSHUFB X11, X8
+	PSHUFB X11, X14
+	PXOR   X8, X12
+	PXOR   X14, X13
+	MOVAPS X10, X8
+	MOVAPS X10, X10
+	PAND   X9, X8
+	PSRLQ  $0x04, X10
+	PAND   X9, X10
+	MOVUPS X4, X11
+	MOVUPS X5, X14
+	PSHUFB X8, X11
+	PSHUFB X8, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	MOVUPS X6, X11
+	MOVUPS X7, X14
+	PSHUFB X10, X11
+	PSHUFB X10, X14
+	PXOR   X11, X12
+	PXOR   X14, X13
+	MOVUPS X12, 16(CX)
+	MOVUPS X13, 48(CX)
+	ADDQ   $0x40, CX
+	ADDQ   $0x40, DX
+	SUBQ   $0x40, AX
+	JNZ    loop
+	RET
+
+// func ifftDIT28_avx2(x []byte, y []byte, table *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT28_avx2(SB), NOSPLIT, $0-56
+	MOVQ           table+48(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           x_len+8(FP), AX
+	MOVQ           x_base+0(FP), CX
+	MOVQ           y_base+24(FP), DX
+	MOVQ           $0x0000000f, BX
+	MOVQ           BX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VMOVDQU (DX), Y5
+	VMOVDQU 32(DX), Y6
+	VPXOR   Y5, Y3, Y5
+	VPXOR   Y6, Y4, Y6
+	VMOVDQU Y5, (DX)
+	VMOVDQU Y6, 32(DX)
+
+	// LEO_MULADD_256
+	VPAND   Y5, Y2, Y7
+	VPSRLQ  $0x04, Y5, Y5
+	VPSHUFB Y7, Y0, Y7
+	VPAND   Y5, Y2, Y5
+	VPSHUFB Y5, Y1, Y5
+	XOR3WAY( $0x00, Y7, Y5, Y3)
+
+	// LEO_MULADD_256
+	VPAND   Y6, Y2, Y5
+	VPSRLQ  $0x04, Y6, Y6
+	VPSHUFB Y5, Y0, Y5
+	VPAND   Y6, Y2, Y6
+	VPSHUFB Y6, Y1, Y6
+	XOR3WAY( $0x00, Y5, Y6, Y4)
+	VMOVDQU Y3, (CX)
+	VMOVDQU Y4, 32(CX)
+	ADDQ    $0x40, CX
+	ADDQ    $0x40, DX
+	SUBQ    $0x40, AX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT28_avx2(x []byte, y []byte, table *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT28_avx2(SB), NOSPLIT, $0-56
+	MOVQ           table+48(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           x_len+8(FP), AX
+	MOVQ           x_base+0(FP), CX
+	MOVQ           y_base+24(FP), DX
+	MOVQ           $0x0000000f, BX
+	MOVQ           BX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VMOVDQU (DX), Y5
+	VMOVDQU 32(DX), Y6
+
+	// LEO_MULADD_256
+	VPAND   Y5, Y2, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPSHUFB Y7, Y0, Y7
+	VPAND   Y8, Y2, Y8
+	VPSHUFB Y8, Y1, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// LEO_MULADD_256
+	VPAND   Y6, Y2, Y7
+	VPSRLQ  $0x04, Y6, Y8
+	VPSHUFB Y7, Y0, Y7
+	VPAND   Y8, Y2, Y8
+	VPSHUFB Y8, Y1, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y4)
+	VMOVDQU Y3, (CX)
+	VMOVDQU Y4, 32(CX)
+	VPXOR   Y5, Y3, Y5
+	VPXOR   Y6, Y4, Y6
+	VMOVDQU Y5, (DX)
+	VMOVDQU Y6, 32(DX)
+	ADDQ    $0x40, CX
+	ADDQ    $0x40, DX
+	SUBQ    $0x40, AX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_0(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT48_avx2_0(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 16(AX), Y0
+	MOVQ           t23+40(FP), CX
+	VBROADCASTI128 (CX), Y1
+	VBROADCASTI128 16(CX), Y2
+	MOVQ           t02+48(FP), CX
+	VBROADCASTI128 (CX), Y3
+	VBROADCASTI128 16(CX), Y4
+	MOVQ           dist+24(FP), CX
+	MOVQ           work_base+0(FP), DX
+	MOVQ           8(DX), BX
+	XORQ           SI, SI
+	MOVQ           (DX)(SI*1), DI
+	ADDQ           CX, SI
+	MOVQ           (DX)(SI*1), R8
+	ADDQ           CX, SI
+	MOVQ           (DX)(SI*1), R9
+	ADDQ           CX, SI
+	MOVQ           (DX)(SI*1), CX
+	MOVQ           $0x0000000f, DX
+	MOVQ           DX, X5
+	VPBROADCASTB   X5, Y5
+
+loop:
+	VMOVDQU        (DI), Y6
+	VMOVDQU        (R8), Y7
+	VMOVDQU        32(DI), Y8
+	VMOVDQU        32(R8), Y9
+	VPXOR          Y7, Y6, Y7
+	VPXOR          Y9, Y8, Y9
+	VBROADCASTI128 (AX), Y10
+
+	// LEO_MULADD_256
+	VPAND   Y7, Y5, Y11
+	VPSRLQ  $0x04, Y7, Y12
+	VPSHUFB Y11, Y10, Y11
+	VPAND   Y12, Y5, Y12
+	VPSHUFB Y12, Y0, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y5, Y11
+	VPSRLQ  $0x04, Y9, Y12
+	VPSHUFB Y11, Y10, Y11
+	VPAND   Y12, Y5, Y12
+	VPSHUFB Y12, Y0, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VMOVDQU (R9), Y10
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(R9), Y12
+	VMOVDQU 32(CX), Y13
+	VPXOR   Y10, Y11, Y11
+	VPXOR   Y12, Y13, Y13
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y5, Y14
+	VPSRLQ  $0x04, Y11, Y15
+	VPSHUFB Y14, Y1, Y14
+	VPAND   Y15, Y5, Y15
+	VPSHUFB Y15, Y2, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y10)
+
+	// LEO_MULADD_256
+	VPAND   Y13, Y5, Y14
+	VPSRLQ  $0x04, Y13, Y15
+	VPSHUFB Y14, Y1, Y14
+	VPAND   Y15, Y5, Y15
+	VPSHUFB Y15, Y2, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y12)
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y7, Y11, Y11
+	VPXOR   Y8, Y12, Y12
+	VPXOR   Y9, Y13, Y13
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y5, Y14
+	VPSRLQ  $0x04, Y10, Y15
+	VPSHUFB Y14, Y3, Y14
+	VPAND   Y15, Y5, Y15
+	VPSHUFB Y15, Y4, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y6)
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y5, Y14
+	VPSRLQ  $0x04, Y11, Y15
+	VPSHUFB Y14, Y3, Y14
+	VPAND   Y15, Y5, Y15
+	VPSHUFB Y15, Y4, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y7)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y5, Y14
+	VPSRLQ  $0x04, Y12, Y15
+	VPSHUFB Y14, Y3, Y14
+	VPAND   Y15, Y5, Y15
+	VPSHUFB Y15, Y4, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y8)
+
+	// LEO_MULADD_256
+	VPAND   Y13, Y5, Y14
+	VPSRLQ  $0x04, Y13, Y15
+	VPSHUFB Y14, Y3, Y14
+	VPAND   Y15, Y5, Y15
+	VPSHUFB Y15, Y4, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y9)
+	VMOVDQU Y6, (DI)
+	VMOVDQU Y8, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y7, (R8)
+	VMOVDQU Y9, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y10, (R9)
+	VMOVDQU Y12, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y11, (CX)
+	VMOVDQU Y13, 32(CX)
+	ADDQ    $0x40, CX
+	SUBQ    $0x40, BX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_0(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT48_avx2_0(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 16(AX), Y0
+	MOVQ           t23+40(FP), CX
+	VBROADCASTI128 16(CX), Y1
+	MOVQ           t02+48(FP), DX
+	VBROADCASTI128 (DX), Y2
+	VBROADCASTI128 16(DX), Y3
+	MOVQ           dist+24(FP), DX
+	MOVQ           work_base+0(FP), BX
+	MOVQ           8(BX), SI
+	XORQ           DI, DI
+	MOVQ           (BX)(DI*1), R8
+	ADDQ           DX, DI
+	MOVQ           (BX)(DI*1), R9
+	ADDQ           DX, DI
+	MOVQ           (BX)(DI*1), R10
+	ADDQ           DX, DI
+	MOVQ           (BX)(DI*1), DX
+	MOVQ           $0x0000000f, BX
+	MOVQ           BX, X4
+	VPBROADCASTB   X4, Y4
+
+loop:
+	VMOVDQU (R8), Y5
+	VMOVDQU 32(R8), Y6
+	VMOVDQU (R10), Y9
+	VMOVDQU 32(R10), Y10
+	VMOVDQU (R9), Y7
+	VMOVDQU 32(R9), Y8
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y12
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y4, Y13
+	VPSRLQ  $0x04, Y9, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y4, Y13
+	VPSRLQ  $0x04, Y10, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y6)
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y13
+	VPSRLQ  $0x04, Y11, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y7)
+
+	// LEO_MULADD_256
+	VPAND          Y12, Y4, Y13
+	VPSRLQ         $0x04, Y12, Y14
+	VPSHUFB        Y13, Y2, Y13
+	VPAND          Y14, Y4, Y14
+	VPSHUFB        Y14, Y3, Y14
+	XOR3WAY(     $0x00, Y13, Y14, Y8)
+	VPXOR          Y5, Y9, Y9
+	VPXOR          Y7, Y11, Y11
+	VPXOR          Y6, Y10, Y10
+	VPXOR          Y8, Y12, Y12
+	VBROADCASTI128 (AX), Y13
+
+	// LEO_MULADD_256
+	VPAND   Y7, Y4, Y14
+	VPSRLQ  $0x04, Y7, Y15
+	VPSHUFB Y14, Y13, Y14
+	VPAND   Y15, Y4, Y15
+	VPSHUFB Y15, Y0, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y5)
+
+	// LEO_MULADD_256
+	VPAND          Y8, Y4, Y14
+	VPSRLQ         $0x04, Y8, Y15
+	VPSHUFB        Y14, Y13, Y14
+	VPAND          Y15, Y4, Y15
+	VPSHUFB        Y15, Y0, Y15
+	XOR3WAY(     $0x00, Y14, Y15, Y6)
+	VPXOR          Y7, Y5, Y7
+	VPXOR          Y8, Y6, Y8
+	VBROADCASTI128 (CX), Y13
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y14
+	VPSRLQ  $0x04, Y11, Y15
+	VPSHUFB Y14, Y13, Y14
+	VPAND   Y15, Y4, Y15
+	VPSHUFB Y15, Y1, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y9)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y14
+	VPSRLQ  $0x04, Y12, Y15
+	VPSHUFB Y14, Y13, Y14
+	VPAND   Y15, Y4, Y15
+	VPSHUFB Y15, Y1, Y15
+	XOR3WAY( $0x00, Y14, Y15, Y10)
+	VPXOR   Y9, Y11, Y11
+	VPXOR   Y10, Y12, Y12
+	VMOVDQU Y5, (R8)
+	VMOVDQU Y6, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y7, (R9)
+	VMOVDQU Y8, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y9, (R10)
+	VMOVDQU Y10, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y11, (DX)
+	VMOVDQU Y12, 32(DX)
+	ADDQ    $0x40, DX
+	SUBQ    $0x40, SI
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_1(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT48_avx2_1(SB), NOSPLIT, $0-56
+	MOVQ           t23+40(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           t02+48(FP), AX
+	VBROADCASTI128 (AX), Y2
+	VBROADCASTI128 16(AX), Y3
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X4
+	VPBROADCASTB   X4, Y4
+
+loop:
+	VMOVDQU (SI), Y5
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(SI), Y7
+	VMOVDQU 32(DI), Y8
+	VPXOR   Y6, Y5, Y6
+	VPXOR   Y8, Y7, Y8
+	VMOVDQU (R8), Y9
+	VMOVDQU (AX), Y10
+	VMOVDQU 32(R8), Y11
+	VMOVDQU 32(AX), Y12
+	VPXOR   Y9, Y10, Y10
+	VPXOR   Y11, Y12, Y12
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y4, Y13
+	VPSRLQ  $0x04, Y10, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y9)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y11)
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y7, Y11, Y11
+	VPXOR   Y8, Y12, Y12
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y4, Y13
+	VPSRLQ  $0x04, Y9, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y4, Y13
+	VPSRLQ  $0x04, Y10, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y6)
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y13
+	VPSRLQ  $0x04, Y11, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y7)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y8)
+	VMOVDQU Y5, (SI)
+	VMOVDQU Y7, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y6, (DI)
+	VMOVDQU Y8, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y9, (R8)
+	VMOVDQU Y11, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y10, (AX)
+	VMOVDQU Y12, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_1(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT48_avx2_1(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           t23+40(FP), AX
+	VBROADCASTI128 (AX), Y2
+	VBROADCASTI128 16(AX), Y3
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X4
+	VPBROADCASTB   X4, Y4
+
+loop:
+	VMOVDQU (SI), Y5
+	VMOVDQU 32(SI), Y6
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y10
+	VMOVDQU (DI), Y7
+	VMOVDQU 32(DI), Y8
+	VMOVDQU (AX), Y11
+	VMOVDQU 32(AX), Y12
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y7, Y11, Y11
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y8, Y12, Y12
+
+	// LEO_MULADD_256
+	VPAND   Y7, Y4, Y13
+	VPSRLQ  $0x04, Y7, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y8, Y4, Y13
+	VPSRLQ  $0x04, Y8, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y6)
+	VPXOR   Y7, Y5, Y7
+	VPXOR   Y8, Y6, Y8
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y13
+	VPSRLQ  $0x04, Y11, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y9)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y10)
+	VPXOR   Y9, Y11, Y11
+	VPXOR   Y10, Y12, Y12
+	VMOVDQU Y5, (SI)
+	VMOVDQU Y6, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y7, (DI)
+	VMOVDQU Y8, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y9, (R8)
+	VMOVDQU Y10, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y11, (AX)
+	VMOVDQU Y12, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_2(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT48_avx2_2(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           t02+48(FP), AX
+	VBROADCASTI128 (AX), Y2
+	VBROADCASTI128 16(AX), Y3
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X4
+	VPBROADCASTB   X4, Y4
+
+loop:
+	VMOVDQU (SI), Y5
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(SI), Y7
+	VMOVDQU 32(DI), Y8
+	VPXOR   Y6, Y5, Y6
+	VPXOR   Y8, Y7, Y8
+
+	// LEO_MULADD_256
+	VPAND   Y6, Y4, Y9
+	VPSRLQ  $0x04, Y6, Y10
+	VPSHUFB Y9, Y0, Y9
+	VPAND   Y10, Y4, Y10
+	VPSHUFB Y10, Y1, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y8, Y4, Y9
+	VPSRLQ  $0x04, Y8, Y10
+	VPSHUFB Y9, Y0, Y9
+	VPAND   Y10, Y4, Y10
+	VPSHUFB Y10, Y1, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+	VMOVDQU (R8), Y9
+	VMOVDQU (AX), Y10
+	VMOVDQU 32(R8), Y11
+	VMOVDQU 32(AX), Y12
+	VPXOR   Y9, Y10, Y10
+	VPXOR   Y11, Y12, Y12
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y7, Y11, Y11
+	VPXOR   Y8, Y12, Y12
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y4, Y13
+	VPSRLQ  $0x04, Y9, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y4, Y13
+	VPSRLQ  $0x04, Y10, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y6)
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y13
+	VPSRLQ  $0x04, Y11, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y7)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y8)
+	VMOVDQU Y5, (SI)
+	VMOVDQU Y7, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y6, (DI)
+	VMOVDQU Y8, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y9, (R8)
+	VMOVDQU Y11, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y10, (AX)
+	VMOVDQU Y12, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_2(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT48_avx2_2(SB), NOSPLIT, $0-56
+	MOVQ           t23+40(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           t02+48(FP), AX
+	VBROADCASTI128 (AX), Y2
+	VBROADCASTI128 16(AX), Y3
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X4
+	VPBROADCASTB   X4, Y4
+
+loop:
+	VMOVDQU (SI), Y5
+	VMOVDQU 32(SI), Y6
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y10
+	VMOVDQU (DI), Y7
+	VMOVDQU 32(DI), Y8
+	VMOVDQU (AX), Y11
+	VMOVDQU 32(AX), Y12
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y4, Y13
+	VPSRLQ  $0x04, Y9, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y4, Y13
+	VPSRLQ  $0x04, Y10, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y6)
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y13
+	VPSRLQ  $0x04, Y11, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y7)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y8)
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y7, Y11, Y11
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y8, Y12, Y12
+	VPXOR   Y7, Y5, Y7
+	VPXOR   Y8, Y6, Y8
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y13
+	VPSRLQ  $0x04, Y11, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y9)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y10)
+	VPXOR   Y9, Y11, Y11
+	VPXOR   Y10, Y12, Y12
+	VMOVDQU Y5, (SI)
+	VMOVDQU Y6, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y7, (DI)
+	VMOVDQU Y8, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y9, (R8)
+	VMOVDQU Y10, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y11, (AX)
+	VMOVDQU Y12, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_3(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT48_avx2_3(SB), NOSPLIT, $0-56
+	MOVQ           t02+48(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (SI), Y3
+	VMOVDQU (DI), Y4
+	VMOVDQU 32(SI), Y5
+	VMOVDQU 32(DI), Y6
+	VPXOR   Y4, Y3, Y4
+	VPXOR   Y6, Y5, Y6
+	VMOVDQU (R8), Y7
+	VMOVDQU (AX), Y8
+	VMOVDQU 32(R8), Y9
+	VMOVDQU 32(AX), Y10
+	VPXOR   Y7, Y8, Y8
+	VPXOR   Y9, Y10, Y10
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y4, Y8, Y8
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y6, Y10, Y10
+
+	// LEO_MULADD_256
+	VPAND   Y7, Y2, Y11
+	VPSRLQ  $0x04, Y7, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+
+	// LEO_MULADD_256
+	VPAND   Y8, Y2, Y11
+	VPSRLQ  $0x04, Y8, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y2, Y11
+	VPSRLQ  $0x04, Y9, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y2, Y11
+	VPSRLQ  $0x04, Y10, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VMOVDQU Y3, (SI)
+	VMOVDQU Y5, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y4, (DI)
+	VMOVDQU Y6, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y7, (R8)
+	VMOVDQU Y9, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y8, (AX)
+	VMOVDQU Y10, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_3(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT48_avx2_3(SB), NOSPLIT, $0-56
+	MOVQ           t23+40(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (SI), Y3
+	VMOVDQU 32(SI), Y4
+	VMOVDQU (R8), Y7
+	VMOVDQU 32(R8), Y8
+	VMOVDQU (DI), Y5
+	VMOVDQU 32(DI), Y6
+	VMOVDQU (AX), Y9
+	VMOVDQU 32(AX), Y10
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y4, Y8, Y8
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y5, Y3, Y5
+	VPXOR   Y6, Y4, Y6
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y2, Y11
+	VPSRLQ  $0x04, Y9, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y2, Y11
+	VPSRLQ  $0x04, Y10, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y8)
+	VPXOR   Y7, Y9, Y9
+	VPXOR   Y8, Y10, Y10
+	VMOVDQU Y3, (SI)
+	VMOVDQU Y4, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y5, (DI)
+	VMOVDQU Y6, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y7, (R8)
+	VMOVDQU Y8, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y9, (AX)
+	VMOVDQU Y10, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_4(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT48_avx2_4(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           t23+40(FP), AX
+	VBROADCASTI128 (AX), Y2
+	VBROADCASTI128 16(AX), Y3
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X4
+	VPBROADCASTB   X4, Y4
+
+loop:
+	VMOVDQU (SI), Y5
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(SI), Y7
+	VMOVDQU 32(DI), Y8
+	VPXOR   Y6, Y5, Y6
+	VPXOR   Y8, Y7, Y8
+
+	// LEO_MULADD_256
+	VPAND   Y6, Y4, Y9
+	VPSRLQ  $0x04, Y6, Y10
+	VPSHUFB Y9, Y0, Y9
+	VPAND   Y10, Y4, Y10
+	VPSHUFB Y10, Y1, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y8, Y4, Y9
+	VPSRLQ  $0x04, Y8, Y10
+	VPSHUFB Y9, Y0, Y9
+	VPAND   Y10, Y4, Y10
+	VPSHUFB Y10, Y1, Y10
+	XOR3WAY( $0x00, Y9, Y10, Y7)
+	VMOVDQU (R8), Y9
+	VMOVDQU (AX), Y10
+	VMOVDQU 32(R8), Y11
+	VMOVDQU 32(AX), Y12
+	VPXOR   Y9, Y10, Y10
+	VPXOR   Y11, Y12, Y12
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y4, Y13
+	VPSRLQ  $0x04, Y10, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y9)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y11)
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y7, Y11, Y11
+	VPXOR   Y8, Y12, Y12
+	VMOVDQU Y5, (SI)
+	VMOVDQU Y7, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y6, (DI)
+	VMOVDQU Y8, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y9, (R8)
+	VMOVDQU Y11, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y10, (AX)
+	VMOVDQU Y12, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_4(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT48_avx2_4(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           t02+48(FP), AX
+	VBROADCASTI128 (AX), Y2
+	VBROADCASTI128 16(AX), Y3
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X4
+	VPBROADCASTB   X4, Y4
+
+loop:
+	VMOVDQU (SI), Y5
+	VMOVDQU 32(SI), Y6
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y10
+	VMOVDQU (DI), Y7
+	VMOVDQU 32(DI), Y8
+	VMOVDQU (AX), Y11
+	VMOVDQU 32(AX), Y12
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y4, Y13
+	VPSRLQ  $0x04, Y9, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y4, Y13
+	VPSRLQ  $0x04, Y10, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y6)
+
+	// LEO_MULADD_256
+	VPAND   Y11, Y4, Y13
+	VPSRLQ  $0x04, Y11, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y7)
+
+	// LEO_MULADD_256
+	VPAND   Y12, Y4, Y13
+	VPSRLQ  $0x04, Y12, Y14
+	VPSHUFB Y13, Y2, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y3, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y8)
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y7, Y11, Y11
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y8, Y12, Y12
+
+	// LEO_MULADD_256
+	VPAND   Y7, Y4, Y13
+	VPSRLQ  $0x04, Y7, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y8, Y4, Y13
+	VPSRLQ  $0x04, Y8, Y14
+	VPSHUFB Y13, Y0, Y13
+	VPAND   Y14, Y4, Y14
+	VPSHUFB Y14, Y1, Y14
+	XOR3WAY( $0x00, Y13, Y14, Y6)
+	VPXOR   Y7, Y5, Y7
+	VPXOR   Y8, Y6, Y8
+	VPXOR   Y9, Y11, Y11
+	VPXOR   Y10, Y12, Y12
+	VMOVDQU Y5, (SI)
+	VMOVDQU Y6, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y7, (DI)
+	VMOVDQU Y8, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y9, (R8)
+	VMOVDQU Y10, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y11, (AX)
+	VMOVDQU Y12, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_5(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT48_avx2_5(SB), NOSPLIT, $0-56
+	MOVQ           t23+40(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (SI), Y3
+	VMOVDQU (DI), Y4
+	VMOVDQU 32(SI), Y5
+	VMOVDQU 32(DI), Y6
+	VPXOR   Y4, Y3, Y4
+	VPXOR   Y6, Y5, Y6
+	VMOVDQU (R8), Y7
+	VMOVDQU (AX), Y8
+	VMOVDQU 32(R8), Y9
+	VMOVDQU 32(AX), Y10
+	VPXOR   Y7, Y8, Y8
+	VPXOR   Y9, Y10, Y10
+
+	// LEO_MULADD_256
+	VPAND   Y8, Y2, Y11
+	VPSRLQ  $0x04, Y8, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y7)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y2, Y11
+	VPSRLQ  $0x04, Y10, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y9)
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y4, Y8, Y8
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y6, Y10, Y10
+	VMOVDQU Y3, (SI)
+	VMOVDQU Y5, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y4, (DI)
+	VMOVDQU Y6, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y7, (R8)
+	VMOVDQU Y9, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y8, (AX)
+	VMOVDQU Y10, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_5(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT48_avx2_5(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (SI), Y3
+	VMOVDQU 32(SI), Y4
+	VMOVDQU (R8), Y7
+	VMOVDQU 32(R8), Y8
+	VMOVDQU (DI), Y5
+	VMOVDQU 32(DI), Y6
+	VMOVDQU (AX), Y9
+	VMOVDQU 32(AX), Y10
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y4, Y8, Y8
+	VPXOR   Y6, Y10, Y10
+
+	// LEO_MULADD_256
+	VPAND   Y5, Y2, Y11
+	VPSRLQ  $0x04, Y5, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+
+	// LEO_MULADD_256
+	VPAND   Y6, Y2, Y11
+	VPSRLQ  $0x04, Y6, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+	VPXOR   Y5, Y3, Y5
+	VPXOR   Y6, Y4, Y6
+	VPXOR   Y7, Y9, Y9
+	VPXOR   Y8, Y10, Y10
+	VMOVDQU Y3, (SI)
+	VMOVDQU Y4, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y5, (DI)
+	VMOVDQU Y6, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y7, (R8)
+	VMOVDQU Y8, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y9, (AX)
+	VMOVDQU Y10, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_6(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·ifftDIT48_avx2_6(SB), NOSPLIT, $0-56
+	MOVQ           t01+32(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (SI), Y3
+	VMOVDQU (DI), Y4
+	VMOVDQU 32(SI), Y5
+	VMOVDQU 32(DI), Y6
+	VPXOR   Y4, Y3, Y4
+	VPXOR   Y6, Y5, Y6
+
+	// LEO_MULADD_256
+	VPAND   Y4, Y2, Y7
+	VPSRLQ  $0x04, Y4, Y8
+	VPSHUFB Y7, Y0, Y7
+	VPAND   Y8, Y2, Y8
+	VPSHUFB Y8, Y1, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y3)
+
+	// LEO_MULADD_256
+	VPAND   Y6, Y2, Y7
+	VPSRLQ  $0x04, Y6, Y8
+	VPSHUFB Y7, Y0, Y7
+	VPAND   Y8, Y2, Y8
+	VPSHUFB Y8, Y1, Y8
+	XOR3WAY( $0x00, Y7, Y8, Y5)
+	VMOVDQU (R8), Y7
+	VMOVDQU (AX), Y8
+	VMOVDQU 32(R8), Y9
+	VMOVDQU 32(AX), Y10
+	VPXOR   Y7, Y8, Y8
+	VPXOR   Y9, Y10, Y10
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y4, Y8, Y8
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y6, Y10, Y10
+	VMOVDQU Y3, (SI)
+	VMOVDQU Y5, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y4, (DI)
+	VMOVDQU Y6, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y7, (R8)
+	VMOVDQU Y9, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y8, (AX)
+	VMOVDQU Y10, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_6(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, AVX512F, AVX512VL, SSE2
+TEXT ·fftDIT48_avx2_6(SB), NOSPLIT, $0-56
+	MOVQ           t02+48(FP), AX
+	VBROADCASTI128 (AX), Y0
+	VBROADCASTI128 16(AX), Y1
+	MOVQ           dist+24(FP), AX
+	MOVQ           work_base+0(FP), CX
+	MOVQ           8(CX), DX
+	XORQ           BX, BX
+	MOVQ           (CX)(BX*1), SI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), DI
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), R8
+	ADDQ           AX, BX
+	MOVQ           (CX)(BX*1), AX
+	MOVQ           $0x0000000f, CX
+	MOVQ           CX, X2
+	VPBROADCASTB   X2, Y2
+
+loop:
+	VMOVDQU (SI), Y3
+	VMOVDQU 32(SI), Y4
+	VMOVDQU (R8), Y7
+	VMOVDQU 32(R8), Y8
+	VMOVDQU (DI), Y5
+	VMOVDQU 32(DI), Y6
+	VMOVDQU (AX), Y9
+	VMOVDQU 32(AX), Y10
+
+	// LEO_MULADD_256
+	VPAND   Y7, Y2, Y11
+	VPSRLQ  $0x04, Y7, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y3)
+
+	// LEO_MULADD_256
+	VPAND   Y8, Y2, Y11
+	VPSRLQ  $0x04, Y8, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y4)
+
+	// LEO_MULADD_256
+	VPAND   Y9, Y2, Y11
+	VPSRLQ  $0x04, Y9, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y5)
+
+	// LEO_MULADD_256
+	VPAND   Y10, Y2, Y11
+	VPSRLQ  $0x04, Y10, Y12
+	VPSHUFB Y11, Y0, Y11
+	VPAND   Y12, Y2, Y12
+	VPSHUFB Y12, Y1, Y12
+	XOR3WAY( $0x00, Y11, Y12, Y6)
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y5, Y9, Y9
+	VPXOR   Y4, Y8, Y8
+	VPXOR   Y6, Y10, Y10
+	VPXOR   Y5, Y3, Y5
+	VPXOR   Y6, Y4, Y6
+	VPXOR   Y7, Y9, Y9
+	VPXOR   Y8, Y10, Y10
+	VMOVDQU Y3, (SI)
+	VMOVDQU Y4, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y5, (DI)
+	VMOVDQU Y6, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y7, (R8)
+	VMOVDQU Y8, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y9, (AX)
+	VMOVDQU Y10, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_avx2_7(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, SSE2
+TEXT ·ifftDIT48_avx2_7(SB), NOSPLIT, $0-56
+	MOVQ         dist+24(FP), AX
+	MOVQ         work_base+0(FP), CX
+	MOVQ         8(CX), DX
+	XORQ         BX, BX
+	MOVQ         (CX)(BX*1), SI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), DI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), R8
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), AX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+
+loop:
+	VMOVDQU (SI), Y0
+	VMOVDQU (DI), Y1
+	VMOVDQU 32(SI), Y2
+	VMOVDQU 32(DI), Y3
+	VPXOR   Y1, Y0, Y1
+	VPXOR   Y3, Y2, Y3
+	VMOVDQU (R8), Y4
+	VMOVDQU (AX), Y5
+	VMOVDQU 32(R8), Y6
+	VMOVDQU 32(AX), Y7
+	VPXOR   Y4, Y5, Y5
+	VPXOR   Y6, Y7, Y7
+	VPXOR   Y0, Y4, Y4
+	VPXOR   Y1, Y5, Y5
+	VPXOR   Y2, Y6, Y6
+	VPXOR   Y3, Y7, Y7
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y2, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y1, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y6, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y5, (AX)
+	VMOVDQU Y7, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_avx2_7(work [][]byte, dist int, t01 *[32]uint8, t23 *[32]uint8, t02 *[32]uint8)
+// Requires: AVX, AVX2, SSE2
+TEXT ·fftDIT48_avx2_7(SB), NOSPLIT, $0-56
+	MOVQ         dist+24(FP), AX
+	MOVQ         work_base+0(FP), CX
+	MOVQ         8(CX), DX
+	XORQ         BX, BX
+	MOVQ         (CX)(BX*1), SI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), DI
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), R8
+	ADDQ         AX, BX
+	MOVQ         (CX)(BX*1), AX
+	MOVQ         $0x0000000f, CX
+	MOVQ         CX, X0
+	VPBROADCASTB X0, Y0
+
+loop:
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (R8), Y4
+	VMOVDQU 32(R8), Y5
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+	VMOVDQU (AX), Y6
+	VMOVDQU 32(AX), Y7
+	VPXOR   Y0, Y4, Y4
+	VPXOR   Y2, Y6, Y6
+	VPXOR   Y1, Y5, Y5
+	VPXOR   Y3, Y7, Y7
+	VPXOR   Y2, Y0, Y2
+	VPXOR   Y3, Y1, Y3
+	VPXOR   Y4, Y6, Y6
+	VPXOR   Y5, Y7, Y7
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y6, (AX)
+	VMOVDQU Y7, 32(AX)
+	ADDQ    $0x40, AX
+	SUBQ    $0x40, DX
+	JA      loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_0(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·ifftDIT48_gfni_0(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	VBROADCASTF32X2 t23+40(FP), Z1
+	VBROADCASTF32X2 t02+48(FP), Z2
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z3
+	VMOVDQU64 (DI), Z4
+	VMOVDQU64 (R8), Z5
+	VMOVDQU64 (AX), Z6
+	VXORPD    Z4, Z3, Z4
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z7
+	VXORPD         Z3, Z7, Z3
+	VXORPD         Z5, Z6, Z6
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z6, Z7
+	VPTERNLOGD     $0x96, Z7, Z3, Z5
+	VXORPD         Z4, Z6, Z6
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z2, Z5, Z7
+	VXORPD         Z3, Z7, Z3
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z2, Z6, Z7
+	VXORPD         Z4, Z7, Z4
+	VMOVDQU64      Z3, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z4, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z5, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z6, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_0(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·fftDIT48_gfni_0(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	VBROADCASTF32X2 t23+40(FP), Z1
+	VBROADCASTF32X2 t02+48(FP), Z2
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z3
+	VMOVDQU64 (DI), Z4
+	VMOVDQU64 (R8), Z5
+	VMOVDQU64 (AX), Z6
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z2, Z5, Z7
+	VXORPD         Z3, Z7, Z3
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z2, Z6, Z7
+	VXORPD         Z4, Z7, Z4
+	VXORPD         Z3, Z5, Z5
+	VXORPD         Z4, Z6, Z6
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z7
+	VXORPD         Z3, Z7, Z3
+	VXORPD         Z4, Z3, Z4
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z6, Z7
+	VXORPD         Z5, Z7, Z5
+	VXORPD         Z5, Z6, Z6
+	VMOVDQU64      Z3, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z4, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z5, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z6, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_1(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·ifftDIT48_gfni_1(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t23+40(FP), Z0
+	VBROADCASTF32X2 t02+48(FP), Z1
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z2
+	VMOVDQU64 (DI), Z3
+	VMOVDQU64 (R8), Z4
+	VMOVDQU64 (AX), Z5
+	VXORPD    Z3, Z2, Z3
+	VXORPD    Z4, Z5, Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z5, Z6
+	VPTERNLOGD     $0x96, Z6, Z2, Z4
+	VXORPD         Z3, Z5, Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z4, Z6
+	VXORPD         Z2, Z6, Z2
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z6
+	VXORPD         Z3, Z6, Z3
+	VMOVDQU64      Z2, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z3, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z4, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z5, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_1(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·fftDIT48_gfni_1(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	VBROADCASTF32X2 t23+40(FP), Z1
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z2
+	VMOVDQU64 (DI), Z3
+	VMOVDQU64 (R8), Z4
+	VMOVDQU64 (AX), Z5
+	VXORPD    Z2, Z4, Z4
+	VXORPD    Z3, Z5, Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z6
+	VXORPD         Z2, Z6, Z2
+	VXORPD         Z3, Z2, Z3
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z6
+	VXORPD         Z4, Z6, Z4
+	VXORPD         Z4, Z5, Z5
+	VMOVDQU64      Z2, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z3, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z4, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z5, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_2(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·ifftDIT48_gfni_2(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	VBROADCASTF32X2 t02+48(FP), Z1
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z2
+	VMOVDQU64 (DI), Z3
+	VMOVDQU64 (R8), Z4
+	VMOVDQU64 (AX), Z5
+	VXORPD    Z3, Z2, Z3
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z6
+	VXORPD         Z2, Z6, Z2
+	VXORPD         Z4, Z5, Z5
+	VXORPD         Z2, Z4, Z4
+	VXORPD         Z3, Z5, Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z4, Z6
+	VXORPD         Z2, Z6, Z2
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z6
+	VXORPD         Z3, Z6, Z3
+	VMOVDQU64      Z2, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z3, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z4, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z5, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_2(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·fftDIT48_gfni_2(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t23+40(FP), Z0
+	VBROADCASTF32X2 t02+48(FP), Z1
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z2
+	VMOVDQU64 (DI), Z3
+	VMOVDQU64 (R8), Z4
+	VMOVDQU64 (AX), Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z4, Z6
+	VXORPD         Z2, Z6, Z2
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z6
+	VXORPD         Z3, Z6, Z3
+	VXORPD         Z2, Z4, Z4
+	VXORPD         Z3, Z5, Z5
+	VXORPD         Z3, Z2, Z3
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z5, Z6
+	VXORPD         Z4, Z6, Z4
+	VXORPD         Z4, Z5, Z5
+	VMOVDQU64      Z2, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z3, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z4, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z5, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_3(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·ifftDIT48_gfni_3(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t02+48(FP), Z0
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z1
+	VMOVDQU64 (DI), Z2
+	VMOVDQU64 (R8), Z3
+	VMOVDQU64 (AX), Z4
+	VXORPD    Z2, Z1, Z2
+	VXORPD    Z3, Z4, Z4
+	VXORPD    Z1, Z3, Z3
+	VXORPD    Z2, Z4, Z4
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z5
+	VXORPD         Z1, Z5, Z1
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z5
+	VXORPD         Z2, Z5, Z2
+	VMOVDQU64      Z1, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z2, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z3, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z4, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_3(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·fftDIT48_gfni_3(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t23+40(FP), Z0
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z1
+	VMOVDQU64 (DI), Z2
+	VMOVDQU64 (R8), Z3
+	VMOVDQU64 (AX), Z4
+	VXORPD    Z1, Z3, Z3
+	VXORPD    Z2, Z4, Z4
+	VXORPD    Z2, Z1, Z2
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z5
+	VXORPD         Z3, Z5, Z3
+	VXORPD         Z3, Z4, Z4
+	VMOVDQU64      Z1, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z2, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z3, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z4, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_4(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·ifftDIT48_gfni_4(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	VBROADCASTF32X2 t23+40(FP), Z1
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z2
+	VMOVDQU64 (DI), Z3
+	VMOVDQU64 (R8), Z4
+	VMOVDQU64 (AX), Z5
+	VXORPD    Z3, Z2, Z3
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z6
+	VXORPD         Z2, Z6, Z2
+	VXORPD         Z4, Z5, Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z6
+	VPTERNLOGD     $0x96, Z6, Z2, Z4
+	VXORPD         Z3, Z5, Z5
+	VMOVDQU64      Z2, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z3, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z4, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z5, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_4(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·fftDIT48_gfni_4(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	VBROADCASTF32X2 t02+48(FP), Z1
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z2
+	VMOVDQU64 (DI), Z3
+	VMOVDQU64 (R8), Z4
+	VMOVDQU64 (AX), Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z4, Z6
+	VXORPD         Z2, Z6, Z2
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z1, Z5, Z6
+	VXORPD         Z3, Z6, Z3
+	VXORPD         Z2, Z4, Z4
+	VXORPD         Z3, Z5, Z5
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z6
+	VXORPD         Z2, Z6, Z2
+	VXORPD         Z3, Z2, Z3
+	VXORPD         Z4, Z5, Z5
+	VMOVDQU64      Z2, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z3, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z4, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z5, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_5(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·ifftDIT48_gfni_5(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t23+40(FP), Z0
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z1
+	VMOVDQU64 (DI), Z2
+	VMOVDQU64 (R8), Z3
+	VMOVDQU64 (AX), Z4
+	VXORPD    Z2, Z1, Z2
+	VXORPD    Z3, Z4, Z4
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z5
+	VPTERNLOGD     $0x96, Z5, Z1, Z3
+	VXORPD         Z2, Z4, Z4
+	VMOVDQU64      Z1, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z2, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z3, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z4, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_5(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·fftDIT48_gfni_5(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z1
+	VMOVDQU64 (DI), Z2
+	VMOVDQU64 (R8), Z3
+	VMOVDQU64 (AX), Z4
+	VXORPD    Z1, Z3, Z3
+	VXORPD    Z2, Z4, Z4
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z2, Z5
+	VXORPD         Z1, Z5, Z1
+	VXORPD         Z2, Z1, Z2
+	VXORPD         Z3, Z4, Z4
+	VMOVDQU64      Z1, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z2, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z3, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z4, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_6(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·ifftDIT48_gfni_6(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t01+32(FP), Z0
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z1
+	VMOVDQU64 (DI), Z2
+	VMOVDQU64 (R8), Z3
+	VMOVDQU64 (AX), Z4
+	VXORPD    Z2, Z1, Z2
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z2, Z5
+	VXORPD         Z1, Z5, Z1
+	VXORPD         Z3, Z4, Z4
+	VXORPD         Z1, Z3, Z3
+	VXORPD         Z2, Z4, Z4
+	VMOVDQU64      Z1, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z2, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z3, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z4, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_6(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F, GFNI
+TEXT ·fftDIT48_gfni_6(SB), NOSPLIT, $0-56
+	VBROADCASTF32X2 t02+48(FP), Z0
+	MOVQ            dist+24(FP), AX
+	MOVQ            work_base+0(FP), CX
+	MOVQ            8(CX), DX
+	XORQ            BX, BX
+	MOVQ            (CX)(BX*1), SI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), DI
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), R8
+	ADDQ            AX, BX
+	MOVQ            (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z1
+	VMOVDQU64 (DI), Z2
+	VMOVDQU64 (R8), Z3
+	VMOVDQU64 (AX), Z4
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z3, Z5
+	VXORPD         Z1, Z5, Z1
+
+	// LEO_MULADD_512
+	VGF2P8AFFINEQB $0x00, Z0, Z4, Z5
+	VXORPD         Z2, Z5, Z2
+	VXORPD         Z1, Z3, Z3
+	VXORPD         Z2, Z4, Z4
+	VXORPD         Z2, Z1, Z2
+	VXORPD         Z3, Z4, Z4
+	VMOVDQU64      Z1, (SI)
+	ADDQ           $0x40, SI
+	VMOVDQU64      Z2, (DI)
+	ADDQ           $0x40, DI
+	VMOVDQU64      Z3, (R8)
+	ADDQ           $0x40, R8
+	VMOVDQU64      Z4, (AX)
+	ADDQ           $0x40, AX
+	SUBQ           $0x40, DX
+	JA             loop
+	VZEROUPPER
+	RET
+
+// func ifftDIT48_gfni_7(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F
+TEXT ·ifftDIT48_gfni_7(SB), NOSPLIT, $0-56
+	MOVQ dist+24(FP), AX
+	MOVQ work_base+0(FP), CX
+	MOVQ 8(CX), DX
+	XORQ BX, BX
+	MOVQ (CX)(BX*1), SI
+	ADDQ AX, BX
+	MOVQ (CX)(BX*1), DI
+	ADDQ AX, BX
+	MOVQ (CX)(BX*1), R8
+	ADDQ AX, BX
+	MOVQ (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z0
+	VMOVDQU64 (DI), Z1
+	VMOVDQU64 (R8), Z2
+	VMOVDQU64 (AX), Z3
+	VXORPD    Z1, Z0, Z1
+	VXORPD    Z2, Z3, Z3
+	VXORPD    Z0, Z2, Z2
+	VXORPD    Z1, Z3, Z3
+	VMOVDQU64 Z0, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z1, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z2, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z3, (AX)
+	ADDQ      $0x40, AX
+	SUBQ      $0x40, DX
+	JA        loop
+	VZEROUPPER
+	RET
+
+// func fftDIT48_gfni_7(work [][]byte, dist int, t01 uint64, t23 uint64, t02 uint64)
+// Requires: AVX, AVX512DQ, AVX512F
+TEXT ·fftDIT48_gfni_7(SB), NOSPLIT, $0-56
+	MOVQ dist+24(FP), AX
+	MOVQ work_base+0(FP), CX
+	MOVQ 8(CX), DX
+	XORQ BX, BX
+	MOVQ (CX)(BX*1), SI
+	ADDQ AX, BX
+	MOVQ (CX)(BX*1), DI
+	ADDQ AX, BX
+	MOVQ (CX)(BX*1), R8
+	ADDQ AX, BX
+	MOVQ (CX)(BX*1), AX
+
+loop:
+	VMOVDQU64 (SI), Z0
+	VMOVDQU64 (DI), Z1
+	VMOVDQU64 (R8), Z2
+	VMOVDQU64 (AX), Z3
+	VXORPD    Z0, Z2, Z2
+	VXORPD    Z1, Z3, Z3
+	VXORPD    Z1, Z0, Z1
+	VXORPD    Z2, Z3, Z3
+	VMOVDQU64 Z0, (SI)
+	ADDQ      $0x40, SI
+	VMOVDQU64 Z1, (DI)
+	ADDQ      $0x40, DI
+	VMOVDQU64 Z2, (R8)
+	ADDQ      $0x40, R8
+	VMOVDQU64 Z3, (AX)
+	ADDQ      $0x40, AX
+	SUBQ      $0x40, DX
+	JA        loop
+	VZEROUPPER
 	RET
diff --git a/galois_gen_none.go b/galois_gen_none.go
index b4917bc..11929e6 100644
--- a/galois_gen_none.go
+++ b/galois_gen_none.go
@@ -1,11 +1,26 @@
-//+build !amd64 noasm appengine gccgo nogen
+//go:build !amd64 || noasm || appengine || gccgo || nogen
+// +build !amd64 noasm appengine gccgo nogen
 
 package reedsolomon
 
-const maxAvx2Inputs = 0
-const maxAvx2Outputs = 0
+const maxAvx2Inputs = 1
+const maxAvx2Outputs = 1
+const minAvx2Size = 1
+const avxSizeMask = 0
 const avx2CodeGen = false
 
 func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
-	panic("avx2 codegen not available")
+	panic("codegen not available")
+}
+
+func galMulSlicesAvx2Xor(matrix []byte, in, out [][]byte, start, stop int) int {
+	panic("codegen not available")
+}
+
+func galMulSlicesGFNI(matrix []uint64, in, out [][]byte, start, stop int) int {
+	panic("codegen not available")
+}
+
+func galMulSlicesGFNIXor(matrix []uint64, in, out [][]byte, start, stop int) int {
+	panic("codegen not available")
 }
diff --git a/galois_gen_switch_amd64.go b/galois_gen_switch_amd64.go
index 9985fc6..ffc1bb1 100644
--- a/galois_gen_switch_amd64.go
+++ b/galois_gen_switch_amd64.go
@@ -1,35 +1,35 @@
 // Code generated by command: go generate gen.go. DO NOT EDIT.
 
-// +build !appengine
-// +build !noasm
-// +build gc
-// +build !nogen
+//go:build !appengine && !noasm && gc && !nogen
+// +build !appengine,!noasm,gc,!nogen
 
 package reedsolomon
 
-import "fmt"
+import (
+	"fmt"
+)
 
-const avx2CodeGen = true
-const maxAvx2Inputs = 10
-const maxAvx2Outputs = 10
+const (
+	avx2CodeGen    = true
+	maxAvx2Inputs  = 10
+	maxAvx2Outputs = 10
+	minAvx2Size    = 64
+	avxSizeMask    = maxInt - (minAvx2Size - 1)
+)
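+
+// Note: avxSizeMask rounds the byte count down to a whole number of minAvx2Size
+// (64) byte blocks; masking with maxInt-(minAvx2Size-1) clears the low six bits,
+// e.g. (stop-start) = 1000 gives n = 960. Each case below returns n, the number
+// of bytes actually processed, leaving any tail bytes to the caller.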
 
 func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
-	n := stop - start
-	n = (n >> 5) << 5
+	n := (stop - start) & avxSizeMask
 
 	switch len(in) {
 	case 1:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_1x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_1x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_1x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -57,15 +57,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 2:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_2x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_2x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_2x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -93,15 +90,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 3:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_3x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_3x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_3x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -129,15 +123,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 4:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_4x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_4x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_4x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -165,15 +156,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 5:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_5x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_5x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_5x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -201,15 +189,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 6:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_6x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_6x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_6x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -237,15 +222,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 7:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_7x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_7x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_7x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -273,15 +255,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 8:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_8x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_8x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_8x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -309,15 +288,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 9:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_9x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_9x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_9x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -345,15 +321,12 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	case 10:
 		switch len(out) {
 		case 1:
-			n = (n >> 6) << 6
 			mulAvxTwo_10x1_64(matrix, in, out, start, n)
 			return n
 		case 2:
-			n = (n >> 6) << 6
 			mulAvxTwo_10x2_64(matrix, in, out, start, n)
 			return n
 		case 3:
-			n = (n >> 6) << 6
 			mulAvxTwo_10x3_64(matrix, in, out, start, n)
 			return n
 		case 4:
@@ -381,3 +354,1017 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	}
 	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
 }
+
+func galMulSlicesAvx2Xor(matrix []byte, in, out [][]byte, start, stop int) int {
+	n := (stop - start) & avxSizeMask
+
+	switch len(in) {
+	case 1:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_1x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_1x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_1x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_1x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_1x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_1x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_1x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_1x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_1x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_1x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 2:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_2x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_2x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_2x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_2x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_2x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_2x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_2x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_2x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_2x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_2x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 3:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_3x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_3x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_3x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_3x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_3x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_3x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_3x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_3x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_3x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_3x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 4:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_4x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_4x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_4x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_4x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_4x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_4x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_4x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_4x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_4x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_4x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 5:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_5x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_5x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_5x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_5x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_5x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_5x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_5x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_5x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_5x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_5x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 6:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_6x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_6x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_6x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_6x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_6x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_6x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_6x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_6x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_6x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_6x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 7:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_7x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_7x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_7x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_7x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_7x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_7x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_7x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_7x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_7x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_7x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 8:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_8x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_8x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_8x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_8x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_8x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_8x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_8x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_8x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_8x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_8x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 9:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_9x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_9x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_9x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_9x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_9x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_9x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_9x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_9x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_9x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_9x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 10:
+		switch len(out) {
+		case 1:
+			mulAvxTwo_10x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulAvxTwo_10x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulAvxTwo_10x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulAvxTwo_10x4Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulAvxTwo_10x5Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulAvxTwo_10x6Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulAvxTwo_10x7Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulAvxTwo_10x8Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulAvxTwo_10x9Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulAvxTwo_10x10Xor(matrix, in, out, start, n)
+			return n
+		}
+	}
+	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
+}
+
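+// galMulSlicesGFNI mirrors galMulSlicesAvx2 for GFNI: each matrix element is a
+// 64-bit GF(2^8) affine transform consumed by the VGF2P8AFFINEQB-based kernels.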
+func galMulSlicesGFNI(matrix []uint64, in, out [][]byte, start, stop int) int {
+	n := (stop - start) & avxSizeMask
+
+	switch len(in) {
+	case 1:
+		switch len(out) {
+		case 1:
+			mulGFNI_1x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_1x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_1x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_1x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_1x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_1x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_1x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_1x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_1x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_1x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 2:
+		switch len(out) {
+		case 1:
+			mulGFNI_2x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_2x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_2x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_2x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_2x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_2x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_2x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_2x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_2x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_2x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 3:
+		switch len(out) {
+		case 1:
+			mulGFNI_3x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_3x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_3x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_3x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_3x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_3x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_3x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_3x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_3x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_3x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 4:
+		switch len(out) {
+		case 1:
+			mulGFNI_4x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_4x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_4x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_4x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_4x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_4x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_4x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_4x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_4x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_4x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 5:
+		switch len(out) {
+		case 1:
+			mulGFNI_5x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_5x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_5x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_5x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_5x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_5x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_5x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_5x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_5x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_5x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 6:
+		switch len(out) {
+		case 1:
+			mulGFNI_6x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_6x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_6x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_6x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_6x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_6x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_6x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_6x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_6x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_6x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 7:
+		switch len(out) {
+		case 1:
+			mulGFNI_7x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_7x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_7x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_7x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_7x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_7x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_7x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_7x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_7x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_7x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 8:
+		switch len(out) {
+		case 1:
+			mulGFNI_8x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_8x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_8x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_8x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_8x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_8x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_8x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_8x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_8x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_8x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 9:
+		switch len(out) {
+		case 1:
+			mulGFNI_9x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_9x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_9x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_9x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_9x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_9x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_9x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_9x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_9x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_9x10_64(matrix, in, out, start, n)
+			return n
+		}
+	case 10:
+		switch len(out) {
+		case 1:
+			mulGFNI_10x1_64(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_10x2_64(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_10x3_64(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_10x4_64(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_10x5_64(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_10x6_64(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_10x7_64(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_10x8_64(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_10x9_64(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_10x10_64(matrix, in, out, start, n)
+			return n
+		}
+	}
+	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
+}
+
+func galMulSlicesGFNIXor(matrix []uint64, in, out [][]byte, start, stop int) int {
+	n := (stop - start) & avxSizeMask
+
+	switch len(in) {
+	case 1:
+		switch len(out) {
+		case 1:
+			mulGFNI_1x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_1x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_1x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_1x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_1x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_1x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_1x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_1x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_1x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_1x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 2:
+		switch len(out) {
+		case 1:
+			mulGFNI_2x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_2x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_2x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_2x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_2x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_2x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_2x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_2x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_2x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_2x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 3:
+		switch len(out) {
+		case 1:
+			mulGFNI_3x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_3x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_3x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_3x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_3x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_3x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_3x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_3x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_3x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_3x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 4:
+		switch len(out) {
+		case 1:
+			mulGFNI_4x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_4x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_4x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_4x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_4x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_4x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_4x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_4x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_4x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_4x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 5:
+		switch len(out) {
+		case 1:
+			mulGFNI_5x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_5x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_5x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_5x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_5x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_5x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_5x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_5x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_5x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_5x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 6:
+		switch len(out) {
+		case 1:
+			mulGFNI_6x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_6x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_6x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_6x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_6x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_6x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_6x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_6x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_6x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_6x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 7:
+		switch len(out) {
+		case 1:
+			mulGFNI_7x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_7x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_7x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_7x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_7x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_7x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_7x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_7x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_7x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_7x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 8:
+		switch len(out) {
+		case 1:
+			mulGFNI_8x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_8x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_8x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_8x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_8x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_8x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_8x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_8x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_8x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_8x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 9:
+		switch len(out) {
+		case 1:
+			mulGFNI_9x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_9x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_9x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_9x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_9x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_9x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_9x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_9x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_9x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_9x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	case 10:
+		switch len(out) {
+		case 1:
+			mulGFNI_10x1_64Xor(matrix, in, out, start, n)
+			return n
+		case 2:
+			mulGFNI_10x2_64Xor(matrix, in, out, start, n)
+			return n
+		case 3:
+			mulGFNI_10x3_64Xor(matrix, in, out, start, n)
+			return n
+		case 4:
+			mulGFNI_10x4_64Xor(matrix, in, out, start, n)
+			return n
+		case 5:
+			mulGFNI_10x5_64Xor(matrix, in, out, start, n)
+			return n
+		case 6:
+			mulGFNI_10x6_64Xor(matrix, in, out, start, n)
+			return n
+		case 7:
+			mulGFNI_10x7_64Xor(matrix, in, out, start, n)
+			return n
+		case 8:
+			mulGFNI_10x8_64Xor(matrix, in, out, start, n)
+			return n
+		case 9:
+			mulGFNI_10x9_64Xor(matrix, in, out, start, n)
+			return n
+		case 10:
+			mulGFNI_10x10_64Xor(matrix, in, out, start, n)
+			return n
+		}
+	}
+	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
+}
diff --git a/galois_noasm.go b/galois_noasm.go
index 1d00e06..9043601 100644
--- a/galois_noasm.go
+++ b/galois_noasm.go
@@ -1,6 +1,7 @@
-//+build !amd64 noasm appengine gccgo
-//+build !arm64 noasm appengine gccgo
-//+build !ppc64le noasm appengine gccgo
+//go:build (!amd64 || noasm || appengine || gccgo) && (!arm64 || noasm || appengine || gccgo) && (!ppc64le || noasm || appengine || gccgo)
+// +build !amd64 noasm appengine gccgo
+// +build !arm64 noasm appengine gccgo
+// +build !ppc64le noasm appengine gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 
@@ -21,9 +22,7 @@ func galMulSlice(c byte, in, out []byte, o *options) {
 func galMulSliceXor(c byte, in, out []byte, o *options) {
 	out = out[:len(in)]
 	if c == 1 {
-		for n, input := range in {
-			out[n] ^= input
-		}
+		sliceXor(in, out, o)
 		return
 	}
 	mt := mulTable[c][:256]
@@ -32,13 +31,67 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 	}
 }
 
-// slice galois add
+// simple slice xor
 func sliceXor(in, out []byte, o *options) {
-	for n, input := range in {
-		out[n] ^= input
-	}
+	sliceXorGo(in, out, o)
 }
 
 func init() {
 	defaultOptions.useAVX512 = false
 }
+
+// 4-way butterfly
+func ifftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	ifftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func ifftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	ifftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func fftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	fftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func fftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	fftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 2-way butterfly forward
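+// The reference version computes x ^= y * log_m, then y ^= x; ifftDIT2 below
+// performs the same two steps in the opposite order and therefore undoes it.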
+func fftDIT2(x, y []byte, log_m ffe, o *options) {
+	// Reference version:
+	refMulAdd(x, y, log_m)
+	sliceXorGo(x, y, o)
+}
+
+// 2-way butterfly forward
+func fftDIT28(x, y []byte, log_m ffe8, o *options) {
+	// Reference version:
+	refMulAdd8(x, y, log_m)
+	sliceXorGo(x, y, o)
+}
+
+// 2-way butterfly inverse
+func ifftDIT2(x, y []byte, log_m ffe, o *options) {
+	// Reference version:
+	sliceXorGo(x, y, o)
+	refMulAdd(x, y, log_m)
+}
+
+// 2-way butterfly inverse
+func ifftDIT28(x, y []byte, log_m ffe8, o *options) {
+	// Reference version:
+	sliceXorGo(x, y, o)
+	refMulAdd8(x, y, log_m)
+}
+
+func mulgf16(x, y []byte, log_m ffe, o *options) {
+	refMul(x, y, log_m)
+}
+
+func mulgf8(x, y []byte, log_m ffe8, o *options) {
+	refMul8(x, y, log_m)
+}
diff --git a/galois_notamd64.go b/galois_notamd64.go
index bd15e3a..e67905b 100644
--- a/galois_notamd64.go
+++ b/galois_notamd64.go
@@ -1,13 +1,14 @@
-//+build !amd64 noasm appengine gccgo
+//go:build !amd64 || noasm || appengine || gccgo
+// +build !amd64 noasm appengine gccgo
 
 // Copyright 2020, Klaus Post, see LICENSE for details.
 
 package reedsolomon
 
-func (r *reedSolomon) codeSomeShardsAvx512(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+func (r *reedSolomon) codeSomeShardsAvx512(matrixRows, inputs, outputs [][]byte, byteCount int) {
 	panic("codeSomeShardsAvx512 should not be called if built without asm")
 }
 
-func (r *reedSolomon) codeSomeShardsAvx512P(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+func (r *reedSolomon) codeSomeShardsAvx512P(matrixRows, inputs, outputs [][]byte, byteCount int) {
 	panic("codeSomeShardsAvx512P should not be called if built without asm")
 }
diff --git a/galois_ppc64le.go b/galois_ppc64le.go
index 70f93d6..8cd7b52 100644
--- a/galois_ppc64le.go
+++ b/galois_ppc64le.go
@@ -1,6 +1,5 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
+//go:build !noasm && !appengine && !gccgo
+// +build !noasm,!appengine,!gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 // Copyright 2018, Minio, Inc.
@@ -69,7 +68,83 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 
 // slice galois add
 func sliceXor(in, out []byte, o *options) {
-	for n, input := range in {
-		out[n] ^= input
+	sliceXorGo(in, out, o)
+}
+
+// 4-way butterfly
+func ifftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	ifftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func ifftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	ifftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func fftDIT4(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	fftDIT4Ref(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 4-way butterfly
+func fftDIT48(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	fftDIT4Ref8(work, dist, log_m01, log_m23, log_m02, o)
+}
+
+// 2-way butterfly forward
+func fftDIT2(x, y []byte, log_m ffe, o *options) {
+	// Reference version:
+	refMulAdd(x, y, log_m)
+	sliceXorGo(x, y, o)
+}
+
+// 2-way butterfly forward
+func fftDIT28(x, y []byte, log_m ffe8, o *options) {
+	// Reference version:
+	mulAdd8(x, y, log_m, o)
+	sliceXorGo(x, y, o)
+}
+
+// 2-way butterfly inverse
+func ifftDIT2(x, y []byte, log_m ffe, o *options) {
+	// Reference version:
+	sliceXorGo(x, y, o)
+	refMulAdd(x, y, log_m)
+}
+
+// 2-way butterfly inverse
+func ifftDIT28(x, y []byte, log_m ffe8, o *options) {
+	// Reference version:
+	sliceXorGo(x, y, o)
+	mulAdd8(x, y, log_m, o)
+}
+
+func mulgf16(x, y []byte, log_m ffe, o *options) {
+	refMul(x, y, log_m)
+}
+
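+// mulAdd8 uses the PPC64LE assembly kernel for whole 16-byte blocks and falls
+// back to the reference implementation for any remaining tail bytes.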
+func mulAdd8(out, in []byte, log_m ffe8, o *options) {
+	t := &multiply256LUT8[log_m]
+	galMulPpcXor(t[:16], t[16:32], in, out)
+	done := (len(in) >> 4) << 4
+	in = in[done:]
+	if len(in) > 0 {
+		out = out[done:]
+		refMulAdd8(in, out, log_m)
+	}
+}
+
+func mulgf8(out, in []byte, log_m ffe8, o *options) {
+	var done int
+	t := &multiply256LUT8[log_m]
+	galMulPpc(t[:16], t[16:32], in, out)
+	done = (len(in) >> 4) << 4
+
+	remain := len(in) - done
+	if remain > 0 {
+		mt := mul8LUTs[log_m].Value[:]
+		for i := done; i < len(in); i++ {
+			out[i] ^= byte(mt[in[i]])
+		}
 	}
 }
diff --git a/gentables.go b/gentables.go
index 843aade..b194c4a 100644
--- a/gentables.go
+++ b/gentables.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 package main
 
diff --git a/go.mod b/go.mod
index 4920a67..98f3ca4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,5 +1,14 @@
 module github.com/klauspost/reedsolomon
 
-go 1.14
+go 1.17
 
-require github.com/klauspost/cpuid/v2 v2.0.6
+require github.com/klauspost/cpuid/v2 v2.1.1
+
+require golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e // indirect
+
+
+retract (
+ v1.11.2 // https://github.com/klauspost/reedsolomon/pull/229
+ [v1.11.3, v1.11.5] // https://github.com/klauspost/reedsolomon/pull/238
+ v1.11.6 // https://github.com/klauspost/reedsolomon/issues/240
+)
diff --git a/go.sum b/go.sum
index 5b8b0f4..7e550f5 100644
--- a/go.sum
+++ b/go.sum
@@ -1,2 +1,4 @@
-github.com/klauspost/cpuid/v2 v2.0.6 h1:dQ5ueTiftKxp0gyjKSx5+8BtPWkyQbd95m8Gys/RarI=
-github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.1.1 h1:t0wUqjowdm8ezddV5k0tLWVklVuvLJpoHeb4WBdydm0=
+github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e h1:CsOuNlbOuf0mzxJIefr6Q4uAUetRUwZE4qt7VfzP+xo=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/leopard.go b/leopard.go
new file mode 100644
index 0000000..16bec4b
--- /dev/null
+++ b/leopard.go
@@ -0,0 +1,1259 @@
+package reedsolomon
+
+// This is an O(n*log n) implementation of Reed-Solomon
+// codes, ported from the C++ library https://github.com/catid/leopard.
+//
+// The implementation is based on the paper
+//
+// S.-J. Lin, T. Y. Al-Naffouri, Y. S. Han, and W.-H. Chung,
+// "Novel Polynomial Basis with Fast Fourier Transform
+// and Its Application to Reed-Solomon Erasure Codes"
+// IEEE Trans. on Information Theory, pp. 6284-6299, November, 2016.
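+//
+// leopardFF16 replaces the classic codec when more than 256 total shards are
+// requested (up to 65536 in total) and requires shard sizes that are a
+// multiple of 64 bytes.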
+
+import (
+	"bytes"
+	"io"
+	"math/bits"
+	"sync"
+	"unsafe"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+// leopardFF16 is like reedSolomon but for more than 256 total shards.
+type leopardFF16 struct {
+	dataShards   int // Number of data shards, should not be modified.
+	parityShards int // Number of parity shards, should not be modified.
+	totalShards  int // Total number of shards. Calculated, and should not be modified.
+
+	workPool sync.Pool
+
+	o options
+}
+
+// newFF16 is like New, but for more than 256 total shards.
+func newFF16(dataShards, parityShards int, opt options) (*leopardFF16, error) {
+	initConstants()
+
+	if dataShards <= 0 || parityShards <= 0 {
+		return nil, ErrInvShardNum
+	}
+
+	if dataShards+parityShards > 65536 {
+		return nil, ErrMaxShardNum
+	}
+
+	r := &leopardFF16{
+		dataShards:   dataShards,
+		parityShards: parityShards,
+		totalShards:  dataShards + parityShards,
+		o:            opt,
+	}
+	return r, nil
+}
+
+var _ = Extensions(&leopardFF16{})
+
+func (r *leopardFF16) ShardSizeMultiple() int {
+	return 64
+}
+
+func (r *leopardFF16) DataShards() int {
+	return r.dataShards
+}
+
+func (r *leopardFF16) ParityShards() int {
+	return r.parityShards
+}
+
+func (r *leopardFF16) TotalShards() int {
+	return r.totalShards
+}
+
+func (r *leopardFF16) AllocAligned(each int) [][]byte {
+	return AllocAligned(r.totalShards, each)
+}
+
+type ffe uint16
+
+const (
+	bitwidth   = 16
+	order      = 1 << bitwidth
+	modulus    = order - 1
+	polynomial = 0x1002D
+)
+
+var (
+	fftSkew  *[modulus]ffe
+	logWalsh *[order]ffe
+)
+
+// Logarithm Tables
+var (
+	logLUT *[order]ffe
+	expLUT *[order]ffe
+)
+
+// Stores the partial products of x * y at offset x + y * 65536
+// Repeated accesses from the same y value are faster
+var mul16LUTs *[order]mul16LUT
+
+type mul16LUT struct {
+	// Contains Lo product as a single lookup.
+	// Should be XORed with Hi lookup for result.
+	Lo [256]ffe
+	Hi [256]ffe
+}
+
+// Stores lookup for avx2
+var multiply256LUT *[order][8 * 16]byte
+
+func (r *leopardFF16) Encode(shards [][]byte) error {
+	if len(shards) != r.totalShards {
+		return ErrTooFewShards
+	}
+
+	if err := checkShards(shards, false); err != nil {
+		return err
+	}
+	return r.encode(shards)
+}
+
+func (r *leopardFF16) encode(shards [][]byte) error {
+	shardSize := shardSize(shards)
+	if shardSize%64 != 0 {
+		return ErrInvalidShardSize
+	}
+
+	m := ceilPow2(r.parityShards)
+	var work [][]byte
+	if w, ok := r.workPool.Get().([][]byte); ok {
+		work = w
+	}
+	if cap(work) >= m*2 {
+		work = work[:m*2]
+	} else {
+		work = AllocAligned(m*2, shardSize)
+	}
+	for i := range work {
+		if cap(work[i]) < shardSize {
+			work[i] = AllocAligned(1, shardSize)[0]
+		} else {
+			work[i] = work[i][:shardSize]
+		}
+	}
+	defer r.workPool.Put(work)
+
+	mtrunc := m
+	if r.dataShards < mtrunc {
+		mtrunc = r.dataShards
+	}
+
+	skewLUT := fftSkew[m-1:]
+
+	sh := shards
+	ifftDITEncoder(
+		sh[:r.dataShards],
+		mtrunc,
+		work,
+		nil, // No xor output
+		m,
+		skewLUT,
+		&r.o,
+	)
+
+	lastCount := r.dataShards % m
+	if m >= r.dataShards {
+		goto skip_body
+	}
+
+	// For sets of m data pieces:
+	for i := m; i+m <= r.dataShards; i += m {
+		sh = sh[m:]
+		skewLUT = skewLUT[m:]
+
+		// work <- work xor IFFT(data + i, m, m + i)
+
+		ifftDITEncoder(
+			sh, // data source
+			m,
+			work[m:], // temporary workspace
+			work,     // xor destination
+			m,
+			skewLUT,
+			&r.o,
+		)
+	}
+
+	// Handle final partial set of m pieces:
+	if lastCount != 0 {
+		sh = sh[m:]
+		skewLUT = skewLUT[m:]
+
+		// work <- work xor IFFT(data + i, m, m + i)
+
+		ifftDITEncoder(
+			sh, // data source
+			lastCount,
+			work[m:], // temporary workspace
+			work,     // xor destination
+			m,
+			skewLUT,
+			&r.o,
+		)
+	}
+
+skip_body:
+	// work <- FFT(work, m, 0)
+	fftDIT(work, r.parityShards, m, fftSkew[:], &r.o)
+
+	for i, w := range work[:r.parityShards] {
+		sh := shards[i+r.dataShards]
+		if cap(sh) >= shardSize {
+			sh = append(sh[:0], w...)
+		} else {
+			sh = w
+		}
+		shards[i+r.dataShards] = sh
+	}
+
+	return nil
+}
+
+func (r *leopardFF16) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error {
+	return ErrNotSupported
+}
+
+func (r *leopardFF16) Join(dst io.Writer, shards [][]byte, outSize int) error {
+	// Do we have enough shards?
+	if len(shards) < r.dataShards {
+		return ErrTooFewShards
+	}
+	shards = shards[:r.dataShards]
+
+	// Do we have enough data?
+	size := 0
+	for _, shard := range shards {
+		if shard == nil {
+			return ErrReconstructRequired
+		}
+		size += len(shard)
+
+		// Do we have enough data already?
+		if size >= outSize {
+			break
+		}
+	}
+	if size < outSize {
+		return ErrShortData
+	}
+
+	// Copy data to dst
+	write := outSize
+	for _, shard := range shards {
+		if write < len(shard) {
+			_, err := dst.Write(shard[:write])
+			return err
+		}
+		n, err := dst.Write(shard)
+		if err != nil {
+			return err
+		}
+		write -= n
+	}
+	return nil
+}
+
+func (r *leopardFF16) Update(shards [][]byte, newDatashards [][]byte) error {
+	return ErrNotSupported
+}
+
+func (r *leopardFF16) Split(data []byte) ([][]byte, error) {
+	if len(data) == 0 {
+		return nil, ErrShortData
+	}
+	if r.totalShards == 1 && len(data)&63 == 0 {
+		return [][]byte{data}, nil
+	}
+	dataLen := len(data)
+	// Calculate number of bytes per data shard.
+	perShard := (len(data) + r.dataShards - 1) / r.dataShards
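+	// Round up to a multiple of 64: this codec only accepts shard sizes that
+	// are a multiple of 64 bytes (see ShardSizeMultiple and encode).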
+	perShard = ((perShard + 63) / 64) * 64
+	needTotal := r.totalShards * perShard
+
+	if cap(data) > len(data) {
+		if cap(data) > needTotal {
+			data = data[:needTotal]
+		} else {
+			data = data[:cap(data)]
+		}
+		clear := data[dataLen:]
+		for i := range clear {
+			clear[i] = 0
+		}
+	}
+
+	// Only allocate memory if necessary
+	var padding [][]byte
+	if len(data) < needTotal {
+		// calculate maximum number of full shards in `data` slice
+		fullShards := len(data) / perShard
+		padding = AllocAligned(r.totalShards-fullShards, perShard)
+		if dataLen > perShard*fullShards {
+			// Copy partial shards
+			copyFrom := data[perShard*fullShards : dataLen]
+			for i := range padding {
+				if len(copyFrom) <= 0 {
+					break
+				}
+				copyFrom = copyFrom[copy(padding[i], copyFrom):]
+			}
+		}
+	} else {
+		zero := data[dataLen : r.totalShards*perShard]
+		for i := range zero {
+			zero[i] = 0
+		}
+	}
+
+	// Split into equal-length shards.
+	dst := make([][]byte, r.totalShards)
+	i := 0
+	for ; i < len(dst) && len(data) >= perShard; i++ {
+		dst[i] = data[:perShard:perShard]
+		data = data[perShard:]
+	}
+
+	for j := 0; i+j < len(dst); j++ {
+		dst[i+j] = padding[0]
+		padding = padding[1:]
+	}
+
+	return dst, nil
+}
+
+func (r *leopardFF16) ReconstructSome(shards [][]byte, required []bool) error {
+	return r.ReconstructData(shards)
+}
+
+func (r *leopardFF16) Reconstruct(shards [][]byte) error {
+	return r.reconstruct(shards, true)
+}
+
+func (r *leopardFF16) ReconstructData(shards [][]byte) error {
+	return r.reconstruct(shards, false)
+}
+
+func (r *leopardFF16) Verify(shards [][]byte) (bool, error) {
+	if len(shards) != r.totalShards {
+		return false, ErrTooFewShards
+	}
+	if err := checkShards(shards, false); err != nil {
+		return false, err
+	}
+
+	// Re-encode parity shards to temporary storage.
+	shardSize := len(shards[0])
+	outputs := make([][]byte, r.totalShards)
+	copy(outputs, shards[:r.dataShards])
+	for i := r.dataShards; i < r.totalShards; i++ {
+		outputs[i] = make([]byte, shardSize)
+	}
+	if err := r.Encode(outputs); err != nil {
+		return false, err
+	}
+
+	// Compare.
+	for i := r.dataShards; i < r.totalShards; i++ {
+		if !bytes.Equal(outputs[i], shards[i]) {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+func (r *leopardFF16) reconstruct(shards [][]byte, recoverAll bool) error {
+	if len(shards) != r.totalShards {
+		return ErrTooFewShards
+	}
+
+	if err := checkShards(shards, true); err != nil {
+		return err
+	}
+
+	// Quick check: are all of the shards present?  If so, there's
+	// nothing to do.
+	numberPresent := 0
+	dataPresent := 0
+	for i := 0; i < r.totalShards; i++ {
+		if len(shards[i]) != 0 {
+			numberPresent++
+			if i < r.dataShards {
+				dataPresent++
+			}
+		}
+	}
+	if numberPresent == r.totalShards || !recoverAll && dataPresent == r.dataShards {
+		// Cool. All of the shards have data. We don't
+		// need to do anything.
+		return nil
+	}
+
+	// Use only if we are missing less than 1/4 parity.
+	useBits := r.totalShards-numberPresent <= r.parityShards/4
+
+	// Check if we have enough to reconstruct.
+	if numberPresent < r.dataShards {
+		return ErrTooFewShards
+	}
+
+	shardSize := shardSize(shards)
+	if shardSize%64 != 0 {
+		return ErrInvalidShardSize
+	}
+
+	m := ceilPow2(r.parityShards)
+	n := ceilPow2(m + r.dataShards)
+
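+	// When enabled, erased positions are also recorded in an errorBitfield,
+	// which errorBits.fftDIT below can use to limit FFT work to affected ranges.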
+	const LEO_ERROR_BITFIELD_OPT = true
+
+	// Fill in error locations.
+	var errorBits errorBitfield
+	var errLocs [order]ffe
+	for i := 0; i < r.parityShards; i++ {
+		if len(shards[i+r.dataShards]) == 0 {
+			errLocs[i] = 1
+			if LEO_ERROR_BITFIELD_OPT && recoverAll {
+				errorBits.set(i)
+			}
+		}
+	}
+	for i := r.parityShards; i < m; i++ {
+		errLocs[i] = 1
+		if LEO_ERROR_BITFIELD_OPT && recoverAll {
+			errorBits.set(i)
+		}
+	}
+	for i := 0; i < r.dataShards; i++ {
+		if len(shards[i]) == 0 {
+			errLocs[i+m] = 1
+			if LEO_ERROR_BITFIELD_OPT {
+				errorBits.set(i + m)
+			}
+		}
+	}
+
+	if LEO_ERROR_BITFIELD_OPT && useBits {
+		errorBits.prepare()
+	}
+
+	// Evaluate error locator polynomial
+	fwht(&errLocs, order, m+r.dataShards)
+
+	for i := 0; i < order; i++ {
+		errLocs[i] = ffe((uint(errLocs[i]) * uint(logWalsh[i])) % modulus)
+	}
+
+	fwht(&errLocs, order, order)
+
+	var work [][]byte
+	if w, ok := r.workPool.Get().([][]byte); ok {
+		work = w
+	}
+	if cap(work) >= n {
+		work = work[:n]
+	} else {
+		work = make([][]byte, n)
+	}
+	for i := range work {
+		if cap(work[i]) < shardSize {
+			work[i] = make([]byte, shardSize)
+		} else {
+			work[i] = work[i][:shardSize]
+		}
+	}
+	defer r.workPool.Put(work)
+
+	// work <- recovery data
+
+	for i := 0; i < r.parityShards; i++ {
+		if len(shards[i+r.dataShards]) != 0 {
+			mulgf16(work[i], shards[i+r.dataShards], errLocs[i], &r.o)
+		} else {
+			memclr(work[i])
+		}
+	}
+	for i := r.parityShards; i < m; i++ {
+		memclr(work[i])
+	}
+
+	// work <- original data
+
+	for i := 0; i < r.dataShards; i++ {
+		if len(shards[i]) != 0 {
+			mulgf16(work[m+i], shards[i], errLocs[m+i], &r.o)
+		} else {
+			memclr(work[m+i])
+		}
+	}
+	for i := m + r.dataShards; i < n; i++ {
+		memclr(work[i])
+	}
+
+	// work <- IFFT(work, n, 0)
+
+	ifftDITDecoder(
+		m+r.dataShards,
+		work,
+		n,
+		fftSkew[:],
+		&r.o,
+	)
+
+	// work <- FormalDerivative(work, n)
+
+	for i := 1; i < n; i++ {
+		width := ((i ^ (i - 1)) + 1) >> 1
+		slicesXor(work[i-width:i], work[i:i+width], &r.o)
+	}
+
+	// work <- FFT(work, n, 0) truncated to m + dataShards
+
+	outputCount := m + r.dataShards
+
+	if LEO_ERROR_BITFIELD_OPT && useBits {
+		errorBits.fftDIT(work, outputCount, n, fftSkew[:], &r.o)
+	} else {
+		fftDIT(work, outputCount, n, fftSkew[:], &r.o)
+	}
+
+	// Reveal erasures
+	//
+	//  Original = -ErrLocator * FFT( Derivative( IFFT( ErrLocator * ReceivedData ) ) )
+	//  mul_mem(x, y, log_m) equals x[] = y[] * log_m
+	//
+	// mem layout: [Recovery Data (Power of Two = M)] [Original Data (K)] [Zero Padding out to N]
+	end := r.dataShards
+	if recoverAll {
+		end = r.totalShards
+	}
+	for i := 0; i < end; i++ {
+		if len(shards[i]) != 0 {
+			continue
+		}
+		if cap(shards[i]) >= shardSize {
+			shards[i] = shards[i][:shardSize]
+		} else {
+			shards[i] = make([]byte, shardSize)
+		}
+		if i >= r.dataShards {
+			// Parity shard.
+			mulgf16(shards[i], work[i-r.dataShards], modulus-errLocs[i-r.dataShards], &r.o)
+		} else {
+			// Data shard.
+			mulgf16(shards[i], work[i+m], modulus-errLocs[i+m], &r.o)
+		}
+	}
+	return nil
+}
+
+// Basic no-frills version for decoder
+func ifftDITDecoder(mtrunc int, work [][]byte, m int, skewLUT []ffe, o *options) {
+	// Decimation in time: Unroll 2 layers at a time
+	dist := 1
+	dist4 := 4
+	for dist4 <= m {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			iend := r + dist
+			log_m01 := skewLUT[iend-1]
+			log_m02 := skewLUT[iend+dist-1]
+			log_m23 := skewLUT[iend+dist*2-1]
+
+			// For each set of dist elements:
+			for i := r; i < iend; i++ {
+				ifftDIT4(work[i:], dist, log_m01, log_m23, log_m02, o)
+			}
+		}
+		dist = dist4
+		dist4 <<= 2
+	}
+
+	// If there is one layer left:
+	if dist < m {
+		// Assuming that dist = m / 2
+		if dist*2 != m {
+			panic("internal error")
+		}
+
+		log_m := skewLUT[dist-1]
+
+		if log_m == modulus {
+			slicesXor(work[dist:2*dist], work[:dist], o)
+		} else {
+			for i := 0; i < dist; i++ {
+				ifftDIT2(
+					work[i],
+					work[i+dist],
+					log_m,
+					o,
+				)
+			}
+		}
+	}
+}
+
+// In-place FFT for encoder and decoder
+func fftDIT(work [][]byte, mtrunc, m int, skewLUT []ffe, o *options) {
+	// Decimation in time: Unroll 2 layers at a time
+	dist4 := m
+	dist := m >> 2
+	for dist != 0 {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			iend := r + dist
+			log_m01 := skewLUT[iend-1]
+			log_m02 := skewLUT[iend+dist-1]
+			log_m23 := skewLUT[iend+dist*2-1]
+
+			// For each set of dist elements:
+			for i := r; i < iend; i++ {
+				fftDIT4(
+					work[i:],
+					dist,
+					log_m01,
+					log_m23,
+					log_m02,
+					o,
+				)
+			}
+		}
+		dist4 = dist
+		dist >>= 2
+	}
+
+	// If there is one layer left:
+	if dist4 == 2 {
+		for r := 0; r < mtrunc; r += 2 {
+			log_m := skewLUT[r+1-1]
+
+			if log_m == modulus {
+				sliceXor(work[r], work[r+1], o)
+			} else {
+				fftDIT2(work[r], work[r+1], log_m, o)
+			}
+		}
+	}
+}
+
+// 4-way butterfly
+func fftDIT4Ref(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	// First layer:
+	if log_m02 == modulus {
+		sliceXor(work[0], work[dist*2], o)
+		sliceXor(work[dist], work[dist*3], o)
+	} else {
+		fftDIT2(work[0], work[dist*2], log_m02, o)
+		fftDIT2(work[dist], work[dist*3], log_m02, o)
+	}
+
+	// Second layer:
+	if log_m01 == modulus {
+		sliceXor(work[0], work[dist], o)
+	} else {
+		fftDIT2(work[0], work[dist], log_m01, o)
+	}
+
+	if log_m23 == modulus {
+		sliceXor(work[dist*2], work[dist*3], o)
+	} else {
+		fftDIT2(work[dist*2], work[dist*3], log_m23, o)
+	}
+}
+
+// Unrolled IFFT for encoder
+func ifftDITEncoder(data [][]byte, mtrunc int, work [][]byte, xorRes [][]byte, m int, skewLUT []ffe, o *options) {
+	// I tried rolling the memcpy/memset into the first layer of the FFT and
+	// found that it only yields a 4% performance improvement, which is not
+	// worth the extra complexity.
+	for i := 0; i < mtrunc; i++ {
+		copy(work[i], data[i])
+	}
+	for i := mtrunc; i < m; i++ {
+		memclr(work[i])
+	}
+
+	// I tried splitting up the first few layers into L3-cache sized blocks but
+	// found that it only provides about 5% performance boost, which is not
+	// worth the extra complexity.
+
+	// Decimation in time: Unroll 2 layers at a time
+	dist := 1
+	dist4 := 4
+	for dist4 <= m {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			iend := r + dist
+			log_m01 := skewLUT[iend]
+			log_m02 := skewLUT[iend+dist]
+			log_m23 := skewLUT[iend+dist*2]
+
+			// For each set of dist elements:
+			for i := r; i < iend; i++ {
+				ifftDIT4(
+					work[i:],
+					dist,
+					log_m01,
+					log_m23,
+					log_m02,
+					o,
+				)
+			}
+		}
+
+		dist = dist4
+		dist4 <<= 2
+		// I tried alternating sweeps left->right and right->left to reduce cache misses.
+		// It provides about 1% performance boost when done for both FFT and IFFT, so it
+		// does not seem to be worth the extra complexity.
+	}
+
+	// If there is one layer left:
+	if dist < m {
+		// Assuming that dist = m / 2
+		if dist*2 != m {
+			panic("internal error")
+		}
+
+		logm := skewLUT[dist]
+
+		if logm == modulus {
+			slicesXor(work[dist:dist*2], work[:dist], o)
+		} else {
+			for i := 0; i < dist; i++ {
+				ifftDIT2(work[i], work[i+dist], logm, o)
+			}
+		}
+	}
+
+	// I tried unrolling this but it does not provide more than 5% performance
+	// improvement for 16-bit finite fields, so it's not worth the complexity.
+	if xorRes != nil {
+		slicesXor(xorRes[:m], work[:m], o)
+	}
+}
+
+func ifftDIT4Ref(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe, o *options) {
+	// First layer:
+	if log_m01 == modulus {
+		sliceXor(work[0], work[dist], o)
+	} else {
+		ifftDIT2(work[0], work[dist], log_m01, o)
+	}
+
+	if log_m23 == modulus {
+		sliceXor(work[dist*2], work[dist*3], o)
+	} else {
+		ifftDIT2(work[dist*2], work[dist*3], log_m23, o)
+	}
+
+	// Second layer:
+	if log_m02 == modulus {
+		sliceXor(work[0], work[dist*2], o)
+		sliceXor(work[dist], work[dist*3], o)
+	} else {
+		ifftDIT2(work[0], work[dist*2], log_m02, o)
+		ifftDIT2(work[dist], work[dist*3], log_m02, o)
+	}
+}
+
+// Reference version of muladd: x[] ^= y[] * log_m
+func refMulAdd(x, y []byte, log_m ffe) {
+	lut := &mul16LUTs[log_m]
+
+	for len(x) >= 64 {
+		// Assert sizes for no bounds checks in loop
+		hiA := y[32:64]
+		loA := y[:32]
+		dst := x[:64] // Needed, but not checked...
+		for i, lo := range loA {
+			hi := hiA[i]
+			prod := lut.Lo[lo] ^ lut.Hi[hi]
+
+			dst[i] ^= byte(prod)
+			dst[i+32] ^= byte(prod >> 8)
+		}
+		x = x[64:]
+		y = y[64:]
+	}
+}
+
+func memclr(s []byte) {
+	for i := range s {
+		s[i] = 0
+	}
+}
+
+// slicesXor calls xor for every slice pair in v1, v2.
+func slicesXor(v1, v2 [][]byte, o *options) {
+	for i, v := range v1 {
+		sliceXor(v2[i], v, o)
+	}
+}
+
+// Reference version of mul: x[] = y[] * log_m
+func refMul(x, y []byte, log_m ffe) {
+	lut := &mul16LUTs[log_m]
+
+	for off := 0; off < len(x); off += 64 {
+		loA := y[off : off+32]
+		hiA := y[off+32:]
+		hiA = hiA[:len(loA)]
+		for i, lo := range loA {
+			hi := hiA[i]
+			prod := lut.Lo[lo] ^ lut.Hi[hi]
+
+			x[off+i] = byte(prod)
+			x[off+i+32] = byte(prod >> 8)
+		}
+	}
+}
+
+// Returns a * Log(b)
+func mulLog(a, log_b ffe) ffe {
+	/*
+	   Note that this operation is not a normal multiplication in a finite
+	   field because the right operand is already a logarithm.  This is done
+	   because it moves K table lookups from the Decode() method into the
+	   initialization step that is less performance critical.  The LogWalsh[]
+	   table below contains precalculated logarithms so it is easier to do
+	   all the other multiplies in that form as well.
+	*/
+	if a == 0 {
+		return 0
+	}
+	return expLUT[addMod(logLUT[a], log_b)]
+}
+
+// z = x + y (mod kModulus)
+func addMod(a, b ffe) ffe {
+	sum := uint(a) + uint(b)
+
+	// Partial reduction step, allowing for kModulus to be returned
+	return ffe(sum + sum>>bitwidth)
+}
+
+// z = x - y (mod kModulus)
+func subMod(a, b ffe) ffe {
+	dif := uint(a) - uint(b)
+
+	// Partial reduction step, allowing for kModulus to be returned
+	return ffe(dif + dif>>bitwidth)
+}
+
+// ceilPow2 returns the smallest power of two at or above n.
+func ceilPow2(n int) int {
+	const w = int(unsafe.Sizeof(n) * 8)
+	return 1 << (w - bits.LeadingZeros(uint(n-1)))
+}
+
+// Decimation in time (DIT) Fast Walsh-Hadamard Transform
+// Unrolls pairs of layers to perform cross-layer operations in registers
+// mtrunc: Number of elements that are non-zero at the front of data
+func fwht(data *[order]ffe, m, mtrunc int) {
+	// Decimation in time: Unroll 2 layers at a time
+	dist := 1
+	dist4 := 4
+	for dist4 <= m {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			// For each set of dist elements:
+			// Use 16 bit indices to avoid bounds check on [65536]ffe.
+			dist := uint16(dist)
+			off := uint16(r)
+			for i := uint16(0); i < dist; i++ {
+				// fwht4(data[i:], dist) inlined...
+				// Reading values appears faster than updating pointers.
+				// Casting to uint is not faster.
+				t0 := data[off]
+				t1 := data[off+dist]
+				t2 := data[off+dist*2]
+				t3 := data[off+dist*3]
+
+				t0, t1 = fwht2alt(t0, t1)
+				t2, t3 = fwht2alt(t2, t3)
+				t0, t2 = fwht2alt(t0, t2)
+				t1, t3 = fwht2alt(t1, t3)
+
+				data[off] = t0
+				data[off+dist] = t1
+				data[off+dist*2] = t2
+				data[off+dist*3] = t3
+				off++
+			}
+		}
+		dist = dist4
+		dist4 <<= 2
+	}
+
+	// If there is one layer left:
+	if dist < m {
+		dist := uint16(dist)
+		for i := uint16(0); i < dist; i++ {
+			fwht2(&data[i], &data[i+dist])
+		}
+	}
+}
+
+func fwht4(data []ffe, s int) {
+	s2 := s << 1
+
+	t0 := &data[0]
+	t1 := &data[s]
+	t2 := &data[s2]
+	t3 := &data[s2+s]
+
+	fwht2(t0, t1)
+	fwht2(t2, t3)
+	fwht2(t0, t2)
+	fwht2(t1, t3)
+}
+
+// {a, b} = {a + b, a - b} (Mod Q)
+func fwht2(a, b *ffe) {
+	sum := addMod(*a, *b)
+	dif := subMod(*a, *b)
+	*a = sum
+	*b = dif
+}
+
+// fwht2alt is like fwht2, but returns the result.
+func fwht2alt(a, b ffe) (ffe, ffe) {
+	return addMod(a, b), subMod(a, b)
+}
+
+var initOnce sync.Once
+
+func initConstants() {
+	initOnce.Do(func() {
+		initLUTs()
+		initFFTSkew()
+		initMul16LUT()
+	})
+}
+
+// Initialize logLUT, expLUT.
+func initLUTs() {
+	cantorBasis := [bitwidth]ffe{
+		0x0001, 0xACCA, 0x3C0E, 0x163E,
+		0xC582, 0xED2E, 0x914C, 0x4012,
+		0x6C98, 0x10D8, 0x6A72, 0xB900,
+		0xFDB8, 0xFB34, 0xFF38, 0x991E,
+	}
+
+	expLUT = &[order]ffe{}
+	logLUT = &[order]ffe{}
+
+	// LFSR table generation:
+	state := 1
+	for i := ffe(0); i < modulus; i++ {
+		expLUT[state] = i
+		state <<= 1
+		if state >= order {
+			state ^= polynomial
+		}
+	}
+	expLUT[0] = modulus
+
+	// Conversion to Cantor basis:
+
+	logLUT[0] = 0
+	for i := 0; i < bitwidth; i++ {
+		basis := cantorBasis[i]
+		width := 1 << i
+
+		for j := 0; j < width; j++ {
+			logLUT[j+width] = logLUT[j] ^ basis
+		}
+	}
+
+	for i := 0; i < order; i++ {
+		logLUT[i] = expLUT[logLUT[i]]
+	}
+
+	for i := 0; i < order; i++ {
+		expLUT[logLUT[i]] = ffe(i)
+	}
+
+	expLUT[modulus] = expLUT[0]
+}
+
+// Initialize fftSkew.
+func initFFTSkew() {
+	var temp [bitwidth - 1]ffe
+
+	// Generate FFT skew vector {1}:
+
+	for i := 1; i < bitwidth; i++ {
+		temp[i-1] = ffe(1 << i)
+	}
+
+	fftSkew = &[modulus]ffe{}
+	logWalsh = &[order]ffe{}
+
+	for m := 0; m < bitwidth-1; m++ {
+		step := 1 << (m + 1)
+
+		fftSkew[1<<m-1] = 0
+
+		for i := m; i < bitwidth-1; i++ {
+			s := 1 << (i + 1)
+
+			for j := 1<<m - 1; j < s; j += step {
+				fftSkew[j+s] = fftSkew[j] ^ temp[i]
+			}
+		}
+
+		temp[m] = modulus - logLUT[mulLog(temp[m], logLUT[temp[m]^1])]
+
+		for i := m + 1; i < bitwidth-1; i++ {
+			sum := addMod(logLUT[temp[i]^1], temp[m])
+			temp[i] = mulLog(temp[i], sum)
+		}
+	}
+
+	for i := 0; i < modulus; i++ {
+		fftSkew[i] = logLUT[fftSkew[i]]
+	}
+
+	// Precalculate FWHT(Log[i]):
+
+	for i := 0; i < order; i++ {
+		logWalsh[i] = logLUT[i]
+	}
+	logWalsh[0] = 0
+
+	fwht(logWalsh, order, order)
+}
+
+func initMul16LUT() {
+	mul16LUTs = &[order]mul16LUT{}
+
+	// For each log_m multiplicand:
+	for log_m := 0; log_m < order; log_m++ {
+		var tmp [64]ffe
+		for nibble, shift := 0, 0; nibble < 4; {
+			nibble_lut := tmp[nibble*16:]
+
+			for xnibble := 0; xnibble < 16; xnibble++ {
+				prod := mulLog(ffe(xnibble<<shift), ffe(log_m))
+				nibble_lut[xnibble] = prod
+			}
+			nibble++
+			shift += 4
+		}
+		lut := &mul16LUTs[log_m]
+		for i := range lut.Lo[:] {
+			lut.Lo[i] = tmp[i&15] ^ tmp[((i>>4)+16)]
+			lut.Hi[i] = tmp[((i&15)+32)] ^ tmp[((i>>4)+48)]
+		}
+	}
+	if cpuid.CPU.Has(cpuid.SSSE3) || cpuid.CPU.Has(cpuid.AVX2) || cpuid.CPU.Has(cpuid.AVX512F) {
+		multiply256LUT = &[order][16 * 8]byte{}
+
+		for logM := range multiply256LUT[:] {
+			// For each 4 bits of the finite field width in bits:
+			shift := 0
+			for i := 0; i < 4; i++ {
+				// Construct 16 entry LUT for PSHUFB
+				prodLo := multiply256LUT[logM][i*16 : i*16+16]
+				prodHi := multiply256LUT[logM][4*16+i*16 : 4*16+i*16+16]
+				for x := range prodLo[:] {
+					prod := mulLog(ffe(x<<shift), ffe(logM))
+					prodLo[x] = byte(prod)
+					prodHi[x] = byte(prod >> 8)
+				}
+				shift += 4
+			}
+		}
+	}
+}
+
+const kWordMips = 5
+const kWords = order / 64
+const kBigMips = 6
+const kBigWords = (kWords + 63) / 64
+const kBiggestMips = 4
+
+// errorBitfield contains progressive errors to help indicate which
+// shards need reconstruction.
+type errorBitfield struct {
+	Words        [kWordMips][kWords]uint64
+	BigWords     [kBigMips][kBigWords]uint64
+	BiggestWords [kBiggestMips]uint64
+}
+
+func (e *errorBitfield) set(i int) {
+	e.Words[0][i/64] |= uint64(1) << (i & 63)
+}
+
+func (e *errorBitfield) isNeededFn(mipLevel int) func(bit int) bool {
+	if mipLevel >= 16 {
+		return func(bit int) bool {
+			return true
+		}
+	}
+	if mipLevel >= 12 {
+		w := e.BiggestWords[mipLevel-12]
+		return func(bit int) bool {
+			bit /= 4096
+			return 0 != (w & (uint64(1) << bit))
+		}
+	}
+	if mipLevel >= 6 {
+		w := e.BigWords[mipLevel-6][:]
+		return func(bit int) bool {
+			bit /= 64
+			return 0 != (w[bit/64] & (uint64(1) << (bit & 63)))
+		}
+	}
+	if mipLevel > 0 {
+		w := e.Words[mipLevel-1][:]
+		return func(bit int) bool {
+			return 0 != (w[bit/64] & (uint64(1) << (bit & 63)))
+		}
+	}
+	return nil
+}
+
+func (e *errorBitfield) isNeeded(mipLevel int, bit uint) bool {
+	if mipLevel >= 16 {
+		return true
+	}
+	if mipLevel >= 12 {
+		bit /= 4096
+		return 0 != (e.BiggestWords[mipLevel-12] & (uint64(1) << bit))
+	}
+	if mipLevel >= 6 {
+		bit /= 64
+		return 0 != (e.BigWords[mipLevel-6][bit/64] & (uint64(1) << (bit % 64)))
+	}
+	return 0 != (e.Words[mipLevel-1][bit/64] & (uint64(1) << (bit % 64)))
+}
+
+var kHiMasks = [5]uint64{
+	0xAAAAAAAAAAAAAAAA,
+	0xCCCCCCCCCCCCCCCC,
+	0xF0F0F0F0F0F0F0F0,
+	0xFF00FF00FF00FF00,
+	0xFFFF0000FFFF0000,
+}
+
+func (e *errorBitfield) prepare() {
+	// First mip level is for final layer of FFT: pairs of data
+	for i := 0; i < kWords; i++ {
+		w_i := e.Words[0][i]
+		hi2lo0 := w_i | ((w_i & kHiMasks[0]) >> 1)
+		lo2hi0 := (w_i & (kHiMasks[0] >> 1)) << 1
+		w_i = hi2lo0 | lo2hi0
+		e.Words[0][i] = w_i
+
+		bits := 2
+		for j := 1; j < kWordMips; j++ {
+			hi2lo_j := w_i | ((w_i & kHiMasks[j]) >> bits)
+			lo2hi_j := (w_i & (kHiMasks[j] >> bits)) << bits
+			w_i = hi2lo_j | lo2hi_j
+			e.Words[j][i] = w_i
+			bits <<= 1
+		}
+	}
+
+	for i := 0; i < kBigWords; i++ {
+		w_i := uint64(0)
+		bit := uint64(1)
+		src := e.Words[kWordMips-1][i*64 : i*64+64]
+		for _, w := range src {
+			w_i |= (w | (w >> 32) | (w << 32)) & bit
+			bit <<= 1
+		}
+		e.BigWords[0][i] = w_i
+
+		bits := 1
+		for j := 1; j < kBigMips; j++ {
+			hi2lo_j := w_i | ((w_i & kHiMasks[j-1]) >> bits)
+			lo2hi_j := (w_i & (kHiMasks[j-1] >> bits)) << bits
+			w_i = hi2lo_j | lo2hi_j
+			e.BigWords[j][i] = w_i
+			bits <<= 1
+		}
+	}
+
+	w_i := uint64(0)
+	bit := uint64(1)
+	for _, w := range e.BigWords[kBigMips-1][:kBigWords] {
+		w_i |= (w | (w >> 32) | (w << 32)) & bit
+		bit <<= 1
+	}
+	e.BiggestWords[0] = w_i
+
+	bits := uint64(1)
+	for j := 1; j < kBiggestMips; j++ {
+		hi2lo_j := w_i | ((w_i & kHiMasks[j-1]) >> bits)
+		lo2hi_j := (w_i & (kHiMasks[j-1] >> bits)) << bits
+		w_i = hi2lo_j | lo2hi_j
+		e.BiggestWords[j] = w_i
+		bits <<= 1
+	}
+}
+
+func (e *errorBitfield) fftDIT(work [][]byte, mtrunc, m int, skewLUT []ffe, o *options) {
+	// Decimation in time: Unroll 2 layers at a time
+	mipLevel := bits.Len32(uint32(m)) - 1
+
+	dist4 := m
+	dist := m >> 2
+	needed := e.isNeededFn(mipLevel)
+	for dist != 0 {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			if !needed(r) {
+				continue
+			}
+			iEnd := r + dist
+			logM01 := skewLUT[iEnd-1]
+			logM02 := skewLUT[iEnd+dist-1]
+			logM23 := skewLUT[iEnd+dist*2-1]
+
+			// For each set of dist elements:
+			for i := r; i < iEnd; i++ {
+				fftDIT4(
+					work[i:],
+					dist,
+					logM01,
+					logM23,
+					logM02,
+					o)
+			}
+		}
+		dist4 = dist
+		dist >>= 2
+		mipLevel -= 2
+		needed = e.isNeededFn(mipLevel)
+	}
+
+	// If there is one layer left:
+	if dist4 == 2 {
+		for r := 0; r < mtrunc; r += 2 {
+			if !needed(r) {
+				continue
+			}
+			logM := skewLUT[r+1-1]
+
+			if logM == modulus {
+				sliceXor(work[r], work[r+1], o)
+			} else {
+				fftDIT2(work[r], work[r+1], logM, o)
+			}
+		}
+	}
+}
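
The GF(2^16) helpers above keep most arithmetic in the log domain modulo 2^16-1; addMod and subMod use a partial reduction that folds the carry (or borrow) back in and may return the modulus itself. Below is a minimal standalone sketch of that reduction, mirroring the code above; the package wrapper and printed values are only illustrative.

    package main

    import "fmt"

    const bitwidth = 16
    const modulus = 1<<bitwidth - 1 // 65535

    // addMod mirrors the partial reduction above: the carry out of the low
    // 16 bits is added back in, so the result stays in [0, modulus].
    func addMod(a, b uint16) uint16 {
        sum := uint(a) + uint(b)
        return uint16(sum + sum>>bitwidth)
    }

    func main() {
        // 65000 + 1000 = 66000 = 65535 + 465, so the reduced sum is 465.
        fmt.Println(addMod(65000, 1000)) // 465
        // The reduction is "partial": modulus + 0 still yields 65535, not 0.
        fmt.Println(addMod(modulus, 0)) // 65535
    }
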
diff --git a/leopard8.go b/leopard8.go
new file mode 100644
index 0000000..31c97ea
--- /dev/null
+++ b/leopard8.go
@@ -0,0 +1,1266 @@
+package reedsolomon
+
+// This is an O(n*log n) implementation of Reed-Solomon
+// codes, ported from the C++ library https://github.com/catid/leopard.
+//
+// The implementation is based on the paper
+//
+// S.-J. Lin, T. Y. Al-Naffouri, Y. S. Han, and W.-H. Chung,
+// "Novel Polynomial Basis with Fast Fourier Transform
+// and Its Application to Reed-Solomon Erasure Codes"
+// IEEE Trans. on Information Theory, pp. 6284-6299, November, 2016.
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"math/bits"
+	"sync"
+)
+
+// leopardFF8 is like reedSolomon but for the 8-bit "leopard" implementation.
+type leopardFF8 struct {
+	dataShards   int // Number of data shards, should not be modified.
+	parityShards int // Number of parity shards, should not be modified.
+	totalShards  int // Total number of shards. Calculated, and should not be modified.
+
+	workPool    sync.Pool
+	inversion   map[[inversion8Bytes]byte]leopardGF8cache
+	inversionMu sync.Mutex
+
+	o options
+}
+
+const inversion8Bytes = 256 / 8
+
+type leopardGF8cache struct {
+	errorLocs [256]ffe8
+	bits      *errorBitfield8
+}
+
+// newFF8 is like New, but for the 8-bit "leopard" implementation.
+func newFF8(dataShards, parityShards int, opt options) (*leopardFF8, error) {
+	initConstants8()
+
+	if dataShards <= 0 || parityShards <= 0 {
+		return nil, ErrInvShardNum
+	}
+
+	if dataShards+parityShards > 65536 {
+		return nil, ErrMaxShardNum
+	}
+
+	r := &leopardFF8{
+		dataShards:   dataShards,
+		parityShards: parityShards,
+		totalShards:  dataShards + parityShards,
+		o:            opt,
+	}
+	if opt.inversionCache && (r.totalShards <= 64 || opt.forcedInversionCache) {
+		// The inversion cache is relatively ineffective for large shard counts and can use a lot of memory.
+		// r.totalShards does not cover the key space; it is only used as an initial size estimate.
+		r.inversion = make(map[[inversion8Bytes]byte]leopardGF8cache, r.totalShards)
+	}
+	return r, nil
+}
+
+var _ = Extensions(&leopardFF8{})
+
+func (r *leopardFF8) ShardSizeMultiple() int {
+	return 64
+}
+
+func (r *leopardFF8) DataShards() int {
+	return r.dataShards
+}
+
+func (r *leopardFF8) ParityShards() int {
+	return r.parityShards
+}
+
+func (r *leopardFF8) TotalShards() int {
+	return r.totalShards
+}
+
+func (r *leopardFF8) AllocAligned(each int) [][]byte {
+	return AllocAligned(r.totalShards, each)
+}
+
+type ffe8 uint8
+
+const (
+	bitwidth8   = 8
+	order8      = 1 << bitwidth8
+	modulus8    = order8 - 1
+	polynomial8 = 0x11D
+
+	// Encode in blocks of this size.
+	workSize8 = 32 << 10
+)
+
+var (
+	fftSkew8  *[modulus8]ffe8
+	logWalsh8 *[order8]ffe8
+)
+
+// Logarithm Tables
+var (
+	logLUT8 *[order8]ffe8
+	expLUT8 *[order8]ffe8
+)
+
+// Stores the partial products of x * y at offset x + y * 256
+// Repeated accesses from the same y value are faster
+var mul8LUTs *[order8]mul8LUT
+
+type mul8LUT struct {
+	Value [256]ffe8
+}
+
+// Stores lookup for avx2
+var multiply256LUT8 *[order8][2 * 16]byte
+
+func (r *leopardFF8) Encode(shards [][]byte) error {
+	if len(shards) != r.totalShards {
+		return ErrTooFewShards
+	}
+
+	if err := checkShards(shards, false); err != nil {
+		return err
+	}
+	return r.encode(shards)
+}
+
+func (r *leopardFF8) encode(shards [][]byte) error {
+	shardSize := shardSize(shards)
+	if shardSize%64 != 0 {
+		return ErrInvalidShardSize
+	}
+
+	m := ceilPow2(r.parityShards)
+	var work [][]byte
+	if w, ok := r.workPool.Get().([][]byte); ok {
+		work = w
+	} else {
+		work = AllocAligned(m*2, workSize8)
+	}
+	if cap(work) >= m*2 {
+		work = work[:m*2]
+		for i := range work {
+			if i >= r.parityShards {
+				if cap(work[i]) < workSize8 {
+					work[i] = AllocAligned(1, workSize8)[0]
+				} else {
+					work[i] = work[i][:workSize8]
+				}
+			}
+		}
+	} else {
+		work = AllocAligned(m*2, workSize8)
+	}
+
+	defer r.workPool.Put(work)
+
+	mtrunc := m
+	if r.dataShards < mtrunc {
+		mtrunc = r.dataShards
+	}
+
+	skewLUT := fftSkew8[m-1:]
+
+	// Split large shards.
+	// More likely on lower shard count.
+	off := 0
+	sh := make([][]byte, len(shards))
+
+	// work slice we can modify
+	wMod := make([][]byte, len(work))
+	copy(wMod, work)
+	for off < shardSize {
+		work := wMod
+		sh := sh
+		end := off + workSize8
+		if end > shardSize {
+			end = shardSize
+			sz := shardSize - off
+			for i := range work {
+				// Last iteration only...
+				work[i] = work[i][:sz]
+			}
+		}
+		for i := range shards {
+			sh[i] = shards[i][off:end]
+		}
+
+		// Replace work slices, so we write directly to output.
+		// Note that work has parity *before* data shards.
+		res := shards[r.dataShards:r.totalShards]
+		for i := range res {
+			work[i] = res[i][off:end]
+		}
+
+		ifftDITEncoder8(
+			sh[:r.dataShards],
+			mtrunc,
+			work,
+			nil, // No xor output
+			m,
+			skewLUT,
+			&r.o,
+		)
+
+		lastCount := r.dataShards % m
+		skewLUT2 := skewLUT
+		if m >= r.dataShards {
+			goto skip_body
+		}
+
+		// For sets of m data pieces:
+		for i := m; i+m <= r.dataShards; i += m {
+			sh = sh[m:]
+			skewLUT2 = skewLUT2[m:]
+
+			// work <- work xor IFFT(data + i, m, m + i)
+
+			ifftDITEncoder8(
+				sh, // data source
+				m,
+				work[m:], // temporary workspace
+				work,     // xor destination
+				m,
+				skewLUT2,
+				&r.o,
+			)
+		}
+
+		// Handle final partial set of m pieces:
+		if lastCount != 0 {
+			sh = sh[m:]
+			skewLUT2 = skewLUT2[m:]
+
+			// work <- work xor IFFT(data + i, m, m + i)
+
+			ifftDITEncoder8(
+				sh, // data source
+				lastCount,
+				work[m:], // temporary workspace
+				work,     // xor destination
+				m,
+				skewLUT2,
+				&r.o,
+			)
+		}
+
+	skip_body:
+		// work <- FFT(work, m, 0)
+		fftDIT8(work, r.parityShards, m, fftSkew8[:], &r.o)
+		off += workSize8
+	}
+
+	return nil
+}
+
+func (r *leopardFF8) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error {
+	return ErrNotSupported
+}
+
+func (r *leopardFF8) Join(dst io.Writer, shards [][]byte, outSize int) error {
+	// Do we have enough shards?
+	if len(shards) < r.dataShards {
+		return ErrTooFewShards
+	}
+	shards = shards[:r.dataShards]
+
+	// Do we have enough data?
+	size := 0
+	for _, shard := range shards {
+		if shard == nil {
+			return ErrReconstructRequired
+		}
+		size += len(shard)
+
+		// Do we have enough data already?
+		if size >= outSize {
+			break
+		}
+	}
+	if size < outSize {
+		return ErrShortData
+	}
+
+	// Copy data to dst
+	write := outSize
+	for _, shard := range shards {
+		if write < len(shard) {
+			_, err := dst.Write(shard[:write])
+			return err
+		}
+		n, err := dst.Write(shard)
+		if err != nil {
+			return err
+		}
+		write -= n
+	}
+	return nil
+}
+
+func (r *leopardFF8) Update(shards [][]byte, newDatashards [][]byte) error {
+	return ErrNotSupported
+}
+
+func (r *leopardFF8) Split(data []byte) ([][]byte, error) {
+	if len(data) == 0 {
+		return nil, ErrShortData
+	}
+	if r.totalShards == 1 && len(data)&63 == 0 {
+		return [][]byte{data}, nil
+	}
+
+	dataLen := len(data)
+	// Calculate number of bytes per data shard.
+	perShard := (len(data) + r.dataShards - 1) / r.dataShards
+	perShard = ((perShard + 63) / 64) * 64
+	needTotal := r.totalShards * perShard
+
+	if cap(data) > len(data) {
+		if cap(data) > needTotal {
+			data = data[:needTotal]
+		} else {
+			data = data[:cap(data)]
+		}
+		clear := data[dataLen:]
+		for i := range clear {
+			clear[i] = 0
+		}
+	}
+
+	// Only allocate memory if necessary
+	var padding [][]byte
+	if len(data) < needTotal {
+		// calculate maximum number of full shards in `data` slice
+		fullShards := len(data) / perShard
+		padding = AllocAligned(r.totalShards-fullShards, perShard)
+		if dataLen > perShard*fullShards {
+			// Copy partial shards
+			copyFrom := data[perShard*fullShards : dataLen]
+			for i := range padding {
+				if len(copyFrom) <= 0 {
+					break
+				}
+				copyFrom = copyFrom[copy(padding[i], copyFrom):]
+			}
+		}
+	}
+
+	// Split into equal-length shards.
+	dst := make([][]byte, r.totalShards)
+	i := 0
+	for ; i < len(dst) && len(data) >= perShard; i++ {
+		dst[i] = data[:perShard:perShard]
+		data = data[perShard:]
+	}
+
+	for j := 0; i+j < len(dst); j++ {
+		dst[i+j] = padding[0]
+		padding = padding[1:]
+	}
+
+	return dst, nil
+}
+
+func (r *leopardFF8) ReconstructSome(shards [][]byte, required []bool) error {
+	return r.ReconstructData(shards)
+}
+
+func (r *leopardFF8) Reconstruct(shards [][]byte) error {
+	return r.reconstruct(shards, true)
+}
+
+func (r *leopardFF8) ReconstructData(shards [][]byte) error {
+	return r.reconstruct(shards, false)
+}
+
+func (r *leopardFF8) Verify(shards [][]byte) (bool, error) {
+	if len(shards) != r.totalShards {
+		return false, ErrTooFewShards
+	}
+	if err := checkShards(shards, false); err != nil {
+		return false, err
+	}
+
+	// Re-encode parity shards to temporary storage.
+	shardSize := len(shards[0])
+	outputs := make([][]byte, r.totalShards)
+	copy(outputs, shards[:r.dataShards])
+	for i := r.dataShards; i < r.totalShards; i++ {
+		outputs[i] = make([]byte, shardSize)
+	}
+	if err := r.Encode(outputs); err != nil {
+		return false, err
+	}
+
+	// Compare.
+	for i := r.dataShards; i < r.totalShards; i++ {
+		if !bytes.Equal(outputs[i], shards[i]) {
+			return false, nil
+		}
+	}
+	return true, nil
+}
+
+func (r *leopardFF8) reconstruct(shards [][]byte, recoverAll bool) error {
+	if len(shards) != r.totalShards {
+		return ErrTooFewShards
+	}
+
+	if err := checkShards(shards, true); err != nil {
+		return err
+	}
+
+	// Quick check: are all of the shards present?  If so, there's
+	// nothing to do.
+	numberPresent := 0
+	dataPresent := 0
+	for i := 0; i < r.totalShards; i++ {
+		if len(shards[i]) != 0 {
+			numberPresent++
+			if i < r.dataShards {
+				dataPresent++
+			}
+		}
+	}
+	if numberPresent == r.totalShards || !recoverAll && dataPresent == r.dataShards {
+		// Cool. All of the shards have data. We don't
+		// need to do anything.
+		return nil
+	}
+
+	// Check if we have enough to reconstruct.
+	if numberPresent < r.dataShards {
+		return ErrTooFewShards
+	}
+
+	shardSize := shardSize(shards)
+	if shardSize%64 != 0 {
+		return ErrInvalidShardSize
+	}
+
+	// Use only if we are missing less than 1/4 of the parity shards
+	// and we are restoring a significant amount of data.
+	useBits := r.totalShards-numberPresent <= r.parityShards/4 && shardSize*r.totalShards >= 64<<10
+
+	m := ceilPow2(r.parityShards)
+	n := ceilPow2(m + r.dataShards)
+
+	const LEO_ERROR_BITFIELD_OPT = true
+
+	// Fill in error locations.
+	var errorBits errorBitfield8
+	var errLocs [order8]ffe8
+	for i := 0; i < r.parityShards; i++ {
+		if len(shards[i+r.dataShards]) == 0 {
+			errLocs[i] = 1
+			if LEO_ERROR_BITFIELD_OPT && recoverAll {
+				errorBits.set(i)
+			}
+		}
+	}
+	for i := r.parityShards; i < m; i++ {
+		errLocs[i] = 1
+		if LEO_ERROR_BITFIELD_OPT && recoverAll {
+			errorBits.set(i)
+		}
+	}
+	for i := 0; i < r.dataShards; i++ {
+		if len(shards[i]) == 0 {
+			errLocs[i+m] = 1
+			if LEO_ERROR_BITFIELD_OPT {
+				errorBits.set(i + m)
+			}
+		}
+	}
+
+	var gotInversion bool
+	if LEO_ERROR_BITFIELD_OPT && r.inversion != nil {
+		cacheID := errorBits.cacheID()
+		r.inversionMu.Lock()
+		if inv, ok := r.inversion[cacheID]; ok {
+			r.inversionMu.Unlock()
+			errLocs = inv.errorLocs
+			if inv.bits != nil && useBits {
+				errorBits = *inv.bits
+				useBits = true
+			} else {
+				useBits = false
+			}
+			gotInversion = true
+		} else {
+			r.inversionMu.Unlock()
+		}
+	}
+
+	if !gotInversion {
+		// No inversion...
+		if LEO_ERROR_BITFIELD_OPT && useBits {
+			errorBits.prepare()
+		}
+
+		// Evaluate the error locator polynomial
+		fwht8(&errLocs, order8, m+r.dataShards)
+
+		for i := 0; i < order8; i++ {
+			errLocs[i] = ffe8((uint(errLocs[i]) * uint(logWalsh8[i])) % modulus8)
+		}
+
+		fwht8(&errLocs, order8, order8)
+
+		if r.inversion != nil {
+			c := leopardGF8cache{
+				errorLocs: errLocs,
+			}
+			if useBits {
+				// Heap alloc
+				var x errorBitfield8
+				x = errorBits
+				c.bits = &x
+			}
+			r.inversionMu.Lock()
+			r.inversion[errorBits.cacheID()] = c
+			r.inversionMu.Unlock()
+		}
+	}
+
+	var work [][]byte
+	if w, ok := r.workPool.Get().([][]byte); ok {
+		work = w
+	}
+	if cap(work) >= n {
+		work = work[:n]
+		for i := range work {
+			if cap(work[i]) < workSize8 {
+				work[i] = make([]byte, workSize8)
+			} else {
+				work[i] = work[i][:workSize8]
+			}
+		}
+
+	} else {
+		work = make([][]byte, n)
+		all := make([]byte, n*workSize8)
+		for i := range work {
+			work[i] = all[i*workSize8 : i*workSize8+workSize8]
+		}
+	}
+	defer r.workPool.Put(work)
+
+	// work <- recovery data
+
+	// Split large shards.
+	// More likely on lower shard count.
+	sh := make([][]byte, len(shards))
+	// Copy...
+	copy(sh, shards)
+
+	// Add output
+	for i, sh := range shards {
+		if !recoverAll && i >= r.dataShards {
+			continue
+		}
+		if len(sh) == 0 {
+			if cap(sh) >= shardSize {
+				shards[i] = sh[:shardSize]
+			} else {
+				shards[i] = make([]byte, shardSize)
+			}
+		}
+	}
+
+	off := 0
+	for off < shardSize {
+		endSlice := off + workSize8
+		if endSlice > shardSize {
+			endSlice = shardSize
+			sz := shardSize - off
+			// Last iteration only
+			for i := range work {
+				work[i] = work[i][:sz]
+			}
+		}
+		for i := range shards {
+			if len(sh[i]) != 0 {
+				sh[i] = shards[i][off:endSlice]
+			}
+		}
+		for i := 0; i < r.parityShards; i++ {
+			if len(sh[i+r.dataShards]) != 0 {
+				mulgf8(work[i], sh[i+r.dataShards], errLocs[i], &r.o)
+			} else {
+				memclr(work[i])
+			}
+		}
+		for i := r.parityShards; i < m; i++ {
+			memclr(work[i])
+		}
+
+		// work <- original data
+
+		for i := 0; i < r.dataShards; i++ {
+			if len(sh[i]) != 0 {
+				mulgf8(work[m+i], sh[i], errLocs[m+i], &r.o)
+			} else {
+				memclr(work[m+i])
+			}
+		}
+		for i := m + r.dataShards; i < n; i++ {
+			memclr(work[i])
+		}
+
+		// work <- IFFT(work, n, 0)
+
+		ifftDITDecoder8(
+			m+r.dataShards,
+			work,
+			n,
+			fftSkew8[:],
+			&r.o,
+		)
+
+		// work <- FormalDerivative(work, n)
+
+		for i := 1; i < n; i++ {
+			width := ((i ^ (i - 1)) + 1) >> 1
+			slicesXor(work[i-width:i], work[i:i+width], &r.o)
+		}
+
+		// work <- FFT(work, n, 0) truncated to m + dataShards
+
+		outputCount := m + r.dataShards
+
+		if LEO_ERROR_BITFIELD_OPT && useBits {
+			errorBits.fftDIT8(work, outputCount, n, fftSkew8[:], &r.o)
+		} else {
+			fftDIT8(work, outputCount, n, fftSkew8[:], &r.o)
+		}
+
+		// Reveal erasures
+		//
+		//  Original = -ErrLocator * FFT( Derivative( IFFT( ErrLocator * ReceivedData ) ) )
+		//  mul_mem(x, y, log_m) equals x[] = y[] * log_m
+		//
+		// mem layout: [Recovery Data (Power of Two = M)] [Original Data (K)] [Zero Padding out to N]
+		end := r.dataShards
+		if recoverAll {
+			end = r.totalShards
+		}
+		// Restore
+		for i := 0; i < end; i++ {
+			if len(sh[i]) != 0 {
+				continue
+			}
+
+			if i >= r.dataShards {
+				// Parity shard.
+				mulgf8(shards[i][off:endSlice], work[i-r.dataShards], modulus8-errLocs[i-r.dataShards], &r.o)
+			} else {
+				// Data shard.
+				mulgf8(shards[i][off:endSlice], work[i+m], modulus8-errLocs[i+m], &r.o)
+			}
+		}
+		off += workSize8
+	}
+	return nil
+}
+
+// Basic no-frills version for decoder
+func ifftDITDecoder8(mtrunc int, work [][]byte, m int, skewLUT []ffe8, o *options) {
+	// Decimation in time: Unroll 2 layers at a time
+	dist := 1
+	dist4 := 4
+	for dist4 <= m {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			iend := r + dist
+			log_m01 := skewLUT[iend-1]
+			log_m02 := skewLUT[iend+dist-1]
+			log_m23 := skewLUT[iend+dist*2-1]
+
+			// For each set of dist elements:
+			for i := r; i < iend; i++ {
+				ifftDIT48(work[i:], dist, log_m01, log_m23, log_m02, o)
+			}
+		}
+		dist = dist4
+		dist4 <<= 2
+	}
+
+	// If there is one layer left:
+	if dist < m {
+		// Assuming that dist = m / 2
+		if dist*2 != m {
+			panic("internal error")
+		}
+
+		log_m := skewLUT[dist-1]
+
+		if log_m == modulus8 {
+			slicesXor(work[dist:2*dist], work[:dist], o)
+		} else {
+			for i := 0; i < dist; i++ {
+				ifftDIT28(
+					work[i],
+					work[i+dist],
+					log_m,
+					o,
+				)
+			}
+		}
+	}
+}
+
+// In-place FFT for encoder and decoder
+func fftDIT8(work [][]byte, mtrunc, m int, skewLUT []ffe8, o *options) {
+	// Decimation in time: Unroll 2 layers at a time
+	dist4 := m
+	dist := m >> 2
+	for dist != 0 {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			iend := r + dist
+			log_m01 := skewLUT[iend-1]
+			log_m02 := skewLUT[iend+dist-1]
+			log_m23 := skewLUT[iend+dist*2-1]
+
+			// For each set of dist elements:
+			for i := r; i < iend; i++ {
+				fftDIT48(
+					work[i:],
+					dist,
+					log_m01,
+					log_m23,
+					log_m02,
+					o,
+				)
+			}
+		}
+		dist4 = dist
+		dist >>= 2
+	}
+
+	// If there is one layer left:
+	if dist4 == 2 {
+		for r := 0; r < mtrunc; r += 2 {
+			log_m := skewLUT[r+1-1]
+
+			if log_m == modulus8 {
+				sliceXor(work[r], work[r+1], o)
+			} else {
+				fftDIT28(work[r], work[r+1], log_m, o)
+			}
+		}
+	}
+}
+
+// 4-way butterfly
+func fftDIT4Ref8(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	// First layer:
+	if log_m02 == modulus8 {
+		sliceXor(work[0], work[dist*2], o)
+		sliceXor(work[dist], work[dist*3], o)
+	} else {
+		fftDIT28(work[0], work[dist*2], log_m02, o)
+		fftDIT28(work[dist], work[dist*3], log_m02, o)
+	}
+
+	// Second layer:
+	if log_m01 == modulus8 {
+		sliceXor(work[0], work[dist], o)
+	} else {
+		fftDIT28(work[0], work[dist], log_m01, o)
+	}
+
+	if log_m23 == modulus8 {
+		sliceXor(work[dist*2], work[dist*3], o)
+	} else {
+		fftDIT28(work[dist*2], work[dist*3], log_m23, o)
+	}
+}
+
+// Unrolled IFFT for encoder
+func ifftDITEncoder8(data [][]byte, mtrunc int, work [][]byte, xorRes [][]byte, m int, skewLUT []ffe8, o *options) {
+	// I tried rolling the memcpy/memset into the first layer of the FFT and
+	// found that it only yields a 4% performance improvement, which is not
+	// worth the extra complexity.
+	for i := 0; i < mtrunc; i++ {
+		copy(work[i], data[i])
+	}
+	for i := mtrunc; i < m; i++ {
+		memclr(work[i])
+	}
+
+	// Decimation in time: Unroll 2 layers at a time
+	dist := 1
+	dist4 := 4
+	for dist4 <= m {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			iend := r + dist
+			log_m01 := skewLUT[iend]
+			log_m02 := skewLUT[iend+dist]
+			log_m23 := skewLUT[iend+dist*2]
+
+			// For each set of dist elements:
+			for i := r; i < iend; i++ {
+				ifftDIT48(
+					work[i:],
+					dist,
+					log_m01,
+					log_m23,
+					log_m02,
+					o,
+				)
+			}
+		}
+
+		dist = dist4
+		dist4 <<= 2
+		// I tried alternating sweeps left->right and right->left to reduce cache misses.
+		// It provides about 1% performance boost when done for both FFT and IFFT, so it
+		// does not seem to be worth the extra complexity.
+	}
+
+	// If there is one layer left:
+	if dist < m {
+		// Assuming that dist = m / 2
+		if dist*2 != m {
+			panic("internal error")
+		}
+
+		logm := skewLUT[dist]
+
+		if logm == modulus8 {
+			slicesXor(work[dist:dist*2], work[:dist], o)
+		} else {
+			for i := 0; i < dist; i++ {
+				ifftDIT28(work[i], work[i+dist], logm, o)
+			}
+		}
+	}
+
+	// I tried unrolling this but it does not provide more than 5% performance
+	// improvement for 16-bit finite fields, so it's not worth the complexity.
+	if xorRes != nil {
+		slicesXor(xorRes[:m], work[:m], o)
+	}
+}
+
+func ifftDIT4Ref8(work [][]byte, dist int, log_m01, log_m23, log_m02 ffe8, o *options) {
+	// First layer:
+	if log_m01 == modulus8 {
+		sliceXor(work[0], work[dist], o)
+	} else {
+		ifftDIT28(work[0], work[dist], log_m01, o)
+	}
+
+	if log_m23 == modulus8 {
+		sliceXor(work[dist*2], work[dist*3], o)
+	} else {
+		ifftDIT28(work[dist*2], work[dist*3], log_m23, o)
+	}
+
+	// Second layer:
+	if log_m02 == modulus8 {
+		sliceXor(work[0], work[dist*2], o)
+		sliceXor(work[dist], work[dist*3], o)
+	} else {
+		ifftDIT28(work[0], work[dist*2], log_m02, o)
+		ifftDIT28(work[dist], work[dist*3], log_m02, o)
+	}
+}
+
+// Reference version of muladd: x[] ^= y[] * log_m
+func refMulAdd8(x, y []byte, log_m ffe8) {
+	lut := &mul8LUTs[log_m]
+
+	for len(x) >= 64 {
+		// Assert sizes for no bounds checks in loop
+		src := y[:64]
+		dst := x[:len(src)] // Needed, but not checked...
+		for i, y1 := range src {
+			dst[i] ^= byte(lut.Value[y1])
+		}
+		x = x[64:]
+		y = y[64:]
+	}
+}
+
+// Reference version of mul: x[] = y[] * log_m
+func refMul8(x, y []byte, log_m ffe8) {
+	lut := &mul8LUTs[log_m]
+
+	for off := 0; off < len(x); off += 64 {
+		src := y[off : off+64]
+		for i, y1 := range src {
+			x[off+i] = byte(lut.Value[y1])
+		}
+	}
+}
+
+// Returns a * Log(b)
+func mulLog8(a, log_b ffe8) ffe8 {
+	/*
+	   Note that this operation is not a normal multiplication in a finite
+	   field because the right operand is already a logarithm.  This is done
+	   because it moves K table lookups from the Decode() method into the
+	   initialization step that is less performance critical.  The LogWalsh[]
+	   table below contains precalculated logarithms so it is easier to do
+	   all the other multiplies in that form as well.
+	*/
+	if a == 0 {
+		return 0
+	}
+	return expLUT8[addMod8(logLUT8[a], log_b)]
+}
+
+// z = x + y (mod kModulus)
+func addMod8(a, b ffe8) ffe8 {
+	sum := uint(a) + uint(b)
+
+	// Partial reduction step, allowing for kModulus to be returned
+	return ffe8(sum + sum>>bitwidth8)
+}
+
+// z = x - y (mod kModulus)
+func subMod8(a, b ffe8) ffe8 {
+	dif := uint(a) - uint(b)
+
+	// Partial reduction step, allowing for kModulus to be returned
+	return ffe8(dif + dif>>bitwidth8)
+}
+
+// Decimation in time (DIT) Fast Walsh-Hadamard Transform
+// Unrolls pairs of layers to perform cross-layer operations in registers
+// mtrunc: Number of elements that are non-zero at the front of data
+func fwht8(data *[order8]ffe8, m, mtrunc int) {
+	// Decimation in time: Unroll 2 layers at a time
+	dist := 1
+	dist4 := 4
+	for dist4 <= m {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			// For each set of dist elements:
+			// Use 16 bit indices to avoid bounds check on [65536]ffe8.
+			dist := uint16(dist)
+			off := uint16(r)
+			for i := uint16(0); i < dist; i++ {
+				// fwht48(data[i:], dist) inlined...
+				// Reading values appears faster than updating pointers.
+				// Casting to uint is not faster.
+				t0 := data[off]
+				t1 := data[off+dist]
+				t2 := data[off+dist*2]
+				t3 := data[off+dist*3]
+
+				t0, t1 = fwht2alt8(t0, t1)
+				t2, t3 = fwht2alt8(t2, t3)
+				t0, t2 = fwht2alt8(t0, t2)
+				t1, t3 = fwht2alt8(t1, t3)
+
+				data[off] = t0
+				data[off+dist] = t1
+				data[off+dist*2] = t2
+				data[off+dist*3] = t3
+				off++
+			}
+		}
+		dist = dist4
+		dist4 <<= 2
+	}
+
+	// If there is one layer left:
+	if dist < m {
+		dist := uint16(dist)
+		for i := uint16(0); i < dist; i++ {
+			fwht28(&data[i], &data[i+dist])
+		}
+	}
+}
+
+func fwht48(data []ffe8, s int) {
+	s2 := s << 1
+
+	t0 := &data[0]
+	t1 := &data[s]
+	t2 := &data[s2]
+	t3 := &data[s2+s]
+
+	fwht28(t0, t1)
+	fwht28(t2, t3)
+	fwht28(t0, t2)
+	fwht28(t1, t3)
+}
+
+// {a, b} = {a + b, a - b} (Mod Q)
+func fwht28(a, b *ffe8) {
+	sum := addMod8(*a, *b)
+	dif := subMod8(*a, *b)
+	*a = sum
+	*b = dif
+}
+
+// fwht2alt8 is like fwht28, but returns the result.
+func fwht2alt8(a, b ffe8) (ffe8, ffe8) {
+	return addMod8(a, b), subMod8(a, b)
+}
+
+var initOnce8 sync.Once
+
+func initConstants8() {
+	initOnce8.Do(func() {
+		initLUTs8()
+		initFFTSkew8()
+		initMul8LUT()
+	})
+}
+
+// Initialize logLUT8, expLUT8.
+func initLUTs8() {
+	cantorBasis := [bitwidth8]ffe8{
+		1, 214, 152, 146, 86, 200, 88, 230,
+	}
+
+	expLUT8 = &[order8]ffe8{}
+	logLUT8 = &[order8]ffe8{}
+
+	// LFSR table generation:
+	state := 1
+	for i := ffe8(0); i < modulus8; i++ {
+		expLUT8[state] = i
+		state <<= 1
+		if state >= order8 {
+			state ^= polynomial8
+		}
+	}
+	expLUT8[0] = modulus8
+
+	// Conversion to Cantor basis:
+
+	logLUT8[0] = 0
+	for i := 0; i < bitwidth8; i++ {
+		basis := cantorBasis[i]
+		width := 1 << i
+
+		for j := 0; j < width; j++ {
+			logLUT8[j+width] = logLUT8[j] ^ basis
+		}
+	}
+
+	for i := 0; i < order8; i++ {
+		logLUT8[i] = expLUT8[logLUT8[i]]
+	}
+
+	for i := 0; i < order8; i++ {
+		expLUT8[logLUT8[i]] = ffe8(i)
+	}
+
+	expLUT8[modulus8] = expLUT8[0]
+}
+
+// Initialize fftSkew8.
+func initFFTSkew8() {
+	var temp [bitwidth8 - 1]ffe8
+
+	// Generate FFT skew vector {1}:
+
+	for i := 1; i < bitwidth8; i++ {
+		temp[i-1] = ffe8(1 << i)
+	}
+
+	fftSkew8 = &[modulus8]ffe8{}
+	logWalsh8 = &[order8]ffe8{}
+
+	for m := 0; m < bitwidth8-1; m++ {
+		step := 1 << (m + 1)
+
+		fftSkew8[1<<m-1] = 0
+
+		for i := m; i < bitwidth8-1; i++ {
+			s := 1 << (i + 1)
+
+			for j := 1<<m - 1; j < s; j += step {
+				fftSkew8[j+s] = fftSkew8[j] ^ temp[i]
+			}
+		}
+
+		temp[m] = modulus8 - logLUT8[mulLog8(temp[m], logLUT8[temp[m]^1])]
+
+		for i := m + 1; i < bitwidth8-1; i++ {
+			sum := addMod8(logLUT8[temp[i]^1], temp[m])
+			temp[i] = mulLog8(temp[i], sum)
+		}
+	}
+
+	for i := 0; i < modulus8; i++ {
+		fftSkew8[i] = logLUT8[fftSkew8[i]]
+	}
+
+	// Precalculate FWHT(Log[i]):
+
+	for i := 0; i < order8; i++ {
+		logWalsh8[i] = logLUT8[i]
+	}
+	logWalsh8[0] = 0
+
+	fwht8(logWalsh8, order8, order8)
+}
+
+func initMul8LUT() {
+	mul8LUTs = &[order8]mul8LUT{}
+
+	// For each log_m multiplicand:
+	for log_m := 0; log_m < order8; log_m++ {
+		var tmp [64]ffe8
+		for nibble, shift := 0, 0; nibble < 4; {
+			nibble_lut := tmp[nibble*16:]
+
+			for xnibble := 0; xnibble < 16; xnibble++ {
+				prod := mulLog8(ffe8(xnibble<<shift), ffe8(log_m))
+				nibble_lut[xnibble] = prod
+			}
+			nibble++
+			shift += 4
+		}
+		lut := &mul8LUTs[log_m]
+		for i := range lut.Value[:] {
+			lut.Value[i] = tmp[i&15] ^ tmp[((i>>4)+16)]
+		}
+	}
+	// Always initialize the assembly tables.
+	// They are not as big a resource hog as the GF16 tables.
+	if true {
+		multiply256LUT8 = &[order8][16 * 2]byte{}
+
+		for logM := range multiply256LUT8[:] {
+			// For each 4 bits of the finite field width in bits:
+			shift := 0
+			for i := 0; i < 2; i++ {
+				// Construct 16 entry LUT for PSHUFB
+				prod := multiply256LUT8[logM][i*16 : i*16+16]
+				for x := range prod[:] {
+					prod[x] = byte(mulLog8(ffe8(x<<shift), ffe8(logM)))
+				}
+				shift += 4
+			}
+		}
+	}
+}
+
+const kWords8 = order8 / 64
+
+// errorBitfield8 contains progressive errors to help indicate which
+// shards need reconstruction.
+type errorBitfield8 struct {
+	Words [7][kWords8]uint64
+}
+
+func (e *errorBitfield8) set(i int) {
+	e.Words[0][(i/64)&3] |= uint64(1) << (i & 63)
+}
+
+func (e *errorBitfield8) cacheID() [inversion8Bytes]byte {
+	var res [inversion8Bytes]byte
+	binary.LittleEndian.PutUint64(res[0:8], e.Words[0][0])
+	binary.LittleEndian.PutUint64(res[8:16], e.Words[0][1])
+	binary.LittleEndian.PutUint64(res[16:24], e.Words[0][2])
+	binary.LittleEndian.PutUint64(res[24:32], e.Words[0][3])
+	return res
+}
+
+func (e *errorBitfield8) isNeeded(mipLevel, bit int) bool {
+	if mipLevel >= 8 || mipLevel <= 0 {
+		return true
+	}
+	return 0 != (e.Words[mipLevel-1][bit/64] & (uint64(1) << (bit & 63)))
+}
+
+func (e *errorBitfield8) prepare() {
+	// First mip level is for final layer of FFT: pairs of data
+	for i := 0; i < kWords8; i++ {
+		w_i := e.Words[0][i]
+		hi2lo0 := w_i | ((w_i & kHiMasks[0]) >> 1)
+		lo2hi0 := (w_i & (kHiMasks[0] >> 1)) << 1
+		w_i = hi2lo0 | lo2hi0
+		e.Words[0][i] = w_i
+
+		bits := 2
+		for j := 1; j < 5; j++ {
+			hi2lo_j := w_i | ((w_i & kHiMasks[j]) >> bits)
+			lo2hi_j := (w_i & (kHiMasks[j] >> bits)) << bits
+			w_i = hi2lo_j | lo2hi_j
+			e.Words[j][i] = w_i
+			bits <<= 1
+		}
+	}
+
+	for i := 0; i < kWords8; i++ {
+		w := e.Words[4][i]
+		w |= w >> 32
+		w |= w << 32
+		e.Words[5][i] = w
+	}
+
+	for i := 0; i < kWords8; i += 2 {
+		t := e.Words[5][i] | e.Words[5][i+1]
+		e.Words[6][i] = t
+		e.Words[6][i+1] = t
+	}
+}
+
+func (e *errorBitfield8) fftDIT8(work [][]byte, mtrunc, m int, skewLUT []ffe8, o *options) {
+	// Decimation in time: Unroll 2 layers at a time
+	mipLevel := bits.Len32(uint32(m)) - 1
+
+	dist4 := m
+	dist := m >> 2
+	for dist != 0 {
+		// For each set of dist*4 elements:
+		for r := 0; r < mtrunc; r += dist4 {
+			if !e.isNeeded(mipLevel, r) {
+				continue
+			}
+			iEnd := r + dist
+			logM01 := skewLUT[iEnd-1]
+			logM02 := skewLUT[iEnd+dist-1]
+			logM23 := skewLUT[iEnd+dist*2-1]
+
+			// For each set of dist elements:
+			for i := r; i < iEnd; i++ {
+				fftDIT48(
+					work[i:],
+					dist,
+					logM01,
+					logM23,
+					logM02,
+					o)
+			}
+		}
+		dist4 = dist
+		dist >>= 2
+		mipLevel -= 2
+	}
+
+	// If there is one layer left:
+	if dist4 == 2 {
+		for r := 0; r < mtrunc; r += 2 {
+			if !e.isNeeded(mipLevel, r) {
+				continue
+			}
+			logM := skewLUT[r+1-1]
+
+			if logM == modulus8 {
+				sliceXor(work[r], work[r+1], o)
+			} else {
+				fftDIT28(work[r], work[r+1], logM, o)
+			}
+		}
+	}
+}
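
For orientation, the 8-bit field above is the classic GF(2^8) with reduction polynomial 0x11D, and mulLog8 multiplies through exp/log tables. The sketch below builds conventional exp/log tables with the same shift-and-reduce step as initLUTs8 and multiplies in the log domain; the Cantor-basis remapping that the real tables additionally apply is deliberately omitted here, so the table contents differ from the library's internal representation.

    package main

    import "fmt"

    const polynomial8 = 0x11D // same reduction polynomial as above

    func main() {
        // Conventional exp/log tables for GF(2^8): generator 2, poly 0x11D.
        var exp [510]byte
        var logT [256]int
        x := 1
        for i := 0; i < 255; i++ {
            exp[i] = byte(x)
            logT[x] = i
            x <<= 1
            if x >= 256 {
                x ^= polynomial8
            }
        }
        // Double the table so exponent sums never need an explicit mod 255.
        for i := 255; i < 510; i++ {
            exp[i] = exp[i-255]
        }

        mul := func(a, b byte) byte {
            if a == 0 || b == 0 {
                return 0
            }
            return exp[logT[a]+logT[b]]
        }

        // 0x80 * 0x02 overflows 8 bits and is reduced by 0x11D, giving 0x1D.
        fmt.Printf("0x80 * 0x02 = 0x%02X\n", mul(0x80, 0x02))
    }
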
diff --git a/leopard_test.go b/leopard_test.go
new file mode 100644
index 0000000..8c3fcee
--- /dev/null
+++ b/leopard_test.go
@@ -0,0 +1,165 @@
+package reedsolomon
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestEncoderReconstructLeo(t *testing.T) {
+	testEncoderReconstructLeo(t)
+}
+
+func testEncoderReconstructLeo(t *testing.T, o ...Option) {
+	// Create some sample data
+	var data = make([]byte, 2<<20)
+	fillRandom(data)
+
+	// Create an encoder with 500 data and 300 parity shards
+	enc, err := New(500, 300, testOptions(o...)...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	shards, err := enc.Split(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = enc.Encode(shards)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that it verifies
+	ok, err := enc.Verify(shards)
+	if !ok || err != nil {
+		t.Fatal("not ok:", ok, "err:", err)
+	}
+
+	// Delete a shard
+	shards[0] = nil
+
+	// Should reconstruct
+	err = enc.Reconstruct(shards)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that it verifies
+	ok, err = enc.Verify(shards)
+	if !ok || err != nil {
+		t.Fatal("not ok:", ok, "err:", err)
+	}
+
+	// Recover original bytes
+	buf := new(bytes.Buffer)
+	err = enc.Join(buf, shards, len(data))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(buf.Bytes(), data) {
+		t.Fatal("recovered bytes do not match")
+	}
+
+	// Corrupt a shard
+	shards[0] = nil
+	shards[1][0], shards[1][500] = 75, 75
+
+	// Should reconstruct (but with corrupted data)
+	err = enc.Reconstruct(shards)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that it verifies
+	ok, err = enc.Verify(shards)
+	if ok || err != nil {
+		t.Fatal("error or ok:", ok, "err:", err)
+	}
+
+	// Recovered data should not match original
+	buf.Reset()
+	err = enc.Join(buf, shards, len(data))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bytes.Equal(buf.Bytes(), data) {
+		t.Fatal("corrupted data matches original")
+	}
+}
+
+func TestEncoderReconstructFailLeo(t *testing.T) {
+	// Create some sample data
+	var data = make([]byte, 2<<20)
+	fillRandom(data)
+
+	// Create an encoder with 500 data and 300 parity shards
+	enc, err := New(500, 300, testOptions()...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	shards, err := enc.Split(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = enc.Encode(shards)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that it verifies
+	ok, err := enc.Verify(shards)
+	if !ok || err != nil {
+		t.Fatal("not ok:", ok, "err:", err)
+	}
+
+	// Delete more than parity shards
+	for i := 0; i < 301; i++ {
+		shards[i] = nil
+	}
+
+	// Should not reconstruct
+	err = enc.Reconstruct(shards)
+	if err != ErrTooFewShards {
+		t.Fatal("want ErrTooFewShards, got:", err)
+	}
+}
+
+func TestSplitJoinLeo(t *testing.T) {
+	var data = make([]byte, (250<<10)-1)
+	fillRandom(data)
+
+	enc, _ := New(500, 300, testOptions()...)
+	shards, err := enc.Split(data)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = enc.Split([]byte{})
+	if err != ErrShortData {
+		t.Errorf("expected %v, got %v", ErrShortData, err)
+	}
+
+	buf := new(bytes.Buffer)
+	err = enc.Join(buf, shards, 5000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(buf.Bytes(), data[:5000]) {
+		t.Fatal("recovered data does not match original")
+	}
+
+	err = enc.Join(buf, [][]byte{}, 0)
+	if err != ErrTooFewShards {
+		t.Errorf("expected %v, got %v", ErrTooFewShards, err)
+	}
+
+	err = enc.Join(buf, shards, len(data)+500*64)
+	if err != ErrShortData {
+		t.Errorf("expected %v, got %v", ErrShortData, err)
+	}
+
+	shards[0] = nil
+	err = enc.Join(buf, shards, len(data))
+	if err != ErrReconstructRequired {
+		t.Errorf("expected %v, got %v", ErrReconstructRequired, err)
+	}
+}
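
The tests above exercise the high-level API end to end. Condensed into a standalone sketch (error handling shortened; the shard counts follow the tests, so with more than 256 total shards the Leopard GF(2^16) backend is selected):

    package main

    import (
        "bytes"
        "crypto/rand"
        "log"

        "github.com/klauspost/reedsolomon"
    )

    func main() {
        // 500 data + 300 parity shards, as in the tests above.
        enc, err := reedsolomon.New(500, 300)
        if err != nil {
            log.Fatal(err)
        }

        data := make([]byte, 2<<20)
        if _, err := rand.Read(data); err != nil {
            log.Fatal(err)
        }

        shards, err := enc.Split(data)
        if err != nil {
            log.Fatal(err)
        }
        if err := enc.Encode(shards); err != nil {
            log.Fatal(err)
        }

        // Lose a shard and reconstruct it.
        shards[0] = nil
        if err := enc.Reconstruct(shards); err != nil {
            log.Fatal(err)
        }

        // Join recovers the original buffer.
        var buf bytes.Buffer
        if err := enc.Join(&buf, shards, len(data)); err != nil {
            log.Fatal(err)
        }
        log.Println("recovered:", bytes.Equal(buf.Bytes(), data))
    }
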
diff --git a/matrix.go b/matrix.go
index 22669c2..497a3d9 100644
--- a/matrix.go
+++ b/matrix.go
@@ -175,8 +175,7 @@ func (m matrix) SwapRows(r1, r2 int) error {
 	return nil
 }
 
-// IsSquare will return true if the matrix is square
-// and nil if the matrix is square
+// IsSquare will return true if the matrix is square, otherwise false.
 func (m matrix) IsSquare() bool {
 	return len(m) == len(m[0])
 }
diff --git a/matrix_test.go b/matrix_test.go
index 052d5c2..1ff5428 100644
--- a/matrix_test.go
+++ b/matrix_test.go
@@ -90,8 +90,8 @@ func TestMatrixIdentity(t *testing.T) {
 func TestMatrixMultiply(t *testing.T) {
 	m1, err := newMatrixData(
 		[][]byte{
-			[]byte{1, 2},
-			[]byte{3, 4},
+			{1, 2},
+			{3, 4},
 		})
 	if err != nil {
 		t.Fatal(err)
@@ -99,8 +99,8 @@ func TestMatrixMultiply(t *testing.T) {
 
 	m2, err := newMatrixData(
 		[][]byte{
-			[]byte{5, 6},
-			[]byte{7, 8},
+			{5, 6},
+			{7, 8},
 		})
 	if err != nil {
 		t.Fatal(err)
@@ -131,9 +131,9 @@ func TestMatrixInverse(t *testing.T) {
 		{
 			// input data to construct the matrix.
 			[][]byte{
-				[]byte{56, 23, 98},
-				[]byte{3, 100, 200},
-				[]byte{45, 201, 123},
+				{56, 23, 98},
+				{3, 100, 200},
+				{45, 201, 123},
 			},
 			// expected Inverse matrix.
 			"[[175, 133, 33], [130, 13, 245], [112, 35, 126]]",
@@ -146,11 +146,11 @@ func TestMatrixInverse(t *testing.T) {
 		{
 			// input data to construct the matrix.
 			[][]byte{
-				[]byte{1, 0, 0, 0, 0},
-				[]byte{0, 1, 0, 0, 0},
-				[]byte{0, 0, 0, 1, 0},
-				[]byte{0, 0, 0, 0, 1},
-				[]byte{7, 7, 6, 6, 1},
+				{1, 0, 0, 0, 0},
+				{0, 1, 0, 0, 0},
+				{0, 0, 0, 1, 0},
+				{0, 0, 0, 0, 1},
+				{7, 7, 6, 6, 1},
 			},
 			// expectedInverse matrix.
 			"[[1, 0, 0, 0, 0]," +
@@ -166,9 +166,9 @@ func TestMatrixInverse(t *testing.T) {
 		// expected to fail with errNotSquare.
 		{
 			[][]byte{
-				[]byte{56, 23},
-				[]byte{3, 100},
-				[]byte{45, 201},
+				{56, 23},
+				{3, 100},
+				{45, 201},
 			},
 			"",
 			false,
@@ -179,8 +179,8 @@ func TestMatrixInverse(t *testing.T) {
 		{
 
 			[][]byte{
-				[]byte{4, 2},
-				[]byte{12, 6},
+				{4, 2},
+				{12, 6},
 			},
 			"",
 			false,
diff --git a/options.go b/options.go
index 26269eb..f74fe00 100644
--- a/options.go
+++ b/options.go
@@ -15,11 +15,15 @@ type options struct {
 	shardSize     int
 	perRound      int
 
-	useAVX512, useAVX2, useSSSE3, useSSE2 bool
-	usePAR1Matrix                         bool
-	useCauchy                             bool
-	fastOneParity                         bool
-	inversionCache                        bool
+	useGFNI, useAVX512, useAVX2, useSSSE3, useSSE2 bool
+	useJerasureMatrix                              bool
+	usePAR1Matrix                                  bool
+	useCauchy                                      bool
+	fastOneParity                                  bool
+	inversionCache                                 bool
+	forcedInversionCache                           bool
+	customMatrix                                   [][]byte
+	withLeopard                                    leopardMode
 
 	// stream options
 	concReads  bool
@@ -37,9 +41,24 @@ var defaultOptions = options{
 	useSSSE3:  cpuid.CPU.Supports(cpuid.SSSE3),
 	useSSE2:   cpuid.CPU.Supports(cpuid.SSE2),
 	useAVX2:   cpuid.CPU.Supports(cpuid.AVX2),
-	useAVX512: cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512BW),
+	useAVX512: cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512BW, cpuid.AVX512VL),
+	useGFNI:   cpuid.CPU.Supports(cpuid.AVX512F, cpuid.GFNI, cpuid.AVX512DQ),
 }
 
+// leopardMode controls the use of leopard GF in encoding and decoding.
+type leopardMode int
+
+const (
+	// leopardAsNeeded only switches to leopard 16-bit when there are more than
+	// 256 shards.
+	leopardAsNeeded leopardMode = iota
+	// leopardGF16 uses leopard in 16-bit mode for all shard counts.
+	leopardGF16
+	// leopardAlways uses 8-bit leopard for shards less than or equal to 256,
+	// 16-bit leopard otherwise.
+	leopardAlways
+)
+
 func init() {
 	if runtime.GOMAXPROCS(0) <= 1 {
 		defaultOptions.maxGoroutines = 1
@@ -113,10 +132,11 @@ func WithConcurrentStreamWrites(enabled bool) Option {
 
 // WithInversionCache allows to control the inversion cache.
 // This will cache reconstruction matrices so they can be reused.
-// Enabled by default.
+// Enabled by default; for Leopard encoding it is only enabled by default when there are 64 or fewer shards.
 func WithInversionCache(enabled bool) Option {
 	return func(o *options) {
 		o.inversionCache = enabled
+		o.forcedInversionCache = true
 	}
 }
 
@@ -130,27 +150,55 @@ func WithStreamBlockSize(n int) Option {
 	}
 }
 
-func withSSSE3(enabled bool) Option {
+// WithSSSE3 allows enabling/disabling SSSE3 instructions.
+// If not set, SSSE3 will be turned on or off automatically based on CPU ID information.
+func WithSSSE3(enabled bool) Option {
 	return func(o *options) {
 		o.useSSSE3 = enabled
 	}
 }
 
-func withAVX2(enabled bool) Option {
+// WithAVX2 allows enabling/disabling AVX2 instructions.
+// If not set, AVX2 will be turned on or off automatically based on CPU ID information.
+func WithAVX2(enabled bool) Option {
 	return func(o *options) {
 		o.useAVX2 = enabled
 	}
 }
 
-func withSSE2(enabled bool) Option {
+// WithSSE2 allows enabling/disabling SSE2 instructions.
+// If not set, SSE2 will be turned on or off automatically based on CPU ID information.
+func WithSSE2(enabled bool) Option {
 	return func(o *options) {
 		o.useSSE2 = enabled
 	}
 }
 
-func withAVX512(enabled bool) Option {
+// WithAVX512 allows enabling/disabling AVX512 (and GFNI) instructions.
+func WithAVX512(enabled bool) Option {
 	return func(o *options) {
 		o.useAVX512 = enabled
+		o.useGFNI = enabled
+	}
+}
+
+// WithGFNI allows enabling/disabling AVX512+GFNI instructions.
+// If not set, GFNI will be turned on or off automatically based on CPU ID information.
+func WithGFNI(enabled bool) Option {
+	return func(o *options) {
+		o.useGFNI = enabled
+	}
+}
+
+// WithJerasureMatrix causes the encoder to build the Reed-Solomon-Vandermonde
+// matrix in the same way as done by the Jerasure library.
+// With this method the first row and column of the coding matrix contain only 1's,
+// so the first parity chunk is always equal to the XOR of all data chunks.
+func WithJerasureMatrix() Option {
+	return func(o *options) {
+		o.useJerasureMatrix = true
+		o.usePAR1Matrix = false
+		o.useCauchy = false
 	}
 }
 
@@ -160,6 +208,7 @@ func withAVX512(enabled bool) Option {
 // shards.
 func WithPAR1Matrix() Option {
 	return func(o *options) {
+		o.useJerasureMatrix = false
 		o.usePAR1Matrix = true
 		o.useCauchy = false
 	}
@@ -171,8 +220,9 @@ func WithPAR1Matrix() Option {
 // but will result in slightly faster start-up time.
 func WithCauchyMatrix() Option {
 	return func(o *options) {
-		o.useCauchy = true
+		o.useJerasureMatrix = false
 		o.usePAR1Matrix = false
+		o.useCauchy = true
 	}
 }
 
@@ -184,3 +234,44 @@ func WithFastOneParityMatrix() Option {
 		o.fastOneParity = true
 	}
 }
+
+// WithCustomMatrix causes the encoder to use the manually specified matrix.
+// customMatrix represents only the parity chunks.
+// customMatrix must have at least ParityShards rows and DataShards columns.
+// It can be used for interoperability with libraries which generate
+// the matrix differently or to implement more complex coding schemes like LRC
+// (locally reconstructible codes).
+func WithCustomMatrix(customMatrix [][]byte) Option {
+	return func(o *options) {
+		o.customMatrix = customMatrix
+	}
+}
+
+// WithLeopardGF16 will always use leopard GF16 for encoding,
+// even when there are fewer than 256 shards.
+// This will likely improve reconstruction time for some setups.
+// This is not compatible with Leopard output for <= 256 shards.
+// Note that Leopard places certain restrictions on use; see other documentation.
+func WithLeopardGF16(enabled bool) Option {
+	return func(o *options) {
+		if enabled {
+			o.withLeopard = leopardGF16
+		} else {
+			o.withLeopard = leopardAsNeeded
+		}
+	}
+}
+
+// WithLeopardGF will use leopard GF for encoding, even when there are fewer than
+// 256 shards.
+// This will likely improve reconstruction time for some setups.
+// Note that Leopard places certain restrictions on use; see other documentation.
+func WithLeopardGF(enabled bool) Option {
+	return func(o *options) {
+		if enabled {
+			o.withLeopard = leopardAlways
+		} else {
+			o.withLeopard = leopardAsNeeded
+		}
+	}
+}
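
The new options above compose with New like the existing ones. A minimal sketch of the new Leopard and matrix selections (the shard counts are chosen only for illustration):

    package main

    import (
        "log"

        "github.com/klauspost/reedsolomon"
    )

    func main() {
        // Force 16-bit Leopard even for a small shard count. The output is
        // not compatible with the default GF(2^8) encoding, and shard sizes
        // must be a multiple of 64.
        leo, err := reedsolomon.New(10, 4, reedsolomon.WithLeopardGF16(true))
        if err != nil {
            log.Fatal(err)
        }
        _ = leo

        // Jerasure-compatible Vandermonde matrix: the first parity shard is
        // the XOR of all data shards.
        jer, err := reedsolomon.New(10, 4, reedsolomon.WithJerasureMatrix())
        if err != nil {
            log.Fatal(err)
        }
        _ = jer
    }
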
diff --git a/race_none_test.go b/race_none_test.go
new file mode 100644
index 0000000..3c0d24b
--- /dev/null
+++ b/race_none_test.go
@@ -0,0 +1,8 @@
+// Copyright 2022, Klaus Post, see LICENSE for details.
+
+//go:build !race
+// +build !race
+
+package reedsolomon
+
+const raceEnabled = false
diff --git a/race_test.go b/race_test.go
new file mode 100644
index 0000000..417a0e5
--- /dev/null
+++ b/race_test.go
@@ -0,0 +1,8 @@
+// Copyright 2022, Klaus Post, see LICENSE for details.
+
+//go:build race
+// +build race
+
+package reedsolomon
+
+const raceEnabled = true
diff --git a/reedsolomon.go b/reedsolomon.go
index 87f39db..20e3974 100644
--- a/reedsolomon.go
+++ b/reedsolomon.go
@@ -8,12 +8,12 @@
 // Package reedsolomon enables Erasure Coding in Go
 //
 // For usage and examples, see https://github.com/klauspost/reedsolomon
-//
 package reedsolomon
 
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"io"
 	"runtime"
 	"sync"
@@ -32,6 +32,12 @@ type Encoder interface {
 	// data shards while this is running.
 	Encode(shards [][]byte) error
 
+	// EncodeIdx will add parity for a single data shard.
+	// Parity shards should start out as 0. The caller must zero them.
+	// Data shards must be delivered exactly once. There is no check for this.
+	// The parity shards will always be updated and the data shards will remain the same.
+	EncodeIdx(dataShard []byte, idx int, parity [][]byte) error
+
 	// Verify returns true if the parity shards contain correct data.
 	// The data is the same format as Encode. No data is modified, so
 	// you are allowed to read from data while this is running.
@@ -71,6 +77,24 @@ type Encoder interface {
 	// calling the Verify function is likely to fail.
 	ReconstructData(shards [][]byte) error
 
+	// ReconstructSome will recreate only requested data shards, if possible.
+	//
+	// Given a list of shards, some of which contain data, fills in the
+	// data shards indicated by true values in the "required" parameter.
+	// The length of "required" array must be equal to DataShards.
+	//
+	// The length of "shards" array must be equal to Shards.
+	// You indicate that a shard is missing by setting it to nil or zero-length.
+	// If a shard is zero-length but has sufficient capacity, that memory will
+	// be used, otherwise a new []byte will be allocated.
+	//
+	// If there are too few shards to reconstruct the missing
+	// ones, ErrTooFewShards will be returned.
+	//
+	// As the reconstructed shard set may contain missing parity shards,
+	// calling the Verify function is likely to fail.
+	ReconstructSome(shards [][]byte, required []bool) error
+
 	// Update is used to change a few data shards and then update their parity.
 	// Input 'newDatashards' contains the changed data shards.
 	// Input 'shards' contains the old data shards (nil if a data shard is unchanged) and the old parity shards.
@@ -80,12 +104,16 @@ type Encoder interface {
 	Update(shards [][]byte, newDatashards [][]byte) error
 
 	// Split a data slice into the number of shards given to the encoder,
-	// and create empty parity shards.
+	// and create empty parity shards if necessary.
 	//
 	// The data will be split into equally sized shards.
-	// If the data size isn't dividable by the number of shards,
+	// If the data size isn't divisible by the number of shards,
 	// the last shard will contain extra zeros.
 	//
+	// If there is extra capacity on the provided data slice
+	// it will be used instead of allocating parity shards.
+	// It will be zeroed out.
+	//
 	// There must be at least 1 byte otherwise ErrShortData will be
 	// returned.
 	//
@@ -102,24 +130,72 @@ type Encoder interface {
 	Join(dst io.Writer, shards [][]byte, outSize int) error
 }
 
+// Extensions is an optional interface.
+// All returned instances will support this interface.
+type Extensions interface {
+	// ShardSizeMultiple will return the value that shard sizes must be a multiple of.
+	ShardSizeMultiple() int
+
+	// DataShards will return the number of data shards.
+	DataShards() int
+
+	// ParityShards will return the number of parity shards.
+	ParityShards() int
+
+	// TotalShards will return the total number of shards.
+	TotalShards() int
+
+	// AllocAligned will allocate TotalShards number of slices,
+	// aligned to reasonable memory sizes.
+	// Provide the size of each shard.
+	AllocAligned(each int) [][]byte
+}
+
 const (
 	avx2CodeGenMinSize       = 64
 	avx2CodeGenMinShards     = 3
 	avx2CodeGenMaxGoroutines = 8
+	gfniCodeGenMaxGoroutines = 4
+
+	intSize = 32 << (^uint(0) >> 63) // 32 or 64
+	maxInt  = 1<<(intSize-1) - 1
 )
 
 // reedSolomon contains a matrix for a specific
 // distribution of datashards and parity shards.
 // Construct if using New()
 type reedSolomon struct {
-	DataShards   int // Number of data shards, should not be modified.
-	ParityShards int // Number of parity shards, should not be modified.
-	Shards       int // Total number of shards. Calculated, and should not be modified.
+	dataShards   int // Number of data shards, should not be modified.
+	parityShards int // Number of parity shards, should not be modified.
+	totalShards  int // Total number of shards. Calculated, and should not be modified.
 	m            matrix
 	tree         *inversionTree
 	parity       [][]byte
 	o            options
-	mPool        sync.Pool
+	mPoolSz      int
+	mPool        sync.Pool // Pool for temp matrices, etc
+}
+
+var _ = Extensions(&reedSolomon{})
+
+func (r *reedSolomon) ShardSizeMultiple() int {
+	return 1
+}
+
+func (r *reedSolomon) DataShards() int {
+	return r.dataShards
+}
+
+func (r *reedSolomon) ParityShards() int {
+	return r.parityShards
+}
+
+func (r *reedSolomon) TotalShards() int {
+	return r.totalShards
+}
+
+func (r *reedSolomon) AllocAligned(each int) [][]byte {
+	return AllocAligned(r.totalShards, each)
 }
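
The Extensions interface above is documented as implemented by every returned encoder, so callers can reach it with a plain type assertion; a short sketch (illustrative, using only the methods declared in this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.New(17, 3)
	if err != nil {
		log.Fatal(err)
	}
	ext := enc.(reedsolomon.Extensions)
	fmt.Println("data:", ext.DataShards(), "parity:", ext.ParityShards(), "total:", ext.TotalShards())

	// Round the desired shard size up to the required multiple and
	// allocate aligned buffers for all shards in one call.
	mul := ext.ShardSizeMultiple()
	size := ((1000 + mul - 1) / mul) * mul
	shards := ext.AllocAligned(size)
	_ = shards
}
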
 
 // ErrInvShardNum will be returned by New, if you attempt to create
@@ -132,6 +208,9 @@ var ErrInvShardNum = errors.New("cannot create Encoder with less than one data s
 // GF(2^8).
 var ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")
 
+// ErrNotSupported is returned when an operation is not supported.
+var ErrNotSupported = errors.New("operation not supported")
+
 // buildMatrix creates the matrix to use for encoding, given the
 // number of data shards and the number of total shards.
 //
@@ -164,6 +243,87 @@ func buildMatrix(dataShards, totalShards int) (matrix, error) {
 	return vm.Multiply(topInv)
 }
 
+// buildMatrixJerasure creates the same encoding matrix as the Jerasure library
+//
+// The top square of the matrix is guaranteed to be an identity
+// matrix, which means that the data shards are unchanged after
+// encoding.
+func buildMatrixJerasure(dataShards, totalShards int) (matrix, error) {
+	// Start with a Vandermonde matrix.  This matrix would work,
+	// in theory, but doesn't have the property that the data
+	// shards are unchanged after encoding.
+	vm, err := vandermonde(totalShards, dataShards)
+	if err != nil {
+		return nil, err
+	}
+
+	// Jerasure does this:
+	// first row is always 100..00
+	vm[0][0] = 1
+	for i := 1; i < dataShards; i++ {
+		vm[0][i] = 0
+	}
+	// last row is always 000..01
+	for i := 0; i < dataShards-1; i++ {
+		vm[totalShards-1][i] = 0
+	}
+	vm[totalShards-1][dataShards-1] = 1
+
+	for i := 0; i < dataShards; i++ {
+		// Find the row where i'th col is not 0
+		r := i
+		for ; r < totalShards && vm[r][i] == 0; r++ {
+		}
+		if r != i {
+			// Swap it with i'th row if not already
+			t := vm[r]
+			vm[r] = vm[i]
+			vm[i] = t
+		}
+		// Multiply by the inverted matrix (same as vm.Multiply(vm[0:dataShards].Invert()))
+		if vm[i][i] != 1 {
+			// Make vm[i][i] = 1 by dividing the column by vm[i][i]
+			tmp := galDivide(1, vm[i][i])
+			for j := 0; j < totalShards; j++ {
+				vm[j][i] = galMultiply(vm[j][i], tmp)
+			}
+		}
+		for j := 0; j < dataShards; j++ {
+			// Make vm[i][j] = 0 where j != i by adding vm[i][j]*vm[.][i] to each column
+			tmp := vm[i][j]
+			if j != i && tmp != 0 {
+				for r := 0; r < totalShards; r++ {
+					vm[r][j] = galAdd(vm[r][j], galMultiply(tmp, vm[r][i]))
+				}
+			}
+		}
+	}
+
+	// Make vm[dataShards] row all ones - divide each column j by vm[dataShards][j]
+	for j := 0; j < dataShards; j++ {
+		tmp := vm[dataShards][j]
+		if tmp != 1 {
+			tmp = galDivide(1, tmp)
+			for i := dataShards; i < totalShards; i++ {
+				vm[i][j] = galMultiply(vm[i][j], tmp)
+			}
+		}
+	}
+
+	// Make vm[dataShards...totalShards-1][0] column all ones - divide each row
+	for i := dataShards + 1; i < totalShards; i++ {
+		tmp := vm[i][0]
+		if tmp != 1 {
+			tmp = galDivide(1, tmp)
+			for j := 0; j < dataShards; j++ {
+				vm[i][j] = galMultiply(vm[i][j], tmp)
+			}
+		}
+	}
+
+	return vm, nil
+}
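
buildMatrixJerasure above is selected through the WithJerasureMatrix option (see the New switch and the test options further down); a hedged sketch of requesting the Jerasure-compatible layout:

package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// Parity produced with this matrix is meant to be interoperable with
	// the Jerasure library for the same 8+4 layout.
	enc, err := reedsolomon.New(8, 4, reedsolomon.WithJerasureMatrix())
	if err != nil {
		log.Fatal(err)
	}
	_ = enc
}
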
+
 // buildMatrixPAR1 creates the matrix to use for encoding according to
 // the PARv1 spec, given the number of data shards and the number of
 // total shards. Note that the method they use is buggy, and may lead
@@ -243,25 +403,39 @@ func buildXorMatrix(dataShards, totalShards int) (matrix, error) {
 // New creates a new encoder and initializes it to
 // the number of data shards and parity shards that
 // you want to use. You can reuse this encoder.
-// Note that the maximum number of total shards is 256.
+// Note that the maximum number of total shards is 65536, with some
+// restrictions for a total larger than 256:
+//
+//   - Shard sizes must be multiple of 64
+//   - The methods Join/Split/Update/EncodeIdx are not supported
+//
 // If no options are supplied, default options are used.
 func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
-	r := reedSolomon{
-		DataShards:   dataShards,
-		ParityShards: parityShards,
-		Shards:       dataShards + parityShards,
-		o:            defaultOptions,
+	o := defaultOptions
+	for _, opt := range opts {
+		opt(&o)
 	}
 
-	for _, opt := range opts {
-		opt(&r.o)
+	totShards := dataShards + parityShards
+	switch {
+	case o.withLeopard == leopardGF16 && parityShards > 0 || totShards > 256:
+		return newFF16(dataShards, parityShards, o)
+	case o.withLeopard == leopardAlways && parityShards > 0:
+		return newFF8(dataShards, parityShards, o)
 	}
-	if dataShards <= 0 || parityShards < 0 {
-		return nil, ErrInvShardNum
+	if totShards > 256 {
+		return nil, ErrMaxShardNum
 	}
 
-	if dataShards+parityShards > 256 {
-		return nil, ErrMaxShardNum
+	r := reedSolomon{
+		dataShards:   dataShards,
+		parityShards: parityShards,
+		totalShards:  dataShards + parityShards,
+		o:            o,
+	}
+
+	if dataShards <= 0 || parityShards < 0 {
+		return nil, ErrInvShardNum
 	}
 
 	if parityShards == 0 {
@@ -270,14 +444,32 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
 
 	var err error
 	switch {
+	case r.o.customMatrix != nil:
+		if len(r.o.customMatrix) < parityShards {
+			return nil, errors.New("coding matrix must contain at least parityShards rows")
+		}
+		r.m = make([][]byte, r.totalShards)
+		for i := 0; i < dataShards; i++ {
+			r.m[i] = make([]byte, dataShards)
+			r.m[i][i] = 1
+		}
+		for k, row := range r.o.customMatrix {
+			if len(row) < dataShards {
+				return nil, errors.New("coding matrix must contain at least dataShards columns")
+			}
+			r.m[dataShards+k] = make([]byte, dataShards)
+			copy(r.m[dataShards+k], row)
+		}
 	case r.o.fastOneParity && parityShards == 1:
-		r.m, err = buildXorMatrix(dataShards, r.Shards)
+		r.m, err = buildXorMatrix(dataShards, r.totalShards)
 	case r.o.useCauchy:
-		r.m, err = buildMatrixCauchy(dataShards, r.Shards)
+		r.m, err = buildMatrixCauchy(dataShards, r.totalShards)
 	case r.o.usePAR1Matrix:
-		r.m, err = buildMatrixPAR1(dataShards, r.Shards)
+		r.m, err = buildMatrixPAR1(dataShards, r.totalShards)
+	case r.o.useJerasureMatrix:
+		r.m, err = buildMatrixJerasure(dataShards, r.totalShards)
 	default:
-		r.m, err = buildMatrix(dataShards, r.Shards)
+		r.m, err = buildMatrix(dataShards, r.totalShards)
 	}
 	if err != nil {
 		return nil, err
@@ -285,20 +477,45 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
 
 	// Calculate what we want per round
 	r.o.perRound = cpuid.CPU.Cache.L2
-	if r.o.perRound <= 0 {
-		// Set to 128K if undetectable.
+	if r.o.perRound < 128<<10 {
 		r.o.perRound = 128 << 10
 	}
 
+	divide := parityShards + 1
+	if avx2CodeGen && r.o.useAVX2 && (dataShards > maxAvx2Inputs || parityShards > maxAvx2Outputs) {
+		// Base it on the L1 cache if we have many inputs.
+		r.o.perRound = cpuid.CPU.Cache.L1D
+		if r.o.perRound < 32<<10 {
+			r.o.perRound = 32 << 10
+		}
+		divide = 0
+		if dataShards > maxAvx2Inputs {
+			divide += maxAvx2Inputs
+		} else {
+			divide += dataShards
+		}
+		if parityShards > maxAvx2Inputs {
+			divide += maxAvx2Outputs
+		} else {
+			divide += parityShards
+		}
+	}
+
 	if cpuid.CPU.ThreadsPerCore > 1 && r.o.maxGoroutines > cpuid.CPU.PhysicalCores {
 		// If multiple threads per core, make sure they don't contend for cache.
 		r.o.perRound /= cpuid.CPU.ThreadsPerCore
 	}
+
 	// 1 input + parity must fit in cache, and we add one more to be safer.
-	r.o.perRound = r.o.perRound / (1 + parityShards)
+	r.o.perRound = r.o.perRound / divide
 	// Align to 64 bytes.
 	r.o.perRound = ((r.o.perRound + 63) / 64) * 64
 
+	// Final sanity check...
+	if r.o.perRound < 1<<10 {
+		r.o.perRound = 1 << 10
+	}
+
 	if r.o.minSplitSize <= 0 {
 		// Set minsplit as high as we can, but still have parity in L1.
 		cacheSize := cpuid.CPU.Cache.L1D
@@ -313,10 +530,6 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
 		}
 	}
 
-	if r.o.perRound < r.o.minSplitSize {
-		r.o.perRound = r.o.minSplitSize
-	}
-
 	if r.o.shardSize > 0 {
 		p := runtime.GOMAXPROCS(0)
 		if p == 1 || r.o.shardSize <= r.o.minSplitSize*2 {
@@ -341,10 +554,14 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
 
 	// Generated AVX2 does not need data to stay in L1 cache between runs.
 	// We will be purely limited by RAM speed.
-	if r.canAVX2C(avx2CodeGenMinSize, r.DataShards, r.ParityShards) && r.o.maxGoroutines > avx2CodeGenMaxGoroutines {
+	if r.canAVX2C(avx2CodeGenMinSize, maxAvx2Inputs, maxAvx2Outputs) && r.o.maxGoroutines > avx2CodeGenMaxGoroutines {
 		r.o.maxGoroutines = avx2CodeGenMaxGoroutines
 	}
 
+	if r.canGFNI(avx2CodeGenMinSize, maxAvx2Inputs, maxAvx2Outputs) && r.o.maxGoroutines > gfniCodeGenMaxGoroutines {
+		r.o.maxGoroutines = gfniCodeGenMaxGoroutines
+	}
+
 	// Inverted matrices are cached in a tree keyed by the indices
 	// of the invalid rows of the data to reconstruct.
 	// The inversion root node will have the identity matrix as
@@ -360,13 +577,30 @@ func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
 	}
 
 	if avx2CodeGen && r.o.useAVX2 {
+		sz := r.dataShards * r.parityShards * 2 * 32
 		r.mPool.New = func() interface{} {
-			return make([]byte, r.Shards*2*32)
+			return AllocAligned(1, sz)[0]
 		}
+		r.mPoolSz = sz
 	}
 	return &r, err
 }
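
With the new switch at the top of New, requesting more than 256 total shards routes to the Leopard GF(2^16) backend described in the updated doc comment; a sketch, assuming 64-byte-multiple shard sizes as that backend requires:

package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 260+4 = 264 total shards, which exceeds the classic 256-shard limit.
	enc, err := reedsolomon.New(260, 4)
	if err != nil {
		log.Fatal(err)
	}
	shards := make([][]byte, 264)
	for i := range shards {
		shards[i] = make([]byte, 64) // must be a multiple of 64
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}
	// Join/Split/Update/EncodeIdx are documented as unsupported in this mode.
}
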
 
+func (r *reedSolomon) getTmpSlice() []byte {
+	return r.mPool.Get().([]byte)
+}
+
+func (r *reedSolomon) putTmpSlice(b []byte) {
+	if b != nil && cap(b) >= r.mPoolSz {
+		r.mPool.Put(b[:r.mPoolSz])
+		return
+	}
+	if false {
+		// Sanity check
+		panic(fmt.Sprintf("got short tmp returned, want %d, got %d", r.mPoolSz, cap(b)))
+	}
+}
+
 // ErrTooFewShards is returned if too few shards were given to
 // Encode/Verify/Reconstruct/Update. It will also be returned from Reconstruct
 // if there were too few shards to reconstruct the missing data.
@@ -379,7 +613,7 @@ var ErrTooFewShards = errors.New("too few shards given")
 // The parity shards will always be overwritten and the data shards
 // will remain the same.
 func (r *reedSolomon) Encode(shards [][]byte) error {
-	if len(shards) != r.Shards {
+	if len(shards) != r.totalShards {
 		return ErrTooFewShards
 	}
 
@@ -389,10 +623,65 @@ func (r *reedSolomon) Encode(shards [][]byte) error {
 	}
 
 	// Get the slice of output buffers.
-	output := shards[r.DataShards:]
+	output := shards[r.dataShards:]
 
 	// Do the coding.
-	r.codeSomeShards(r.parity, shards[0:r.DataShards], output, r.ParityShards, len(shards[0]))
+	r.codeSomeShards(r.parity, shards[0:r.dataShards], output[:r.parityShards], len(shards[0]))
+	return nil
+}
+
+// EncodeIdx will add parity for a single data shard.
+// Parity shards should start out zeroed. The caller must zero them before the first call.
+// Data shards should only be delivered once. There is no check for this.
+// The parity shards will always be updated and the data shards will remain unchanged.
+func (r *reedSolomon) EncodeIdx(dataShard []byte, idx int, parity [][]byte) error {
+	if len(parity) != r.parityShards {
+		return ErrTooFewShards
+	}
+	if len(parity) == 0 {
+		return nil
+	}
+	if idx < 0 || idx >= r.dataShards {
+		return ErrInvShardNum
+	}
+	err := checkShards(parity, false)
+	if err != nil {
+		return err
+	}
+	if len(parity[0]) != len(dataShard) {
+		return ErrShardSize
+	}
+
+	if avx2CodeGen && len(dataShard) >= r.o.perRound && len(parity) >= avx2CodeGenMinShards && (r.o.useAVX2 || r.o.useGFNI) {
+		m := make([][]byte, r.parityShards)
+		for iRow := range m {
+			m[iRow] = r.parity[iRow][idx : idx+1]
+		}
+		if r.o.useGFNI {
+			r.codeSomeShardsGFNI(m, [][]byte{dataShard}, parity, len(dataShard), false)
+		} else {
+			r.codeSomeShardsAVXP(m, [][]byte{dataShard}, parity, len(dataShard), false)
+		}
+		return nil
+	}
+
+	// Process using no goroutines for now.
+	start, end := 0, r.o.perRound
+	if end > len(dataShard) {
+		end = len(dataShard)
+	}
+
+	for start < len(dataShard) {
+		in := dataShard[start:end]
+		for iRow := 0; iRow < r.parityShards; iRow++ {
+			galMulSliceXor(r.parity[iRow][idx], in, parity[iRow][start:end], &r.o)
+		}
+		start = end
+		end += r.o.perRound
+		if end > len(dataShard) {
+			end = len(dataShard)
+		}
+	}
 	return nil
 }
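
A minimal sketch of the incremental encoding path implemented above: parity buffers start out zeroed and each data shard is fed exactly once, in any order (illustrative, using the EncodeIdx signature from this diff with the default backend):

package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const dataShards, parityShards, shardSize = 5, 2, 1024

	enc, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		log.Fatal(err)
	}

	// Parity must start out zeroed; EncodeIdx accumulates into it.
	parity := make([][]byte, parityShards)
	for i := range parity {
		parity[i] = make([]byte, shardSize)
	}

	// Deliver each data shard exactly once, e.g. as it becomes available.
	for idx := 0; idx < dataShards; idx++ {
		shard := make([]byte, shardSize) // stand-in for real data
		if err := enc.EncodeIdx(shard, idx, parity); err != nil {
			log.Fatal(err)
		}
	}
}
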
 
@@ -400,11 +689,11 @@ func (r *reedSolomon) Encode(shards [][]byte) error {
 var ErrInvalidInput = errors.New("invalid input")
 
 func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error {
-	if len(shards) != r.Shards {
+	if len(shards) != r.totalShards {
 		return ErrTooFewShards
 	}
 
-	if len(newDatashards) != r.DataShards {
+	if len(newDatashards) != r.dataShards {
 		return ErrTooFewShards
 	}
 
@@ -423,7 +712,7 @@ func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error {
 			return ErrInvalidInput
 		}
 	}
-	for _, p := range shards[r.DataShards:] {
+	for _, p := range shards[r.dataShards:] {
 		if p == nil {
 			return ErrInvalidInput
 		}
@@ -432,10 +721,10 @@ func (r *reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error {
 	shardSize := shardSize(shards)
 
 	// Get the slice of output buffers.
-	output := shards[r.DataShards:]
+	output := shards[r.dataShards:]
 
 	// Do the coding.
-	r.updateParityShards(r.parity, shards[0:r.DataShards], newDatashards[0:r.DataShards], output, r.ParityShards, shardSize)
+	r.updateParityShards(r.parity, shards[0:r.dataShards], newDatashards[0:r.dataShards], output, r.parityShards, shardSize)
 	return nil
 }
 
@@ -449,7 +738,7 @@ func (r *reedSolomon) updateParityShards(matrixRows, oldinputs, newinputs, outpu
 		return
 	}
 
-	for c := 0; c < r.DataShards; c++ {
+	for c := 0; c < r.dataShards; c++ {
 		in := newinputs[c]
 		if in == nil {
 			continue
@@ -476,7 +765,7 @@ func (r *reedSolomon) updateParityShardsP(matrixRows, oldinputs, newinputs, outp
 		}
 		wg.Add(1)
 		go func(start, stop int) {
-			for c := 0; c < r.DataShards; c++ {
+			for c := 0; c < r.dataShards; c++ {
 				in := newinputs[c]
 				if in == nil {
 					continue
@@ -498,7 +787,7 @@ func (r *reedSolomon) updateParityShardsP(matrixRows, oldinputs, newinputs, outp
 // Verify returns true if the parity shards contain the right data.
 // The data is the same format as Encode. No data is modified.
 func (r *reedSolomon) Verify(shards [][]byte) (bool, error) {
-	if len(shards) != r.Shards {
+	if len(shards) != r.totalShards {
 		return false, ErrTooFewShards
 	}
 	err := checkShards(shards, false)
@@ -507,10 +796,10 @@ func (r *reedSolomon) Verify(shards [][]byte) (bool, error) {
 	}
 
 	// Slice of buffers being checked.
-	toCheck := shards[r.DataShards:]
+	toCheck := shards[r.dataShards:]
 
 	// Do the checking.
-	return r.checkSomeShards(r.parity, shards[0:r.DataShards], toCheck, r.ParityShards, len(shards[0])), nil
+	return r.checkSomeShards(r.parity, shards[:r.dataShards], toCheck[:r.parityShards], len(shards[0])), nil
 }
 
 func (r *reedSolomon) canAVX2C(byteCount int, inputs, outputs int) bool {
@@ -519,28 +808,27 @@ func (r *reedSolomon) canAVX2C(byteCount int, inputs, outputs int) bool {
 		inputs <= maxAvx2Inputs && outputs <= maxAvx2Outputs
 }
 
+func (r *reedSolomon) canGFNI(byteCount int, inputs, outputs int) bool {
+	return avx2CodeGen && r.o.useGFNI &&
+		byteCount >= avx2CodeGenMinSize && inputs+outputs >= avx2CodeGenMinShards &&
+		inputs <= maxAvx2Inputs && outputs <= maxAvx2Outputs
+}
+
 // Multiplies a subset of rows from a coding matrix by a full set of
-// input shards to produce some output shards.
+// input shards to produce a set of output shards.
 // 'matrixRows' is the rows from the matrix to use.
 // 'inputs' is an array of byte arrays, each of which is one input shard.
 // The number of inputs used is determined by the length of each matrix row.
-// outputs Byte arrays where the computed shards are stored.
+// 'outputs' is an array of byte arrays where the computed shards are stored.
 // The number of outputs computed, and the
 // number of matrix rows used, is determined by
 // outputCount, which is the number of outputs to compute.
-func (r *reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+func (r *reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, byteCount int) {
 	if len(outputs) == 0 {
 		return
 	}
-	switch {
-	case r.o.useAVX512 && r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize && len(inputs) >= 4 && len(outputs) >= 2:
-		r.codeSomeShardsAvx512P(matrixRows, inputs, outputs, outputCount, byteCount)
-		return
-	case r.o.useAVX512 && len(inputs) >= 4 && len(outputs) >= 2:
-		r.codeSomeShardsAvx512(matrixRows, inputs, outputs, outputCount, byteCount)
-		return
-	case r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize:
-		r.codeSomeShardsP(matrixRows, inputs, outputs, outputCount, byteCount)
+	if byteCount > r.o.minSplitSize {
+		r.codeSomeShardsP(matrixRows, inputs, outputs, byteCount)
 		return
 	}
 
@@ -549,17 +837,65 @@ func (r *reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outpu
 	if end > len(inputs[0]) {
 		end = len(inputs[0])
 	}
-	if r.canAVX2C(byteCount, len(inputs), len(outputs)) {
-		m := genAvx2Matrix(matrixRows, len(inputs), len(outputs), r.mPool.Get().([]byte))
+	if r.canGFNI(byteCount, len(inputs), len(outputs)) {
+		var gfni [maxAvx2Inputs * maxAvx2Outputs]uint64
+		m := genGFNIMatrix(matrixRows, len(inputs), 0, len(outputs), gfni[:])
+		start += galMulSlicesGFNI(m, inputs, outputs, 0, byteCount)
+		end = len(inputs[0])
+	} else if r.canAVX2C(byteCount, len(inputs), len(outputs)) {
+		m := genAvx2Matrix(matrixRows, len(inputs), 0, len(outputs), r.getTmpSlice())
 		start += galMulSlicesAvx2(m, inputs, outputs, 0, byteCount)
-		r.mPool.Put(m)
+		r.putTmpSlice(m)
 		end = len(inputs[0])
+	} else if len(inputs)+len(outputs) > avx2CodeGenMinShards && r.canAVX2C(byteCount, maxAvx2Inputs, maxAvx2Outputs) {
+		var gfni [maxAvx2Inputs * maxAvx2Outputs]uint64
+		end = len(inputs[0])
+		inIdx := 0
+		m := r.getTmpSlice()
+		defer r.putTmpSlice(m)
+		ins := inputs
+		for len(ins) > 0 {
+			inPer := ins
+			if len(inPer) > maxAvx2Inputs {
+				inPer = inPer[:maxAvx2Inputs]
+			}
+			outs := outputs
+			outIdx := 0
+			for len(outs) > 0 {
+				outPer := outs
+				if len(outPer) > maxAvx2Outputs {
+					outPer = outPer[:maxAvx2Outputs]
+				}
+				if r.o.useGFNI {
+					m := genGFNIMatrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), gfni[:])
+					if inIdx == 0 {
+						galMulSlicesGFNI(m, inPer, outPer, 0, byteCount)
+					} else {
+						galMulSlicesGFNIXor(m, inPer, outPer, 0, byteCount)
+					}
+				} else {
+					m = genAvx2Matrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), m)
+					if inIdx == 0 {
+						galMulSlicesAvx2(m, inPer, outPer, 0, byteCount)
+					} else {
+						galMulSlicesAvx2Xor(m, inPer, outPer, 0, byteCount)
+					}
+				}
+				start = byteCount & avxSizeMask
+				outIdx += len(outPer)
+				outs = outs[len(outPer):]
+			}
+			inIdx += len(inPer)
+			ins = ins[len(inPer):]
+		}
+		if start >= end {
+			return
+		}
 	}
-
 	for start < len(inputs[0]) {
-		for c := 0; c < r.DataShards; c++ {
+		for c := 0; c < len(inputs); c++ {
 			in := inputs[c][start:end]
-			for iRow := 0; iRow < outputCount; iRow++ {
+			for iRow := 0; iRow < len(outputs); iRow++ {
 				if c == 0 {
 					galMulSlice(matrixRows[iRow][c], in, outputs[iRow][start:end], &r.o)
 				} else {
@@ -577,15 +913,32 @@ func (r *reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outpu
 
 // Perform the same as codeSomeShards, but split the workload into
 // several goroutines.
-func (r *reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+func (r *reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, byteCount int) {
 	var wg sync.WaitGroup
 	gor := r.o.maxGoroutines
 
 	var avx2Matrix []byte
+	var gfniMatrix []uint64
 	useAvx2 := r.canAVX2C(byteCount, len(inputs), len(outputs))
-	if useAvx2 {
-		avx2Matrix = genAvx2Matrix(matrixRows, len(inputs), len(outputs), r.mPool.Get().([]byte))
-		defer r.mPool.Put(avx2Matrix)
+	useGFNI := r.canGFNI(byteCount, len(inputs), len(outputs))
+	if useGFNI {
+		var tmp [maxAvx2Inputs * maxAvx2Outputs]uint64
+		gfniMatrix = genGFNIMatrix(matrixRows, len(inputs), 0, len(outputs), tmp[:])
+	} else if useAvx2 {
+		avx2Matrix = genAvx2Matrix(matrixRows, len(inputs), 0, len(outputs), r.getTmpSlice())
+		defer r.putTmpSlice(avx2Matrix)
+	} else if r.o.useGFNI && byteCount < 10<<20 && len(inputs)+len(outputs) > avx2CodeGenMinShards &&
+		r.canGFNI(byteCount/4, maxAvx2Inputs, maxAvx2Outputs) {
+		// It appears there is a switchover point at around 10MB where
+		// regular processing is faster...
+		r.codeSomeShardsGFNI(matrixRows, inputs, outputs, byteCount, true)
+		return
+	} else if r.o.useAVX2 && byteCount < 10<<20 && len(inputs)+len(outputs) > avx2CodeGenMinShards &&
+		r.canAVX2C(byteCount/4, maxAvx2Inputs, maxAvx2Outputs) {
+		// It appears there is a switchover point at around 10MB where
+		// regular processing is faster...
+		r.codeSomeShardsAVXP(matrixRows, inputs, outputs, byteCount, true)
+		return
 	}
 
 	do := byteCount / gor
@@ -593,6 +946,44 @@ func (r *reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outp
 		do = r.o.minSplitSize
 	}
 
+	exec := func(start, stop int) {
+		if stop-start >= 64 {
+			if useGFNI {
+				start += galMulSlicesGFNI(gfniMatrix, inputs, outputs, start, stop)
+			} else if useAvx2 {
+				start += galMulSlicesAvx2(avx2Matrix, inputs, outputs, start, stop)
+			}
+		}
+
+		lstart, lstop := start, start+r.o.perRound
+		if lstop > stop {
+			lstop = stop
+		}
+		for lstart < stop {
+			for c := 0; c < len(inputs); c++ {
+				in := inputs[c][lstart:lstop]
+				for iRow := 0; iRow < len(outputs); iRow++ {
+					if c == 0 {
+						galMulSlice(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
+					} else {
+						galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
+					}
+				}
+			}
+			lstart = lstop
+			lstop += r.o.perRound
+			if lstop > stop {
+				lstop = stop
+			}
+		}
+		wg.Done()
+	}
+	if gor <= 1 {
+		wg.Add(1)
+		exec(0, byteCount)
+		return
+	}
+
 	// Make sizes divisible by 64
 	do = (do + 63) & (^63)
 	start := 0
@@ -602,34 +993,311 @@ func (r *reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outp
 		}
 
 		wg.Add(1)
-		go func(start, stop int) {
-			if useAvx2 && stop-start >= 64 {
-				start += galMulSlicesAvx2(avx2Matrix, inputs, outputs, start, stop)
+		go exec(start, start+do)
+		start += do
+	}
+	wg.Wait()
+}
+
+// Perform the same as codeSomeShards, but split the workload into
+// several goroutines.
+// If clear is set, the first write will overwrite the output.
+func (r *reedSolomon) codeSomeShardsAVXP(matrixRows, inputs, outputs [][]byte, byteCount int, clear bool) {
+	var wg sync.WaitGroup
+	gor := r.o.maxGoroutines
+
+	type state struct {
+		input  [][]byte
+		output [][]byte
+		m      []byte
+		first  bool
+	}
+	// Make a plan...
+	plan := make([]state, 0, ((len(inputs)+maxAvx2Inputs-1)/maxAvx2Inputs)*((len(outputs)+maxAvx2Outputs-1)/maxAvx2Outputs))
+
+	tmp := r.getTmpSlice()
+	defer r.putTmpSlice(tmp)
+
+	// Flips between input first to output first.
+	// We put the smallest data load in the inner loop.
+	if len(inputs) > len(outputs) {
+		inIdx := 0
+		ins := inputs
+		for len(ins) > 0 {
+			inPer := ins
+			if len(inPer) > maxAvx2Inputs {
+				inPer = inPer[:maxAvx2Inputs]
+			}
+			outs := outputs
+			outIdx := 0
+			for len(outs) > 0 {
+				outPer := outs
+				if len(outPer) > maxAvx2Outputs {
+					outPer = outPer[:maxAvx2Outputs]
+				}
+				// Generate local matrix
+				m := genAvx2Matrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), tmp)
+				tmp = tmp[len(m):]
+				plan = append(plan, state{
+					input:  inPer,
+					output: outPer,
+					m:      m,
+					first:  inIdx == 0 && clear,
+				})
+				outIdx += len(outPer)
+				outs = outs[len(outPer):]
+			}
+			inIdx += len(inPer)
+			ins = ins[len(inPer):]
+		}
+	} else {
+		outs := outputs
+		outIdx := 0
+		for len(outs) > 0 {
+			outPer := outs
+			if len(outPer) > maxAvx2Outputs {
+				outPer = outPer[:maxAvx2Outputs]
 			}
 
-			lstart, lstop := start, start+r.o.perRound
+			inIdx := 0
+			ins := inputs
+			for len(ins) > 0 {
+				inPer := ins
+				if len(inPer) > maxAvx2Inputs {
+					inPer = inPer[:maxAvx2Inputs]
+				}
+				// Generate local matrix
+				m := genAvx2Matrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), tmp)
+				tmp = tmp[len(m):]
+				//fmt.Println("bytes:", len(inPer)*r.o.perRound, "out:", len(outPer)*r.o.perRound)
+				plan = append(plan, state{
+					input:  inPer,
+					output: outPer,
+					m:      m,
+					first:  inIdx == 0 && clear,
+				})
+				inIdx += len(inPer)
+				ins = ins[len(inPer):]
+			}
+			outIdx += len(outPer)
+			outs = outs[len(outPer):]
+		}
+	}
+
+	do := byteCount / gor
+	if do < r.o.minSplitSize {
+		do = r.o.minSplitSize
+	}
+
+	exec := func(start, stop int) {
+		defer wg.Done()
+		lstart, lstop := start, start+r.o.perRound
+		if lstop > stop {
+			lstop = stop
+		}
+		for lstart < stop {
+			if lstop-lstart >= minAvx2Size {
+				// Execute plan...
+				for _, p := range plan {
+					if p.first {
+						galMulSlicesAvx2(p.m, p.input, p.output, lstart, lstop)
+					} else {
+						galMulSlicesAvx2Xor(p.m, p.input, p.output, lstart, lstop)
+					}
+				}
+				lstart += (lstop - lstart) & avxSizeMask
+				if lstart == lstop {
+					lstop += r.o.perRound
+					if lstop > stop {
+						lstop = stop
+					}
+					continue
+				}
+			}
+
+			for c := range inputs {
+				in := inputs[c][lstart:lstop]
+				for iRow := 0; iRow < len(outputs); iRow++ {
+					if c == 0 && clear {
+						galMulSlice(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
+					} else {
+						galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
+					}
+				}
+			}
+			lstart = lstop
+			lstop += r.o.perRound
 			if lstop > stop {
 				lstop = stop
 			}
-			for lstart < stop {
-				for c := 0; c < r.DataShards; c++ {
-					in := inputs[c][lstart:lstop]
-					for iRow := 0; iRow < outputCount; iRow++ {
-						if c == 0 {
-							galMulSlice(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
-						} else {
-							galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
-						}
+		}
+	}
+	if gor == 1 {
+		wg.Add(1)
+		exec(0, byteCount)
+		return
+	}
+
+	// Make sizes divisible by 64
+	do = (do + 63) & (^63)
+	start := 0
+	for start < byteCount {
+		if start+do > byteCount {
+			do = byteCount - start
+		}
+
+		wg.Add(1)
+		go exec(start, start+do)
+		start += do
+	}
+	wg.Wait()
+}
+
+// Perform the same as codeSomeShards, but split the workload into
+// several goroutines.
+// If clear is set, the first write will overwrite the output.
+func (r *reedSolomon) codeSomeShardsGFNI(matrixRows, inputs, outputs [][]byte, byteCount int, clear bool) {
+	var wg sync.WaitGroup
+	gor := r.o.maxGoroutines
+
+	type state struct {
+		input  [][]byte
+		output [][]byte
+		m      []uint64
+		first  bool
+	}
+	// Make a plan...
+	plan := make([]state, 0, ((len(inputs)+maxAvx2Inputs-1)/maxAvx2Inputs)*((len(outputs)+maxAvx2Outputs-1)/maxAvx2Outputs))
+
+	// Flips between input first to output first.
+	// We put the smallest data load in the inner loop.
+	if len(inputs) > len(outputs) {
+		inIdx := 0
+		ins := inputs
+		for len(ins) > 0 {
+			inPer := ins
+			if len(inPer) > maxAvx2Inputs {
+				inPer = inPer[:maxAvx2Inputs]
+			}
+			outs := outputs
+			outIdx := 0
+			for len(outs) > 0 {
+				outPer := outs
+				if len(outPer) > maxAvx2Outputs {
+					outPer = outPer[:maxAvx2Outputs]
+				}
+				// Generate local matrix
+				m := genGFNIMatrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), make([]uint64, len(inPer)*len(outPer)))
+				plan = append(plan, state{
+					input:  inPer,
+					output: outPer,
+					m:      m,
+					first:  inIdx == 0 && clear,
+				})
+				outIdx += len(outPer)
+				outs = outs[len(outPer):]
+			}
+			inIdx += len(inPer)
+			ins = ins[len(inPer):]
+		}
+	} else {
+		outs := outputs
+		outIdx := 0
+		for len(outs) > 0 {
+			outPer := outs
+			if len(outPer) > maxAvx2Outputs {
+				outPer = outPer[:maxAvx2Outputs]
+			}
+
+			inIdx := 0
+			ins := inputs
+			for len(ins) > 0 {
+				inPer := ins
+				if len(inPer) > maxAvx2Inputs {
+					inPer = inPer[:maxAvx2Inputs]
+				}
+				// Generate local matrix
+				m := genGFNIMatrix(matrixRows[outIdx:], len(inPer), inIdx, len(outPer), make([]uint64, len(inPer)*len(outPer)))
+				//fmt.Println("bytes:", len(inPer)*r.o.perRound, "out:", len(outPer)*r.o.perRound)
+				plan = append(plan, state{
+					input:  inPer,
+					output: outPer,
+					m:      m,
+					first:  inIdx == 0 && clear,
+				})
+				inIdx += len(inPer)
+				ins = ins[len(inPer):]
+			}
+			outIdx += len(outPer)
+			outs = outs[len(outPer):]
+		}
+	}
+
+	do := byteCount / gor
+	if do < r.o.minSplitSize {
+		do = r.o.minSplitSize
+	}
+
+	exec := func(start, stop int) {
+		defer wg.Done()
+		lstart, lstop := start, start+r.o.perRound
+		if lstop > stop {
+			lstop = stop
+		}
+		for lstart < stop {
+			if lstop-lstart >= minAvx2Size {
+				// Execute plan...
+				for _, p := range plan {
+					if p.first {
+						galMulSlicesGFNI(p.m, p.input, p.output, lstart, lstop)
+					} else {
+						galMulSlicesGFNIXor(p.m, p.input, p.output, lstart, lstop)
 					}
 				}
-				lstart = lstop
-				lstop += r.o.perRound
-				if lstop > stop {
-					lstop = stop
+				lstart += (lstop - lstart) & avxSizeMask
+				if lstart == lstop {
+					lstop += r.o.perRound
+					if lstop > stop {
+						lstop = stop
+					}
+					continue
 				}
 			}
-			wg.Done()
-		}(start, start+do)
+
+			for c := range inputs {
+				in := inputs[c][lstart:lstop]
+				for iRow := 0; iRow < len(outputs); iRow++ {
+					if c == 0 && clear {
+						galMulSlice(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
+					} else {
+						galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow][lstart:lstop], &r.o)
+					}
+				}
+			}
+			lstart = lstop
+			lstop += r.o.perRound
+			if lstop > stop {
+				lstop = stop
+			}
+		}
+	}
+
+	if gor == 1 {
+		wg.Add(1)
+		exec(0, byteCount)
+		return
+	}
+
+	// Make sizes divisible by 64
+	do = (do + 63) & (^63)
+	start := 0
+	for start < byteCount {
+		if start+do > byteCount {
+			do = byteCount - start
+		}
+
+		wg.Add(1)
+		go exec(start, start+do)
 		start += do
 	}
 	wg.Wait()
@@ -638,16 +1306,13 @@ func (r *reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outp
 // checkSomeShards is mostly the same as codeSomeShards,
 // except this will check values and return
 // as soon as a difference is found.
-func (r *reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
+func (r *reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, byteCount int) bool {
 	if len(toCheck) == 0 {
 		return true
 	}
 
-	outputs := make([][]byte, len(toCheck))
-	for i := range outputs {
-		outputs[i] = make([]byte, byteCount)
-	}
-	r.codeSomeShards(matrixRows, inputs, outputs, outputCount, byteCount)
+	outputs := AllocAligned(len(toCheck), byteCount)
+	r.codeSomeShards(matrixRows, inputs, outputs, byteCount)
 
 	for i, calc := range outputs {
 		if !bytes.Equal(calc, toCheck[i]) {
@@ -665,6 +1330,10 @@ var ErrShardNoData = errors.New("no shard data")
 // shards.
 var ErrShardSize = errors.New("shard sizes do not match")
 
+// ErrInvalidShardSize is returned if shard length doesn't meet the requirements,
+// typically a multiple of N.
+var ErrInvalidShardSize = errors.New("invalid shard size")
+
 // checkShards will check if shards are the same size
 // or 0, if allowed. An error is returned if this fails.
 // An error is also returned if all shards are size 0.
@@ -700,7 +1369,7 @@ func shardSize(shards [][]byte) int {
 // Given a list of shards, some of which contain data, fills in the
 // ones that don't have data.
 //
-// The length of the array must be equal to Shards.
+// The length of the array must be equal to the total number of shards.
 // You indicate that a shard is missing by setting it to nil or zero-length.
 // If a shard is zero-length but has sufficient capacity, that memory will
 // be used, otherwise a new []byte will be allocated.
@@ -711,7 +1380,7 @@ func shardSize(shards [][]byte) int {
 // The reconstructed shard set is complete, but integrity is not verified.
 // Use the Verify function to check if data set is ok.
 func (r *reedSolomon) Reconstruct(shards [][]byte) error {
-	return r.reconstruct(shards, false)
+	return r.reconstruct(shards, false, nil)
 }
 
 // ReconstructData will recreate any missing data shards, if possible.
@@ -719,7 +1388,7 @@ func (r *reedSolomon) Reconstruct(shards [][]byte) error {
 // Given a list of shards, some of which contain data, fills in the
 // data shards that don't have data.
 //
-// The length of the array must be equal to Shards.
+// The length of the array must be equal to the total number of shards.
 // You indicate that a shard is missing by setting it to nil or zero-length.
 // If a shard is zero-length but has sufficient capacity, that memory will
 // be used, otherwise a new []byte will be allocated.
@@ -730,19 +1399,39 @@ func (r *reedSolomon) Reconstruct(shards [][]byte) error {
 // As the reconstructed shard set may contain missing parity shards,
 // calling the Verify function is likely to fail.
 func (r *reedSolomon) ReconstructData(shards [][]byte) error {
-	return r.reconstruct(shards, true)
+	return r.reconstruct(shards, true, nil)
 }
 
-// reconstruct will recreate the missing data shards, and unless
-// dataOnly is true, also the missing parity shards
+// ReconstructSome will recreate only requested data shards, if possible.
 //
-// The length of the array must be equal to Shards.
-// You indicate that a shard is missing by setting it to nil.
+// Given a list of shards, some of which contain data, fills in the
+// data shards indicated by true values in the "required" parameter.
+// The length of the "required" array must be equal to the number of data shards.
+//
+// The length of the "shards" array must be equal to the total number of shards.
+// You indicate that a shard is missing by setting it to nil or zero-length.
+// If a shard is zero-length but has sufficient capacity, that memory will
+// be used, otherwise a new []byte will be allocated.
 //
 // If there are too few shards to reconstruct the missing
 // ones, ErrTooFewShards will be returned.
-func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
-	if len(shards) != r.Shards {
+//
+// As the reconstructed shard set may contain missing parity shards,
+// calling the Verify function is likely to fail.
+func (r *reedSolomon) ReconstructSome(shards [][]byte, required []bool) error {
+	return r.reconstruct(shards, true, required)
+}
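
A short sketch of the targeted reconstruction added above, restoring only one requested data shard while leaving a missing parity shard untouched (illustrative; shard contents are omitted):

package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.New(6, 3)
	if err != nil {
		log.Fatal(err)
	}
	shards := make([][]byte, 9)
	for i := range shards {
		shards[i] = make([]byte, 256)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}

	// Lose one data shard and one parity shard, but request only data shard 0.
	shards[0], shards[7] = nil, nil
	required := make([]bool, 6) // one entry per data shard
	required[0] = true
	if err := enc.ReconstructSome(shards, required); err != nil {
		log.Fatal(err)
	}
	// shards[0] is restored; the missing parity shard stays nil.
}
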
+
+// reconstruct will recreate the missing data shards, and unless
+// dataOnly is true, also the missing parity shards.
+//
+// The length of the "shards" array must be equal to totalShards.
+// You indicate that a shard is missing by setting it to nil.
+//
+// If there are too few shards to reconstruct the missing
+// ones, ErrTooFewShards will be returned.
+func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool, required []bool) error {
+	if len(shards) != r.totalShards || required != nil && len(required) < r.dataShards {
 		return ErrTooFewShards
 	}
 	// Check arguments.
@@ -757,22 +1446,26 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
 	// nothing to do.
 	numberPresent := 0
 	dataPresent := 0
-	for i := 0; i < r.Shards; i++ {
+	missingRequired := 0
+	for i := 0; i < r.totalShards; i++ {
 		if len(shards[i]) != 0 {
 			numberPresent++
-			if i < r.DataShards {
+			if i < r.dataShards {
 				dataPresent++
 			}
+		} else if required != nil && required[i] {
+			missingRequired++
 		}
 	}
-	if numberPresent == r.Shards || dataOnly && dataPresent == r.DataShards {
-		// Cool.  All of the shards data data.  We don't
+	if numberPresent == r.totalShards || dataOnly && dataPresent == r.dataShards ||
+		required != nil && missingRequired == 0 {
+		// Cool. All of the shards have data. We don't
 		// need to do anything.
 		return nil
 	}
 
 	// More complete sanity check
-	if numberPresent < r.DataShards {
+	if numberPresent < r.dataShards {
 		return ErrTooFewShards
 	}
 
@@ -783,11 +1476,11 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
 	//
 	// Also, create an array of indices of the valid rows we do have
 	// and the invalid rows we don't have up until we have enough valid rows.
-	subShards := make([][]byte, r.DataShards)
-	validIndices := make([]int, r.DataShards)
+	subShards := make([][]byte, r.dataShards)
+	validIndices := make([]int, r.dataShards)
 	invalidIndices := make([]int, 0)
 	subMatrixRow := 0
-	for matrixRow := 0; matrixRow < r.Shards && subMatrixRow < r.DataShards; matrixRow++ {
+	for matrixRow := 0; matrixRow < r.totalShards && subMatrixRow < r.dataShards; matrixRow++ {
 		if len(shards[matrixRow]) != 0 {
 			subShards[subMatrixRow] = shards[matrixRow]
 			validIndices[subMatrixRow] = matrixRow
@@ -809,9 +1502,9 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
 		// shards that we have and build a square matrix.  This
 		// matrix could be used to generate the shards that we have
 		// from the original data.
-		subMatrix, _ := newMatrix(r.DataShards, r.DataShards)
+		subMatrix, _ := newMatrix(r.dataShards, r.dataShards)
 		for subMatrixRow, validIndex := range validIndices {
-			for c := 0; c < r.DataShards; c++ {
+			for c := 0; c < r.dataShards; c++ {
 				subMatrix[subMatrixRow][c] = r.m[validIndex][c]
 			}
 		}
@@ -827,7 +1520,7 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
 
 		// Cache the inverted matrix in the tree for future use keyed on the
 		// indices of the invalid rows.
-		err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.Shards)
+		err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.totalShards)
 		if err != nil {
 			return err
 		}
@@ -838,23 +1531,23 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
 	// The input to the coding is all of the shards we actually
 	// have, and the output is the missing data shards.  The computation
 	// is done using the special decode matrix we just built.
-	outputs := make([][]byte, r.ParityShards)
-	matrixRows := make([][]byte, r.ParityShards)
+	outputs := make([][]byte, r.parityShards)
+	matrixRows := make([][]byte, r.parityShards)
 	outputCount := 0
 
-	for iShard := 0; iShard < r.DataShards; iShard++ {
-		if len(shards[iShard]) == 0 {
+	for iShard := 0; iShard < r.dataShards; iShard++ {
+		if len(shards[iShard]) == 0 && (required == nil || required[iShard]) {
 			if cap(shards[iShard]) >= shardSize {
 				shards[iShard] = shards[iShard][0:shardSize]
 			} else {
-				shards[iShard] = make([]byte, shardSize)
+				shards[iShard] = AllocAligned(1, shardSize)[0]
 			}
 			outputs[outputCount] = shards[iShard]
 			matrixRows[outputCount] = dataDecodeMatrix[iShard]
 			outputCount++
 		}
 	}
-	r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], outputCount, shardSize)
+	r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], shardSize)
 
 	if dataOnly {
 		// Exit out early if we are only interested in the data shards
@@ -868,19 +1561,19 @@ func (r *reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
 	// any that we just calculated.  The output is whichever of the
 	// data shards were missing.
 	outputCount = 0
-	for iShard := r.DataShards; iShard < r.Shards; iShard++ {
-		if len(shards[iShard]) == 0 {
+	for iShard := r.dataShards; iShard < r.totalShards; iShard++ {
+		if len(shards[iShard]) == 0 && (required == nil || required[iShard]) {
 			if cap(shards[iShard]) >= shardSize {
 				shards[iShard] = shards[iShard][0:shardSize]
 			} else {
-				shards[iShard] = make([]byte, shardSize)
+				shards[iShard] = AllocAligned(1, shardSize)[0]
 			}
 			outputs[outputCount] = shards[iShard]
-			matrixRows[outputCount] = r.parity[iShard-r.DataShards]
+			matrixRows[outputCount] = r.parity[iShard-r.dataShards]
 			outputCount++
 		}
 	}
-	r.codeSomeShards(matrixRows, shards[:r.DataShards], outputs[:outputCount], outputCount, shardSize)
+	r.codeSomeShards(matrixRows, shards[:r.dataShards], outputs[:outputCount], shardSize)
 	return nil
 }
 
@@ -895,6 +1588,10 @@ var ErrShortData = errors.New("not enough data to fill the number of requested s
 // If the data size isn't divisible by the number of shards,
 // the last shard will contain extra zeros.
 //
+// If there is extra capacity on the provided data slice
+// it will be used instead of allocating parity shards.
+// It will be zeroed out.
+//
 // There must be at least 1 byte otherwise ErrShortData will be
 // returned.
 //
@@ -904,25 +1601,48 @@ func (r *reedSolomon) Split(data []byte) ([][]byte, error) {
 	if len(data) == 0 {
 		return nil, ErrShortData
 	}
+	if r.totalShards == 1 {
+		return [][]byte{data}, nil
+	}
+
+	dataLen := len(data)
 	// Calculate number of bytes per data shard.
-	perShard := (len(data) + r.DataShards - 1) / r.DataShards
+	perShard := (len(data) + r.dataShards - 1) / r.dataShards
+	needTotal := r.totalShards * perShard
 
 	if cap(data) > len(data) {
-		data = data[:cap(data)]
+		if cap(data) > needTotal {
+			data = data[:needTotal]
+		} else {
+			data = data[:cap(data)]
+		}
+		clear := data[dataLen:]
+		for i := range clear {
+			clear[i] = 0
+		}
 	}
 
 	// Only allocate memory if necessary
-	var padding []byte
-	if len(data) < (r.Shards * perShard) {
+	var padding [][]byte
+	if len(data) < needTotal {
 		// calculate maximum number of full shards in `data` slice
 		fullShards := len(data) / perShard
-		padding = make([]byte, r.Shards*perShard-perShard*fullShards)
-		copy(padding, data[perShard*fullShards:])
-		data = data[0 : perShard*fullShards]
+		padding = AllocAligned(r.totalShards-fullShards, perShard)
+
+		if dataLen > perShard*fullShards {
+			// Copy partial shards
+			copyFrom := data[perShard*fullShards : dataLen]
+			for i := range padding {
+				if len(copyFrom) <= 0 {
+					break
+				}
+				copyFrom = copyFrom[copy(padding[i], copyFrom):]
+			}
+		}
 	}
 
 	// Split into equal-length shards.
-	dst := make([][]byte, r.Shards)
+	dst := make([][]byte, r.totalShards)
 	i := 0
 	for ; i < len(dst) && len(data) >= perShard; i++ {
 		dst[i] = data[:perShard:perShard]
@@ -930,8 +1650,8 @@ func (r *reedSolomon) Split(data []byte) ([][]byte, error) {
 	}
 
 	for j := 0; i+j < len(dst); j++ {
-		dst[i+j] = padding[:perShard:perShard]
-		padding = padding[perShard:]
+		dst[i+j] = padding[0]
+		padding = padding[1:]
 	}
 
 	return dst, nil
@@ -951,10 +1671,10 @@ var ErrReconstructRequired = errors.New("reconstruction required as one or more
 // If one or more required data shards are nil, ErrReconstructRequired will be returned.
 func (r *reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error {
 	// Do we have enough shards?
-	if len(shards) < r.DataShards {
+	if len(shards) < r.dataShards {
 		return ErrTooFewShards
 	}
-	shards = shards[:r.DataShards]
+	shards = shards[:r.dataShards]
 
 	// Do we have enough data?
 	size := 0
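
Tying the Split and Join changes above together, a sketch that hands Split a slice with spare capacity so the parity shards can be carved from the same allocation, then reassembles the original bytes (illustrative; default backend, so shard sizes need not be a multiple of 64):

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		log.Fatal(err)
	}

	// 1000 bytes of payload with room for 4+2 shards of 250 bytes each.
	data := make([]byte, 1000, 2000)
	shards, err := enc.Split(data)
	if err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}

	// Join writes the first len(data) bytes of the data shards back out.
	var buf bytes.Buffer
	if err := enc.Join(&buf, shards, len(data)); err != nil {
		log.Fatal(err)
	}
}
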
diff --git a/reedsolomon_test.go b/reedsolomon_test.go
index 4a51838..2932787 100644
--- a/reedsolomon_test.go
+++ b/reedsolomon_test.go
@@ -9,18 +9,22 @@ package reedsolomon
 
 import (
 	"bytes"
+	"errors"
 	"flag"
 	"fmt"
 	"math/rand"
 	"os"
 	"runtime"
+	"strconv"
 	"testing"
+	"time"
 )
 
 var noSSE2 = flag.Bool("no-sse2", !defaultOptions.useSSE2, "Disable SSE2")
 var noSSSE3 = flag.Bool("no-ssse3", !defaultOptions.useSSSE3, "Disable SSSE3")
 var noAVX2 = flag.Bool("no-avx2", !defaultOptions.useAVX2, "Disable AVX2")
 var noAVX512 = flag.Bool("no-avx512", !defaultOptions.useAVX512, "Disable AVX512")
+var noGNFI = flag.Bool("no-gfni", !defaultOptions.useGFNI, "Disable AVX512+GFNI")
 
 func TestMain(m *testing.M) {
 	flag.Parse()
@@ -28,17 +32,21 @@ func TestMain(m *testing.M) {
 }
 
 func testOptions(o ...Option) []Option {
+	o = append(o, WithFastOneParityMatrix())
 	if *noSSSE3 {
-		o = append(o, withSSSE3(false))
+		o = append(o, WithSSSE3(false))
 	}
 	if *noSSE2 {
-		o = append(o, withSSE2(false))
+		o = append(o, WithSSE2(false))
 	}
 	if *noAVX2 {
-		o = append(o, withAVX2(false))
+		o = append(o, WithAVX2(false))
 	}
 	if *noAVX512 {
-		o = append(o, withAVX512(false))
+		o = append(o, WithAVX512(false))
+	}
+	if *noGNFI {
+		o = append(o, WithGFNI(false))
 	}
 	return o
 }
@@ -108,6 +116,35 @@ func findSingularSubMatrix(m matrix) (matrix, error) {
 	return nil, nil
 }
 
+func TestBuildMatrixJerasure(t *testing.T) {
+	totalShards := 12
+	dataShards := 8
+	m, err := buildMatrixJerasure(dataShards, totalShards)
+	if err != nil {
+		t.Fatal(err)
+	}
+	refMatrix := matrix{
+		{1, 1, 1, 1, 1, 1, 1, 1},
+		{1, 55, 39, 73, 84, 181, 225, 217},
+		{1, 39, 217, 161, 92, 60, 172, 90},
+		{1, 172, 70, 235, 143, 34, 200, 101},
+	}
+	for i := 0; i < 8; i++ {
+		for j := 0; j < 8; j++ {
+			if i != j && m[i][j] != 0 || i == j && m[i][j] != 1 {
+				t.Fatal("Top part of the matrix is not identity")
+			}
+		}
+	}
+	for i := 0; i < 4; i++ {
+		for j := 0; j < 8; j++ {
+			if m[8+i][j] != refMatrix[i][j] {
+				t.Fatal("Coding matrix for EC 8+4 differs from Jerasure")
+			}
+		}
+	}
+}
+
 func TestBuildMatrixPAR1Singular(t *testing.T) {
 	totalShards := 8
 	dataShards := 4
@@ -131,83 +168,172 @@ func TestBuildMatrixPAR1Singular(t *testing.T) {
 func testOpts() [][]Option {
 	if testing.Short() {
 		return [][]Option{
-			{WithPAR1Matrix()}, {WithCauchyMatrix()},
+			{WithCauchyMatrix()}, {WithLeopardGF16(true)}, {WithLeopardGF(true)},
 		}
 	}
 	opts := [][]Option{
 		{WithPAR1Matrix()}, {WithCauchyMatrix()},
 		{WithFastOneParityMatrix()}, {WithPAR1Matrix(), WithFastOneParityMatrix()}, {WithCauchyMatrix(), WithFastOneParityMatrix()},
-		{WithMaxGoroutines(1), WithMinSplitSize(500), withSSSE3(false), withAVX2(false), withAVX512(false)},
-		{WithMaxGoroutines(5000), WithMinSplitSize(50), withSSSE3(false), withAVX2(false), withAVX512(false)},
-		{WithMaxGoroutines(5000), WithMinSplitSize(500000), withSSSE3(false), withAVX2(false), withAVX512(false)},
-		{WithMaxGoroutines(1), WithMinSplitSize(500000), withSSSE3(false), withAVX2(false), withAVX512(false)},
+		{WithMaxGoroutines(1), WithMinSplitSize(500), WithSSSE3(false), WithAVX2(false), WithAVX512(false)},
+		{WithMaxGoroutines(5000), WithMinSplitSize(50), WithSSSE3(false), WithAVX2(false), WithAVX512(false)},
+		{WithMaxGoroutines(5000), WithMinSplitSize(500000), WithSSSE3(false), WithAVX2(false), WithAVX512(false)},
+		{WithMaxGoroutines(1), WithMinSplitSize(500000), WithSSSE3(false), WithAVX2(false), WithAVX512(false)},
 		{WithAutoGoroutines(50000), WithMinSplitSize(500)},
 		{WithInversionCache(false)},
+		{WithJerasureMatrix()},
+		{WithLeopardGF16(true)},
+		{WithLeopardGF(true)},
 	}
+
 	for _, o := range opts[:] {
 		if defaultOptions.useSSSE3 {
 			n := make([]Option, len(o), len(o)+1)
 			copy(n, o)
-			n = append(n, withSSSE3(true))
+			n = append(n, WithSSSE3(true))
 			opts = append(opts, n)
 		}
 		if defaultOptions.useAVX2 {
 			n := make([]Option, len(o), len(o)+1)
 			copy(n, o)
-			n = append(n, withAVX2(true))
+			n = append(n, WithAVX2(true))
 			opts = append(opts, n)
 		}
 		if defaultOptions.useAVX512 {
 			n := make([]Option, len(o), len(o)+1)
 			copy(n, o)
-			n = append(n, withAVX512(true))
+			n = append(n, WithAVX512(true))
+			opts = append(opts, n)
+		}
+		if defaultOptions.useGFNI {
+			n := make([]Option, len(o), len(o)+1)
+			copy(n, o)
+			n = append(n, WithGFNI(false))
 			opts = append(opts, n)
 		}
 	}
 	return opts
 }
 
+func parallelIfNotShort(t *testing.T) {
+	if !testing.Short() {
+		t.Parallel()
+	}
+}
+
 func TestEncoding(t *testing.T) {
 	t.Run("default", func(t *testing.T) {
+		parallelIfNotShort(t)
 		testEncoding(t, testOptions()...)
 	})
-	for i, o := range testOpts() {
-		t.Run(fmt.Sprintf("opt-%d", i), func(t *testing.T) {
-			testEncoding(t, o...)
+	t.Run("default-idx", func(t *testing.T) {
+		parallelIfNotShort(t)
+		testEncodingIdx(t, testOptions()...)
+	})
+	if testing.Short() {
+		return
+	}
+	// Spread somewhat, but don't overload...
+	to := testOpts()
+	to2 := to[len(to)/2:]
+	to = to[:len(to)/2]
+	t.Run("reg", func(t *testing.T) {
+		parallelIfNotShort(t)
+		for i, o := range to {
+			t.Run(fmt.Sprintf("opt-%d", i), func(t *testing.T) {
+				testEncoding(t, o...)
+			})
+		}
+	})
+	t.Run("reg2", func(t *testing.T) {
+		parallelIfNotShort(t)
+		for i, o := range to2 {
+			t.Run(fmt.Sprintf("opt-%d", i), func(t *testing.T) {
+				testEncoding(t, o...)
+			})
+		}
+	})
+	if !testing.Short() {
+		t.Run("idx", func(t *testing.T) {
+			parallelIfNotShort(t)
+			for i, o := range to {
+				t.Run(fmt.Sprintf("idx-opt-%d", i), func(t *testing.T) {
+					testEncodingIdx(t, o...)
+				})
+			}
+		})
+		t.Run("idx2", func(t *testing.T) {
+			parallelIfNotShort(t)
+			for i, o := range to2 {
+				t.Run(fmt.Sprintf("idx-opt-%d", i), func(t *testing.T) {
+					testEncodingIdx(t, o...)
+				})
+			}
 		})
+
 	}
 }
 
 // matrix sizes to test.
-// note that par1 matric will fail on some combinations.
-var testSizes = [][2]int{
-	{1, 0}, {3, 0}, {5, 0}, {8, 0}, {10, 0}, {12, 0}, {14, 0}, {41, 0}, {49, 0},
-	{1, 1}, {1, 2}, {3, 3}, {3, 1}, {5, 3}, {8, 4}, {10, 30}, {12, 10}, {14, 7}, {41, 17}, {49, 1}}
+// note that par1 matrix will fail on some combinations.
+func testSizes() [][2]int {
+	if testing.Short() {
+		return [][2]int{
+			{3, 0},
+			{1, 1}, {1, 2}, {8, 4}, {10, 30}, {41, 17},
+			{256, 20}, {500, 300},
+		}
+	}
+	return [][2]int{
+		{1, 0}, {10, 0}, {12, 0}, {49, 0},
+		{1, 1}, {1, 2}, {3, 3}, {3, 1}, {5, 3}, {8, 4}, {10, 30}, {12, 10}, {14, 7}, {41, 17}, {49, 1}, {5, 20},
+		{256, 20}, {500, 300}, {2945, 129},
+	}
+}
+
 var testDataSizes = []int{10, 100, 1000, 10001, 100003, 1000055}
 var testDataSizesShort = []int{10, 10001, 100003}
 
 func testEncoding(t *testing.T, o ...Option) {
-	for _, size := range testSizes {
+	for _, size := range testSizes() {
 		data, parity := size[0], size[1]
 		rng := rand.New(rand.NewSource(0xabadc0cac01a))
 		t.Run(fmt.Sprintf("%dx%d", data, parity), func(t *testing.T) {
 			sz := testDataSizes
-			if testing.Short() {
+			if testing.Short() || data+parity > 256 {
 				sz = testDataSizesShort
+				if raceEnabled {
+					sz = testDataSizesShort[:1]
+				}
 			}
 			for _, perShard := range sz {
+				r, err := New(data, parity, testOptions(o...)...)
+				if err != nil {
+					t.Fatal(err)
+				}
+				x := r.(Extensions)
+				if want, got := data, x.DataShards(); want != got {
+					t.Errorf("DataShards returned %d, want %d", got, want)
+				}
+				if want, got := parity, x.ParityShards(); want != got {
+					t.Errorf("ParityShards returned %d, want %d", got, want)
+				}
+				if want, got := parity+data, x.TotalShards(); want != got {
+					t.Errorf("TotalShards returned %d, want %d", got, want)
+				}
+				mul := x.ShardSizeMultiple()
+				if mul <= 0 {
+					t.Fatalf("Got unexpected ShardSizeMultiple: %d", mul)
+				}
+				perShard = ((perShard + mul - 1) / mul) * mul
+
 				t.Run(fmt.Sprint(perShard), func(t *testing.T) {
 
-					r, err := New(data, parity, testOptions(o...)...)
-					if err != nil {
-						t.Fatal(err)
-					}
 					shards := make([][]byte, data+parity)
 					for s := range shards {
 						shards[s] = make([]byte, perShard)
 					}
 
-					for s := 0; s < data; s++ {
+					for s := 0; s < len(shards); s++ {
 						rng.Read(shards[s])
 					}
 
@@ -281,7 +407,118 @@ func testEncoding(t *testing.T, o ...Option) {
 	}
 }
 
+func testEncodingIdx(t *testing.T, o ...Option) {
+	for _, size := range testSizes() {
+		data, parity := size[0], size[1]
+		rng := rand.New(rand.NewSource(0xabadc0cac01a))
+		t.Run(fmt.Sprintf("%dx%d", data, parity), func(t *testing.T) {
+
+			sz := testDataSizes
+			if testing.Short() {
+				sz = testDataSizesShort
+			}
+			for _, perShard := range sz {
+				r, err := New(data, parity, testOptions(o...)...)
+				if err != nil {
+					t.Fatal(err)
+				}
+				if err := r.EncodeIdx(nil, 0, nil); err == ErrNotSupported {
+					t.Skip(err)
+					return
+				}
+				mul := r.(Extensions).ShardSizeMultiple()
+				perShard = ((perShard + mul - 1) / mul) * mul
+
+				t.Run(fmt.Sprint(perShard), func(t *testing.T) {
+
+					shards := make([][]byte, data+parity)
+					for s := range shards {
+						shards[s] = make([]byte, perShard)
+					}
+					shuffle := make([]int, data)
+					for i := range shuffle {
+						shuffle[i] = i
+					}
+					rng.Shuffle(len(shuffle), func(i, j int) { shuffle[i], shuffle[j] = shuffle[j], shuffle[i] })
+
+					// Send shards in random order.
+					for s := 0; s < data; s++ {
+						s := shuffle[s]
+						rng.Read(shards[s])
+						err = r.EncodeIdx(shards[s], s, shards[data:])
+						if err != nil {
+							t.Fatal(err)
+						}
+					}
+
+					ok, err := r.Verify(shards)
+					if err != nil {
+						t.Fatal(err)
+					}
+					if !ok {
+						t.Fatal("Verification failed")
+					}
+
+					if parity == 0 {
+						// Check that Reconstruct and ReconstructData do nothing
+						err = r.ReconstructData(shards)
+						if err != nil {
+							t.Fatal(err)
+						}
+						err = r.Reconstruct(shards)
+						if err != nil {
+							t.Fatal(err)
+						}
+
+						// Skip integrity checks
+						return
+					}
+
+					// Delete one in data
+					idx := rng.Intn(data)
+					want := shards[idx]
+					shards[idx] = nil
+
+					err = r.ReconstructData(shards)
+					if err != nil {
+						t.Fatal(err)
+					}
+					if !bytes.Equal(shards[idx], want) {
+						t.Fatal("did not ReconstructData correctly")
+					}
+
+					// Delete one randomly
+					idx = rng.Intn(data + parity)
+					want = shards[idx]
+					shards[idx] = nil
+					err = r.Reconstruct(shards)
+					if err != nil {
+						t.Fatal(err)
+					}
+					if !bytes.Equal(shards[idx], want) {
+						t.Fatal("did not Reconstruct correctly")
+					}
+
+					err = r.Encode(make([][]byte, 1))
+					if err != ErrTooFewShards {
+						t.Errorf("expected %v, got %v", ErrTooFewShards, err)
+					}
+
+					// Make one too short.
+					shards[idx] = shards[idx][:perShard-1]
+					err = r.Encode(shards)
+					if err != ErrShardSize {
+						t.Errorf("expected %v, got %v", ErrShardSize, err)
+					}
+				})
+			}
+		})
+
+	}
+}
+
 func TestUpdate(t *testing.T) {
+	parallelIfNotShort(t)
 	for i, o := range testOpts() {
 		t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) {
 			testUpdate(t, o...)
@@ -290,7 +527,6 @@ func TestUpdate(t *testing.T) {
 }
 
 func testUpdate(t *testing.T, o ...Option) {
-	rand.Seed(0)
 	for _, size := range [][2]int{{10, 3}, {17, 2}} {
 		data, parity := size[0], size[1]
 		t.Run(fmt.Sprintf("%dx%d", data, parity), func(t *testing.T) {
@@ -299,11 +535,15 @@ func testUpdate(t *testing.T, o ...Option) {
 				sz = []int{50000}
 			}
 			for _, perShard := range sz {
+				r, err := New(data, parity, testOptions(o...)...)
+				if err != nil {
+					t.Fatal(err)
+				}
+				mul := r.(Extensions).ShardSizeMultiple()
+				perShard = ((perShard + mul - 1) / mul) * mul
+
 				t.Run(fmt.Sprint(perShard), func(t *testing.T) {
-					r, err := New(data, parity, testOptions(o...)...)
-					if err != nil {
-						t.Fatal(err)
-					}
+
 					shards := make([][]byte, data+parity)
 					for s := range shards {
 						shards[s] = make([]byte, perShard)
@@ -331,6 +571,10 @@ func testUpdate(t *testing.T, o ...Option) {
 						fillRandom(newdatashards[s])
 						err = r.Update(shards, newdatashards)
 						if err != nil {
+							if errors.Is(err, ErrNotSupported) {
+								t.Skip(err)
+								return
+							}
 							t.Fatal(err)
 						}
 						shards[s] = newdatashards[s]
@@ -396,6 +640,7 @@ func testUpdate(t *testing.T, o ...Option) {
 }
 
 func TestReconstruct(t *testing.T) {
+	parallelIfNotShort(t)
 	testReconstruct(t)
 	for i, o := range testOpts() {
 		t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) {
@@ -410,12 +655,16 @@ func testReconstruct(t *testing.T, o ...Option) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	xt := r.(Extensions)
+	mul := xt.ShardSizeMultiple()
+	perShard = ((perShard + mul - 1) / mul) * mul
+
+	t.Log(perShard)
 	shards := make([][]byte, 13)
 	for s := range shards {
 		shards[s] = make([]byte, perShard)
 	}
 
-	rand.Seed(0)
 	for s := 0; s < 13; s++ {
 		fillRandom(shards[s])
 	}
@@ -476,11 +725,74 @@ func testReconstruct(t *testing.T, o ...Option) {
 	}
 }
 
+func TestReconstructCustom(t *testing.T) {
+	perShard := 50000
+	r, err := New(4, 3, WithCustomMatrix([][]byte{
+		{1, 1, 0, 0},
+		{0, 0, 1, 1},
+		{1, 2, 3, 4},
+	}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	shards := make([][]byte, 7)
+	for s := range shards {
+		shards[s] = make([]byte, perShard)
+	}
+
+	for s := 0; s < len(shards); s++ {
+		fillRandom(shards[s])
+	}
+
+	err = r.Encode(shards)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Reconstruct with 1 shard absent.
+	shards1 := make([][]byte, len(shards))
+	copy(shards1, shards)
+	shards1[0] = nil
+
+	err = r.Reconstruct(shards1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ok, err := r.Verify(shards)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !ok {
+		t.Fatal("Verification failed")
+	}
+
+	// Reconstruct with 3 shards absent.
+	copy(shards1, shards)
+	shards1[0] = nil
+	shards1[1] = nil
+	shards1[2] = nil
+
+	err = r.Reconstruct(shards1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ok, err = r.Verify(shards)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !ok {
+		t.Fatal("Verification failed")
+	}
+}
+
 func TestReconstructData(t *testing.T) {
+	parallelIfNotShort(t)
 	testReconstructData(t)
 	for i, o := range testOpts() {
 		t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) {
-			testReconstruct(t, o...)
+			testReconstructData(t, o...)
 		})
 	}
 }
@@ -491,14 +803,16 @@ func testReconstructData(t *testing.T, o ...Option) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	mul := r.(Extensions).ShardSizeMultiple()
+	perShard = ((perShard + mul - 1) / mul) * mul
+
 	shards := make([][]byte, 13)
 	for s := range shards {
 		shards[s] = make([]byte, perShard)
 	}
 
-	rand.Seed(0)
 	for s := 0; s < 13; s++ {
-		fillRandom(shards[s])
+		fillRandom(shards[s], int64(s))
 	}
 
 	err = r.Encode(shards)
@@ -512,12 +826,39 @@ func testReconstructData(t *testing.T, o ...Option) {
 		t.Fatal(err)
 	}
 
+	// Reconstruct 3 shards with 3 data and 5 parity shards
+	shardsCopy := make([][]byte, 13)
+	copy(shardsCopy, shards)
+	shardsCopy[2] = nil
+	shardsCopy[3] = nil
+	shardsCopy[4] = nil
+	shardsCopy[5] = nil
+	shardsCopy[6] = nil
+
+	shardsRequired := make([]bool, 8)
+	shardsRequired[3] = true
+	shardsRequired[4] = true
+	err = r.ReconstructSome(shardsCopy, shardsRequired)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if 0 != bytes.Compare(shardsCopy[3], shards[3]) ||
+		0 != bytes.Compare(shardsCopy[4], shards[4]) {
+		t.Fatal("ReconstructSome did not reconstruct required shards correctly")
+	}
+
+	if shardsCopy[2] != nil || shardsCopy[5] != nil || shardsCopy[6] != nil {
+		// This is expected in some cases.
+		t.Log("ReconstructSome reconstructed extra shards")
+	}
+
 	// Reconstruct with 10 shards present. Use pre-allocated memory for one of them.
 	shards[0] = nil
 	shards[2] = nil
 	shard4 := shards[4]
 	shards[4] = shard4[:0]
-	fillRandom(shard4)
+	fillRandom(shard4, 4)
 
 	err = r.ReconstructData(shards)
 	if err != nil {
@@ -594,6 +935,7 @@ func testReconstructData(t *testing.T, o ...Option) {
 }
 
 func TestReconstructPAR1Singular(t *testing.T) {
+	parallelIfNotShort(t)
 	perShard := 50
 	r, err := New(4, 4, testOptions(WithPAR1Matrix())...)
 	if err != nil {
@@ -604,7 +946,6 @@ func TestReconstructPAR1Singular(t *testing.T) {
 		shards[s] = make([]byte, perShard)
 	}
 
-	rand.Seed(0)
 	for s := 0; s < 8; s++ {
 		fillRandom(shards[s])
 	}
@@ -631,6 +972,7 @@ func TestReconstructPAR1Singular(t *testing.T) {
 }
 
 func TestVerify(t *testing.T) {
+	parallelIfNotShort(t)
 	testVerify(t)
 	for i, o := range testOpts() {
 		t.Run(fmt.Sprintf("options %d", i), func(t *testing.T) {
@@ -645,14 +987,16 @@ func testVerify(t *testing.T, o ...Option) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	mul := r.(Extensions).ShardSizeMultiple()
+	perShard = ((perShard + mul - 1) / mul) * mul
+
 	shards := make([][]byte, 14)
 	for s := range shards {
 		shards[s] = make([]byte, perShard)
 	}
 
-	rand.Seed(0)
 	for s := 0; s < 10; s++ {
-		fillRandom(shards[s])
+		fillRandom(shards[s], 0)
 	}
 
 	err = r.Encode(shards)
@@ -669,7 +1013,7 @@ func testVerify(t *testing.T, o ...Option) {
 	}
 
 	// Put in random data. Verification should fail
-	fillRandom(shards[10])
+	fillRandom(shards[10], 1)
 	ok, err = r.Verify(shards)
 	if err != nil {
 		t.Fatal(err)
@@ -683,7 +1027,7 @@ func testVerify(t *testing.T, o ...Option) {
 		t.Fatal(err)
 	}
 	// Fill a data segment with random data
-	fillRandom(shards[0])
+	fillRandom(shards[0], 2)
 	ok, err = r.Verify(shards)
 	if err != nil {
 		t.Fatal(err)
@@ -755,9 +1099,14 @@ func TestOneEncode(t *testing.T) {
 
 }
 
-func fillRandom(p []byte) {
+func fillRandom(p []byte, seed ...int64) {
+	src := rand.NewSource(time.Now().UnixNano())
+	if len(seed) > 0 {
+		src = rand.NewSource(seed[0])
+	}
+	rng := rand.New(src)
 	for i := 0; i < len(p); i += 7 {
-		val := rand.Int63()
+		val := rng.Int63()
 		for j := 0; i+j < len(p) && j < 7; j++ {
 			p[i+j] = byte(val)
 			val >>= 8
@@ -765,23 +1114,26 @@ func fillRandom(p []byte) {
 	}
 }
 
-func benchmarkEncode(b *testing.B, dataShards, parityShards, shardSize int) {
-	r, err := New(dataShards, parityShards, testOptions(WithAutoGoroutines(shardSize))...)
+func benchmarkEncode(b *testing.B, dataShards, parityShards, shardSize int, opts ...Option) {
+	opts = append(testOptions(WithAutoGoroutines(shardSize)), opts...)
+	r, err := New(dataShards, parityShards, opts...)
 	if err != nil {
 		b.Fatal(err)
 	}
-	shards := make([][]byte, dataShards+parityShards)
-	for s := range shards {
-		shards[s] = make([]byte, shardSize)
-	}
 
-	rand.Seed(0)
+	shards := r.(Extensions).AllocAligned(shardSize)
 	for s := 0; s < dataShards; s++ {
 		fillRandom(shards[s])
 	}
+	// Warm up so initialization is eliminated.
+	err = r.Encode(shards)
+	if err != nil {
+		b.Fatal(err)
+	}
 
 	b.SetBytes(int64(shardSize * (dataShards + parityShards)))
 	b.ResetTimer()
+	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
 		err = r.Encode(shards)
 		if err != nil {
@@ -790,10 +1142,116 @@ func benchmarkEncode(b *testing.B, dataShards, parityShards, shardSize int) {
 	}
 }
 
+func benchmarkDecode(b *testing.B, dataShards, parityShards, shardSize, deleteShards int, opts ...Option) {
+	opts = append(testOptions(WithAutoGoroutines(shardSize)), opts...)
+	r, err := New(dataShards, parityShards, opts...)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	shards := r.(Extensions).AllocAligned(shardSize)
+	for s := 0; s < dataShards; s++ {
+		fillRandom(shards[s])
+	}
+	if err := r.Encode(shards); err != nil {
+		b.Fatal(err)
+	}
+
+	b.SetBytes(int64(shardSize * (dataShards + parityShards)))
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		// Clear maximum number of data shards.
+		for s := 0; s < deleteShards; s++ {
+			shards[s] = nil
+		}
+
+		err = r.Reconstruct(shards)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
 func BenchmarkEncode2x1x1M(b *testing.B) {
 	benchmarkEncode(b, 2, 1, 1024*1024)
 }
 
+// Benchmark 800 data slices with 200 parity slices
+func BenchmarkEncode800x200(b *testing.B) {
+	for size := 64; size <= 1<<20; size *= 4 {
+		b.Run(fmt.Sprintf("%v", size), func(b *testing.B) {
+			benchmarkEncode(b, 800, 200, size)
+		})
+	}
+}
+
+// Benchmark 1K encode with symmetric shard sizes.
+func BenchmarkEncode1K(b *testing.B) {
+	for shards := 4; shards < 65536; shards *= 2 {
+		b.Run(fmt.Sprintf("%v+%v", shards, shards), func(b *testing.B) {
+			if shards*2 <= 256 {
+				b.Run(fmt.Sprint("cauchy"), func(b *testing.B) {
+					benchmarkEncode(b, shards, shards, 1024, WithCauchyMatrix())
+				})
+				b.Run(fmt.Sprint("leopard-gf8"), func(b *testing.B) {
+					benchmarkEncode(b, shards, shards, 1024, WithLeopardGF(true))
+				})
+			}
+			b.Run(fmt.Sprint("leopard-gf16"), func(b *testing.B) {
+				benchmarkEncode(b, shards, shards, 1024, WithLeopardGF16(true))
+			})
+		})
+	}
+}
+
+// Benchmark 1K decode with symmetric shard sizes.
+func BenchmarkDecode1K(b *testing.B) {
+	for shards := 4; shards < 65536; shards *= 2 {
+		b.Run(fmt.Sprintf("%v+%v", shards, shards), func(b *testing.B) {
+			if shards*2 <= 256 {
+				b.Run(fmt.Sprint("cauchy"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, shards, WithCauchyMatrix(), WithInversionCache(false))
+				})
+				b.Run(fmt.Sprint("cauchy-inv"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, shards, WithCauchyMatrix(), WithInversionCache(true))
+				})
+				b.Run(fmt.Sprint("cauchy-single"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, 1, WithCauchyMatrix(), WithInversionCache(false))
+				})
+				b.Run(fmt.Sprint("cauchy-single-inv"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, 1, WithCauchyMatrix(), WithInversionCache(true))
+				})
+				b.Run(fmt.Sprint("leopard-gf8"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, shards, WithLeopardGF(true), WithInversionCache(false))
+				})
+				b.Run(fmt.Sprint("leopard-gf8-inv"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, shards, WithLeopardGF(true), WithInversionCache(true))
+				})
+				b.Run(fmt.Sprint("leopard-gf8-single"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, 1, WithLeopardGF(true), WithInversionCache(false))
+				})
+				b.Run(fmt.Sprint("leopard-gf8-single-inv"), func(b *testing.B) {
+					benchmarkDecode(b, shards, shards, 1024, 1, WithLeopardGF(true), WithInversionCache(true))
+				})
+			}
+			b.Run(fmt.Sprint("leopard-gf16"), func(b *testing.B) {
+				benchmarkDecode(b, shards, shards, 1024, shards, WithLeopardGF16(true))
+			})
+			b.Run(fmt.Sprint("leopard-gf16-single"), func(b *testing.B) {
+				benchmarkDecode(b, shards, shards, 1024, 1, WithLeopardGF16(true))
+			})
+		})
+	}
+}
+
+func BenchmarkEncodeLeopard(b *testing.B) {
+	size := (64 << 20) / 800 / 64 * 64
+	b.Run(strconv.Itoa(size), func(b *testing.B) {
+		benchmarkEncode(b, 800, 200, size)
+	})
+}
+
 func BenchmarkEncode10x2x10000(b *testing.B) {
 	benchmarkEncode(b, 10, 2, 10000)
 }
@@ -826,11 +1284,16 @@ func BenchmarkEncode10x4x1M(b *testing.B) {
 	benchmarkEncode(b, 10, 4, 1024*1024)
 }
 
-// Benchmark 50 data shards and 20 parity shards with 1MB each.
+// Benchmark 50 data shards and 20 parity shards with 1M each.
 func BenchmarkEncode50x20x1M(b *testing.B) {
 	benchmarkEncode(b, 50, 20, 1024*1024)
 }
 
+// Benchmark 50 data shards and 20 parity shards with 1M each.
+func BenchmarkEncodeLeopard50x20x1M(b *testing.B) {
+	benchmarkEncode(b, 50, 20, 1024*1024, WithLeopardGF(true))
+}
+
 // Benchmark 17 data shards and 3 parity shards with 16MB each.
 func BenchmarkEncode17x3x16M(b *testing.B) {
 	benchmarkEncode(b, 17, 3, 16*1024*1024)
@@ -862,12 +1325,8 @@ func benchmarkVerify(b *testing.B, dataShards, parityShards, shardSize int) {
 	if err != nil {
 		b.Fatal(err)
 	}
-	shards := make([][]byte, parityShards+dataShards)
-	for s := range shards {
-		shards[s] = make([]byte, shardSize)
-	}
+	shards := r.(Extensions).AllocAligned(shardSize)
 
-	rand.Seed(0)
 	for s := 0; s < dataShards; s++ {
 		fillRandom(shards[s])
 	}
@@ -878,6 +1337,7 @@ func benchmarkVerify(b *testing.B, dataShards, parityShards, shardSize int) {
 
 	b.SetBytes(int64(shardSize * (dataShards + parityShards)))
 	b.ResetTimer()
+	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
 		_, err = r.Verify(shards)
 		if err != nil {
@@ -886,13 +1346,22 @@ func benchmarkVerify(b *testing.B, dataShards, parityShards, shardSize int) {
 	}
 }
 
+// Benchmark 800 data slices with 200 parity slices
+func BenchmarkVerify800x200(b *testing.B) {
+	for size := 64; size <= 1<<20; size *= 4 {
+		b.Run(fmt.Sprintf("%v", size), func(b *testing.B) {
+			benchmarkVerify(b, 800, 200, size)
+		})
+	}
+}
+
 // Benchmark 10 data slices with 2 parity slices holding 10000 bytes each
 func BenchmarkVerify10x2x10000(b *testing.B) {
 	benchmarkVerify(b, 10, 2, 10000)
 }
 
 // Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
-func BenchmarkVerify50x5x50000(b *testing.B) {
+func BenchmarkVerify50x5x100000(b *testing.B) {
 	benchmarkVerify(b, 50, 5, 100000)
 }
 
@@ -929,17 +1398,15 @@ func corruptRandom(shards [][]byte, dataShards, parityShards int) {
 	}
 }
 
-func benchmarkReconstruct(b *testing.B, dataShards, parityShards, shardSize int) {
-	r, err := New(dataShards, parityShards, testOptions(WithAutoGoroutines(shardSize))...)
+func benchmarkReconstruct(b *testing.B, dataShards, parityShards, shardSize int, opts ...Option) {
+	o := []Option{WithAutoGoroutines(shardSize)}
+	o = append(o, opts...)
+	r, err := New(dataShards, parityShards, testOptions(o...)...)
 	if err != nil {
 		b.Fatal(err)
 	}
-	shards := make([][]byte, parityShards+dataShards)
-	for s := range shards {
-		shards[s] = make([]byte, shardSize)
-	}
+	shards := r.(Extensions).AllocAligned(shardSize)
 
-	rand.Seed(0)
 	for s := 0; s < dataShards; s++ {
 		fillRandom(shards[s])
 	}
@@ -950,6 +1417,7 @@ func benchmarkReconstruct(b *testing.B, dataShards, parityShards, shardSize int)
 
 	b.SetBytes(int64(shardSize * (dataShards + parityShards)))
 	b.ResetTimer()
+	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
 		corruptRandom(shards, dataShards, parityShards)
 
@@ -965,6 +1433,15 @@ func BenchmarkReconstruct10x2x10000(b *testing.B) {
 	benchmarkReconstruct(b, 10, 2, 10000)
 }
 
+// Benchmark 800 data slices with 200 parity slices
+func BenchmarkReconstruct800x200(b *testing.B) {
+	for size := 64; size <= 1<<20; size *= 4 {
+		b.Run(fmt.Sprintf("%v", size), func(b *testing.B) {
+			benchmarkReconstruct(b, 800, 200, size)
+		})
+	}
+}
+
 // Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
 func BenchmarkReconstruct50x5x50000(b *testing.B) {
 	benchmarkReconstruct(b, 50, 5, 100000)
@@ -990,6 +1467,11 @@ func BenchmarkReconstruct50x20x1M(b *testing.B) {
 	benchmarkReconstruct(b, 50, 20, 1024*1024)
 }
 
+// Benchmark 50 data slices with 20 parity slices holding 1MB each
+func BenchmarkReconstructLeopard50x20x1M(b *testing.B) {
+	benchmarkReconstruct(b, 50, 20, 1024*1024, WithLeopardGF(true), WithInversionCache(true))
+}
+
 // Benchmark 10 data slices with 4 parity slices holding 16MB bytes each
 func BenchmarkReconstruct10x4x16M(b *testing.B) {
 	benchmarkReconstruct(b, 10, 4, 16*1024*1024)
@@ -1008,12 +1490,8 @@ func benchmarkReconstructData(b *testing.B, dataShards, parityShards, shardSize
 	if err != nil {
 		b.Fatal(err)
 	}
-	shards := make([][]byte, parityShards+dataShards)
-	for s := range shards {
-		shards[s] = make([]byte, shardSize)
-	}
+	shards := r.(Extensions).AllocAligned(shardSize)
 
-	rand.Seed(0)
 	for s := 0; s < dataShards; s++ {
 		fillRandom(shards[s])
 	}
@@ -1024,6 +1502,7 @@ func benchmarkReconstructData(b *testing.B, dataShards, parityShards, shardSize
 
 	b.SetBytes(int64(shardSize * (dataShards + parityShards)))
 	b.ResetTimer()
+	b.ReportAllocs()
 	for i := 0; i < b.N; i++ {
 		corruptRandomData(shards, dataShards, parityShards)
 
@@ -1039,6 +1518,15 @@ func BenchmarkReconstructData10x2x10000(b *testing.B) {
 	benchmarkReconstructData(b, 10, 2, 10000)
 }
 
+// Benchmark 800 data slices with 200 parity slices
+func BenchmarkReconstructData800x200(b *testing.B) {
+	for size := 64; size <= 1<<20; size *= 4 {
+		b.Run(fmt.Sprintf("%v", size), func(b *testing.B) {
+			benchmarkReconstructData(b, 800, 200, size)
+		})
+	}
+}
+
 // Benchmark 50 data slices with 5 parity slices holding 100000 bytes each
 func BenchmarkReconstructData50x5x50000(b *testing.B) {
 	benchmarkReconstructData(b, 50, 5, 100000)
@@ -1077,14 +1565,11 @@ func benchmarkReconstructP(b *testing.B, dataShards, parityShards, shardSize int
 
 	b.SetBytes(int64(shardSize * (dataShards + parityShards)))
 	b.ResetTimer()
+	b.ReportAllocs()
 
 	b.RunParallel(func(pb *testing.PB) {
-		shards := make([][]byte, parityShards+dataShards)
-		for s := range shards {
-			shards[s] = make([]byte, shardSize)
-		}
+		shards := r.(Extensions).AllocAligned(shardSize)
 
-		rand.Seed(0)
 		for s := 0; s < dataShards; s++ {
 			fillRandom(shards[s])
 		}
@@ -1115,6 +1600,7 @@ func BenchmarkReconstructP10x5x20000(b *testing.B) {
 }
 
 func TestEncoderReconstruct(t *testing.T) {
+	parallelIfNotShort(t)
 	testEncoderReconstruct(t)
 	for _, o := range testOpts() {
 		testEncoderReconstruct(t, o...)
@@ -1123,11 +1609,11 @@ func TestEncoderReconstruct(t *testing.T) {
 
 func testEncoderReconstruct(t *testing.T, o ...Option) {
 	// Create some sample data
-	var data = make([]byte, 250000)
+	var data = make([]byte, 250<<10)
 	fillRandom(data)
 
 	// Create 5 data slices of 50000 elements each
-	enc, err := New(5, 3, testOptions(o...)...)
+	enc, err := New(7, 6, testOptions(o...)...)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1199,44 +1685,82 @@ func testEncoderReconstruct(t *testing.T, o ...Option) {
 }
 
 func TestSplitJoin(t *testing.T) {
-	var data = make([]byte, 250000)
-	rand.Seed(0)
-	fillRandom(data)
-
-	enc, _ := New(5, 3, testOptions()...)
-	shards, err := enc.Split(data)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	_, err = enc.Split([]byte{})
-	if err != ErrShortData {
-		t.Errorf("expected %v, got %v", ErrShortData, err)
-	}
+	opts := [][]Option{
+		testOptions(),
+		append(testOptions(), WithLeopardGF(true)),
+		append(testOptions(), WithLeopardGF16(true)),
+	}
+	for i, opts := range opts {
+		t.Run("opt-"+strconv.Itoa(i), func(t *testing.T) {
+			for _, dp := range [][2]int{{1, 0}, {5, 0}, {5, 1}, {12, 4}, {2, 15}, {17, 1}} {
+				enc, _ := New(dp[0], dp[1], opts...)
+				ext := enc.(Extensions)
+
+				_, err := enc.Split([]byte{})
+				if err != ErrShortData {
+					t.Errorf("expected %v, got %v", ErrShortData, err)
+				}
 
-	buf := new(bytes.Buffer)
-	err = enc.Join(buf, shards, 50)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !bytes.Equal(buf.Bytes(), data[:50]) {
-		t.Fatal("recovered data does match original")
-	}
+				buf := new(bytes.Buffer)
+				err = enc.Join(buf, [][]byte{}, 0)
+				if err != ErrTooFewShards {
+					t.Errorf("expected %v, got %v", ErrTooFewShards, err)
+				}
+				for _, size := range []int{ext.DataShards(), 1337, 2699} {
+					for _, extra := range []int{0, 1, ext.ShardSizeMultiple(), ext.ShardSizeMultiple() * ext.DataShards(), ext.ShardSizeMultiple()*ext.ParityShards() + 1, 255} {
+						buf.Reset()
+						t.Run(fmt.Sprintf("d-%d-p-%d-sz-%d-cap%d", ext.DataShards(), ext.ParityShards(), size, extra), func(t *testing.T) {
+							var data = make([]byte, size, size+extra)
+							var ref = make([]byte, size, size)
+							fillRandom(data)
+							copy(ref, data)
+
+							shards, err := enc.Split(data)
+							if err != nil {
+								t.Fatal(err)
+							}
+							err = enc.Encode(shards)
+							if err != nil {
+								t.Fatal(err)
+							}
+							_, err = enc.Verify(shards)
+							if err != nil {
+								t.Fatal(err)
+							}
+							for i := range shards[:ext.ParityShards()] {
+								// delete data shards up to parity
+								shards[i] = nil
+							}
+							err = enc.Reconstruct(shards)
+							if err != nil {
+								t.Fatal(err)
+							}
 
-	err = enc.Join(buf, [][]byte{}, 0)
-	if err != ErrTooFewShards {
-		t.Errorf("expected %v, got %v", ErrTooFewShards, err)
-	}
+							// Rejoin....
+							err = enc.Join(buf, shards, size)
+							if err != nil {
+								t.Fatal(err)
+							}
+							if !bytes.Equal(buf.Bytes(), ref) {
+								t.Log("")
+								t.Fatal("recovered data does not match original")
+							}
 
-	err = enc.Join(buf, shards, len(data)+1)
-	if err != ErrShortData {
-		t.Errorf("expected %v, got %v", ErrShortData, err)
-	}
+							err = enc.Join(buf, shards, len(data)+ext.DataShards()*ext.ShardSizeMultiple())
+							if err != ErrShortData {
+								t.Errorf("expected %v, got %v", ErrShortData, err)
+							}
 
-	shards[0] = nil
-	err = enc.Join(buf, shards, len(data))
-	if err != ErrReconstructRequired {
-		t.Errorf("expected %v, got %v", ErrReconstructRequired, err)
+							shards[0] = nil
+							err = enc.Join(buf, shards, len(data))
+							if err != ErrReconstructRequired {
+								t.Errorf("expected %v, got %v", ErrReconstructRequired, err)
+							}
+						})
+					}
+				}
+			}
+		})
 	}
 }
 
@@ -1248,11 +1772,11 @@ func TestCodeSomeShards(t *testing.T) {
 	shards, _ := enc.Split(data)
 
 	old := runtime.GOMAXPROCS(1)
-	r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
+	r.codeSomeShards(r.parity, shards[:r.dataShards], shards[r.dataShards:r.dataShards+r.parityShards], len(shards[0]))
 
 	// hopefully more than 1 CPU
 	runtime.GOMAXPROCS(runtime.NumCPU())
-	r.codeSomeShards(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
+	r.codeSomeShards(r.parity, shards[:r.dataShards], shards[r.dataShards:r.dataShards+r.parityShards], len(shards[0]))
 
 	// reset MAXPROCS, otherwise testing complains
 	runtime.GOMAXPROCS(old)
@@ -1266,7 +1790,7 @@ func TestStandardMatrices(t *testing.T) {
 	for i := 1; i < 256; i++ {
 		i := i
 		t.Run(fmt.Sprintf("x%d", i), func(t *testing.T) {
-			t.Parallel()
+			parallelIfNotShort(t)
 			// i == n.o. datashards
 			var shards = make([][]byte, 255)
 			for p := range shards {
@@ -1327,7 +1851,7 @@ func TestCauchyMatrices(t *testing.T) {
 	for i := 1; i < 256; i++ {
 		i := i
 		t.Run(fmt.Sprintf("x%d", i), func(t *testing.T) {
-			t.Parallel()
+			parallelIfNotShort(t)
 			var shards = make([][]byte, 255)
 			for p := range shards {
 				v := byte(i)
@@ -1387,7 +1911,7 @@ func TestPar1Matrices(t *testing.T) {
 	for i := 1; i < 256; i++ {
 		i := i
 		t.Run(fmt.Sprintf("x%d", i), func(t *testing.T) {
-			t.Parallel()
+			parallelIfNotShort(t)
 			var shards = make([][]byte, 255)
 			for p := range shards {
 				v := byte(i)
@@ -1458,11 +1982,11 @@ func TestNew(t *testing.T) {
 		{255, 1, nil},
 		{255, 0, nil},
 		{1, 0, nil},
-		{256, 256, ErrMaxShardNum},
+		{65536, 65536, ErrMaxShardNum},
 
 		{0, 1, ErrInvShardNum},
 		{1, -1, ErrInvShardNum},
-		{256, 1, ErrMaxShardNum},
+		{65636, 1, ErrMaxShardNum},
 
 		// overflow causes r.Shards to be negative
 		{256, int(^uint(0) >> 1), errInvalidRowSize},
@@ -1475,6 +1999,20 @@ func TestNew(t *testing.T) {
 	}
 }
 
+func TestSplitZero(t *testing.T) {
+	data := make([]byte, 512)
+	for _, opts := range testOpts() {
+		ecctest, err := New(1, 0, opts...)
+		if err != nil {
+			t.Fatal(err)
+		}
+		_, err = ecctest.Split(data)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
+
 // Benchmark 10 data shards and 4 parity shards and 160MB data.
 func BenchmarkSplit10x4x160M(b *testing.B) {
 	benchmarkSplit(b, 10, 4, 160*1024*1024)
@@ -1531,15 +2069,14 @@ func benchmarkParallel(b *testing.B, dataShards, parityShards, shardSize int) {
 	c := runtime.GOMAXPROCS(0)
 
 	// Note that concurrency also affects total data size and will make caches less effective.
-	b.Log("Total data:", (c*dataShards*shardSize)>>20, "MiB", "parity:", (c*parityShards*shardSize)>>20, "MiB")
+	if testing.Verbose() {
+		b.Log("Total data:", (c*dataShards*shardSize)>>20, "MiB", "parity:", (c*parityShards*shardSize)>>20, "MiB")
+	}
 	// Create independent shards
 	shardsCh := make(chan [][]byte, c)
 	for i := 0; i < c; i++ {
-		rand.Seed(int64(i))
-		shards := make([][]byte, dataShards+parityShards)
-		for s := range shards {
-			shards[s] = make([]byte, shardSize)
-		}
+		shards := r.(Extensions).AllocAligned(shardSize)
+
 		for s := 0; s < dataShards; s++ {
 			fillRandom(shards[s])
 		}
@@ -1573,3 +2110,108 @@ func BenchmarkParallel_8x8x32M(b *testing.B)   { benchmarkParallel(b, 8, 8, 32<<
 func BenchmarkParallel_8x3x1M(b *testing.B) { benchmarkParallel(b, 8, 3, 1<<20) }
 func BenchmarkParallel_8x4x1M(b *testing.B) { benchmarkParallel(b, 8, 4, 1<<20) }
 func BenchmarkParallel_8x5x1M(b *testing.B) { benchmarkParallel(b, 8, 5, 1<<20) }
+
+func TestReentrant(t *testing.T) {
+	for optN, o := range testOpts() {
+		for _, size := range testSizes() {
+			data, parity := size[0], size[1]
+			rng := rand.New(rand.NewSource(0xabadc0cac01a))
+			t.Run(fmt.Sprintf("opt-%d-%dx%d", optN, data, parity), func(t *testing.T) {
+				perShard := 16384 + 1
+				if testing.Short() {
+					perShard = 1024 + 1
+				}
+				r, err := New(data, parity, testOptions(o...)...)
+				if err != nil {
+					t.Fatal(err)
+				}
+				x := r.(Extensions)
+				if want, got := data, x.DataShards(); want != got {
+					t.Errorf("DataShards returned %d, want %d", got, want)
+				}
+				if want, got := parity, x.ParityShards(); want != got {
+					t.Errorf("ParityShards returned %d, want %d", got, want)
+				}
+				if want, got := parity+data, x.TotalShards(); want != got {
+					t.Errorf("TotalShards returned %d, want %d", got, want)
+				}
+				mul := x.ShardSizeMultiple()
+				if mul <= 0 {
+					t.Fatalf("Got unexpected ShardSizeMultiple: %d", mul)
+				}
+				perShard = ((perShard + mul - 1) / mul) * mul
+				runs := 10
+				if testing.Short() {
+					runs = 2
+				}
+				for i := 0; i < runs; i++ {
+					shards := AllocAligned(data+parity, perShard)
+
+					err = r.Encode(shards)
+					if err != nil {
+						t.Fatal(err)
+					}
+					ok, err := r.Verify(shards)
+					if err != nil {
+						t.Fatal(err)
+					}
+					if !ok {
+						t.Fatal("Verification failed")
+					}
+
+					if parity == 0 {
+						// Check that Reconstruct and ReconstructData do nothing
+						err = r.ReconstructData(shards)
+						if err != nil {
+							t.Fatal(err)
+						}
+						err = r.Reconstruct(shards)
+						if err != nil {
+							t.Fatal(err)
+						}
+
+						// Skip integrity checks
+						continue
+					}
+
+					// Delete one in data
+					idx := rng.Intn(data)
+					want := shards[idx]
+					shards[idx] = nil
+
+					err = r.ReconstructData(shards)
+					if err != nil {
+						t.Fatal(err)
+					}
+					if !bytes.Equal(shards[idx], want) {
+						t.Fatal("did not ReconstructData correctly")
+					}
+
+					// Delete one randomly
+					idx = rng.Intn(data + parity)
+					want = shards[idx]
+					shards[idx] = nil
+					err = r.Reconstruct(shards)
+					if err != nil {
+						t.Fatal(err)
+					}
+					if !bytes.Equal(shards[idx], want) {
+						t.Fatal("did not Reconstruct correctly")
+					}
+
+					err = r.Encode(make([][]byte, 1))
+					if err != ErrTooFewShards {
+						t.Errorf("expected %v, got %v", ErrTooFewShards, err)
+					}
+
+					// Make one too short.
+					shards[idx] = shards[idx][:perShard-1]
+					err = r.Encode(shards)
+					if err != ErrShardSize {
+						t.Errorf("expected %v, got %v", ErrShardSize, err)
+					}
+				}
+			})
+		}
+	}
+}
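
The rewritten tests above repeatedly round the shard size up to Extensions.ShardSizeMultiple() before encoding, because the Leopard-based codecs (WithLeopardGF / WithLeopardGF16) only accept shard sizes that are a multiple of that value (64 in practice), while the classic GF(8) path reports 1. As an illustration, outside the diff itself, here is a minimal sketch of the same pattern from application code, assuming only the public API exercised in these tests (New, Extensions, WithLeopardGF16):

package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 800 data + 200 parity shards; GF16 Leopard allows >256 total shards.
	enc, err := reedsolomon.New(800, 200, reedsolomon.WithLeopardGF16(true))
	if err != nil {
		log.Fatal(err)
	}
	ext := enc.(reedsolomon.Extensions)

	// Round the desired shard size up to the codec's required multiple.
	perShard := 10_000
	if mul := ext.ShardSizeMultiple(); mul > 1 {
		perShard = ((perShard + mul - 1) / mul) * mul
	}

	// AllocAligned returns one buffer per shard (data + parity).
	shards := ext.AllocAligned(perShard)
	// ... fill shards[:ext.DataShards()] with the payload ...
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}
}
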
diff --git a/streaming.go b/streaming.go
index d048ba0..f7aba3b 100644
--- a/streaming.go
+++ b/streaming.go
@@ -8,7 +8,6 @@
 package reedsolomon
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
 	"io"
@@ -147,6 +146,10 @@ type rsStream struct {
 // you want to use. You can reuse this encoder.
 // Note that the maximum number of data shards is 256.
 func NewStream(dataShards, parityShards int, o ...Option) (StreamEncoder, error) {
+	if dataShards+parityShards > 256 {
+		return nil, ErrMaxShardNum
+	}
+
 	r := rsStream{o: defaultOptions}
 	for _, opt := range o {
 		opt(&r.o)
@@ -169,11 +172,7 @@ func NewStream(dataShards, parityShards int, o ...Option) (StreamEncoder, error)
 	r.r = enc.(*reedSolomon)
 
 	r.blockPool.New = func() interface{} {
-		out := make([][]byte, dataShards+parityShards)
-		for i := range out {
-			out[i] = make([]byte, r.o.streamBS)
-		}
-		return out
+		return AllocAligned(dataShards+parityShards, r.o.streamBS)
 	}
 	r.readShards = readShards
 	r.writeShards = writeShards
@@ -219,18 +218,18 @@ func (r *rsStream) createSlice() [][]byte {
 // will be returned. If a parity writer returns an error, a
 // StreamWriteError will be returned.
 func (r *rsStream) Encode(data []io.Reader, parity []io.Writer) error {
-	if len(data) != r.r.DataShards {
+	if len(data) != r.r.dataShards {
 		return ErrTooFewShards
 	}
 
-	if len(parity) != r.r.ParityShards {
+	if len(parity) != r.r.parityShards {
 		return ErrTooFewShards
 	}
 
 	all := r.createSlice()
 	defer r.blockPool.Put(all)
-	in := all[:r.r.DataShards]
-	out := all[r.r.DataShards:]
+	in := all[:r.r.dataShards]
+	out := all[r.r.dataShards:]
 	read := 0
 
 	for {
@@ -425,7 +424,7 @@ func cWriteShards(out []io.Writer, in [][]byte) error {
 // If a shard stream returns an error, a StreamReadError type error
 // will be returned.
 func (r *rsStream) Verify(shards []io.Reader) (bool, error) {
-	if len(shards) != r.r.Shards {
+	if len(shards) != r.r.totalShards {
 		return false, ErrTooFewShards
 	}
 
@@ -472,10 +471,10 @@ var ErrReconstructMismatch = errors.New("valid shards and fill shards are mutual
 // However its integrity is not automatically verified.
 // Use the Verify function to check in case the data set is complete.
 func (r *rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error {
-	if len(valid) != r.r.Shards {
+	if len(valid) != r.r.totalShards {
 		return ErrTooFewShards
 	}
-	if len(fill) != r.r.Shards {
+	if len(fill) != r.r.totalShards {
 		return ErrTooFewShards
 	}
 
@@ -486,7 +485,7 @@ func (r *rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error {
 		if valid[i] != nil && fill[i] != nil {
 			return ErrReconstructMismatch
 		}
-		if i >= r.r.DataShards && fill[i] != nil {
+		if i >= r.r.dataShards && fill[i] != nil {
 			reconDataOnly = false
 		}
 	}
@@ -530,12 +529,12 @@ func (r *rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error {
 // If the total data size is less than outSize, ErrShortData will be returned.
 func (r *rsStream) Join(dst io.Writer, shards []io.Reader, outSize int64) error {
 	// Do we have enough shards?
-	if len(shards) < r.r.DataShards {
+	if len(shards) < r.r.dataShards {
 		return ErrTooFewShards
 	}
 
 	// Trim off parity shards if any
-	shards = shards[:r.r.DataShards]
+	shards = shards[:r.r.dataShards]
 	for i := range shards {
 		if shards[i] == nil {
 			return StreamReadError{Err: ErrShardNoData, Stream: i}
@@ -571,7 +570,7 @@ func (r *rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
 	if size == 0 {
 		return ErrShortData
 	}
-	if len(dst) != r.r.DataShards {
+	if len(dst) != r.r.dataShards {
 		return ErrInvShardNum
 	}
 
@@ -582,11 +581,11 @@ func (r *rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
 	}
 
 	// Calculate number of bytes per shard.
-	perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards)
+	perShard := (size + int64(r.r.dataShards) - 1) / int64(r.r.dataShards)
 
 	// Pad data to r.Shards*perShard.
-	padding := make([]byte, (int64(r.r.Shards)*perShard)-size)
-	data = io.MultiReader(data, bytes.NewBuffer(padding))
+	paddingSize := (int64(r.r.totalShards) * perShard) - size
+	data = io.MultiReader(data, io.LimitReader(zeroPaddingReader{}, paddingSize))
 
 	// Split into equal-length shards and copy.
 	for i := range dst {
@@ -601,3 +600,15 @@ func (r *rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
 
 	return nil
 }
+
+type zeroPaddingReader struct{}
+
+var _ io.Reader = &zeroPaddingReader{}
+
+func (t zeroPaddingReader) Read(p []byte) (n int, err error) {
+	n = len(p)
+	for i := 0; i < n; i++ {
+		p[i] = 0
+	}
+	return n, nil
+}
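
The streaming Split change above stops allocating a padding buffer sized to the missing tail and instead streams zeros through io.LimitReader over a tiny zero-producing reader. A self-contained sketch of that padding technique, using only the standard library (names here are illustrative, not taken from the package):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// zeroReader yields an endless stream of zero bytes.
type zeroReader struct{}

func (zeroReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

func main() {
	data := strings.NewReader("payload") // 7 bytes of real data
	const want = 16                      // pad up to 16 bytes total

	// Append exactly (want - 7) zero bytes without allocating them up front.
	padded := io.MultiReader(data, io.LimitReader(zeroReader{}, int64(want-7)))

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, padded); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(buf.Len()) // 16
}
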
diff --git a/testlevel.go b/testlevel.go
new file mode 100644
index 0000000..a905748
--- /dev/null
+++ b/testlevel.go
@@ -0,0 +1,31 @@
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+	"flag"
+	"log"
+	"strconv"
+
+	"github.com/klauspost/cpuid/v2"
+)
+
+func main() {
+	flag.Parse()
+	args := flag.Args()
+	if len(args) != 1 {
+		log.Fatalln("Supply CPU level 1-4 to test as argument")
+	}
+	l, err := strconv.Atoi(args[0])
+	if err != nil {
+		log.Fatalln("Unable to parse level:", err)
+	}
+	if l < 1 || l > 4 {
+		log.Fatalln("Supply CPU level 1-4 to test as argument")
+	}
+	if cpuid.CPU.X64Level() < l {
+		// Does os.Exit(1)
+		log.Fatalln("CPU level not supported")
+	}
+}
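
testlevel.go is a build-ignored helper: it exits non-zero (via log.Fatalln) when the host CPU does not support the requested x86-64 microarchitecture level, which lets a test script skip level-specific runs cleanly. The exact wiring lives in the upstream CI workflow, but the intended use is presumably something along the lines of `go run testlevel.go 3 && GOAMD64=v3 go test ./...`, i.e. only run the GOAMD64=v3 build of the tests when the CPU actually implements level 3.
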
diff --git a/unsafe.go b/unsafe.go
new file mode 100644
index 0000000..d85892f
--- /dev/null
+++ b/unsafe.go
@@ -0,0 +1,41 @@
+//go:build !noasm && !nounsafe && !gccgo && !appengine
+
+/**
+ * Reed-Solomon Coding over 8-bit values.
+ *
+ * Copyright 2023, Klaus Post
+ */
+
+package reedsolomon
+
+import (
+	"unsafe"
+)
+
+// AllocAligned allocates 'shards' slices, with 'each' bytes.
+// Each slice will start on a 64 byte aligned boundary.
+func AllocAligned(shards, each int) [][]byte {
+	if false {
+		res := make([][]byte, shards)
+		for i := range res {
+			res[i] = make([]byte, each)
+		}
+		return res
+	}
+	const (
+		alignEach  = 64
+		alignStart = 64
+	)
+	eachAligned := ((each + alignEach - 1) / alignEach) * alignEach
+	total := make([]byte, eachAligned*shards+63)
+	align := uint(uintptr(unsafe.Pointer(&total[0]))) & (alignStart - 1)
+	if align > 0 {
+		total = total[alignStart-align:]
+	}
+	res := make([][]byte, shards)
+	for i := range res {
+		res[i] = total[:each:eachAligned]
+		total = total[eachAligned:]
+	}
+	return res
+}
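
AllocAligned backs all shards with one allocation, rounds each shard's capacity up to 64 bytes so SIMD kernels can operate on full vectors, and, in this unsafe-enabled build, places every shard on a 64-byte boundary (the nounsafe fallback below keeps the capacity rounding but not the start alignment). A small sketch checking that property, assuming the exported package-level AllocAligned shown above:

package main

import (
	"fmt"
	"unsafe"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// Four shards of 1000 bytes each, carved out of one backing buffer.
	shards := reedsolomon.AllocAligned(4, 1000)
	for i, s := range shards {
		addr := uintptr(unsafe.Pointer(&s[0]))
		// aligned should report true on builds that include unsafe.go.
		fmt.Printf("shard %d: len=%d cap=%d aligned=%v\n", i, len(s), cap(s), addr%64 == 0)
	}
}
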
diff --git a/unsafe_disabled.go b/unsafe_disabled.go
new file mode 100644
index 0000000..95cb8e6
--- /dev/null
+++ b/unsafe_disabled.go
@@ -0,0 +1,23 @@
+//go:build noasm || nounsafe || gccgo || appengine
+
+/**
+ * Reed-Solomon Coding over 8-bit values.
+ *
+ * Copyright 2023, Klaus Post
+ */
+
+package reedsolomon
+
+// AllocAligned allocates 'shards' slices, with 'each' bytes.
+// Each slice will start on a 64 byte aligned boundary.
+func AllocAligned(shards, each int) [][]byte {
+	eachAligned := ((each + 63) / 64) * 64
+	total := make([]byte, eachAligned*shards+63)
+	// We cannot do initial align without "unsafe", just use native alignment.
+	res := make([][]byte, shards)
+	for i := range res {
+		res[i] = total[:each:eachAligned]
+		total = total[eachAligned:]
+	}
+	return res
+}
