diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 0000000..4a03b68
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,88 @@
+name: Go
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        go-version: [1.15.x, 1.16.x, 1.17.x]
+        os: [ubuntu-latest, macos-latest, windows-latest]
+    env:
+      CGO_ENABLED: 0
+    runs-on: ${{ matrix.os }}
+    steps:
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go-version }}
+
+    - name: Checkout code
+      uses: actions/checkout@v2
+
+    - name: Vet
+      run: go vet ./...
+
+    - name: Test
+      run: go test ./...
+
+    - name: Test Noasm
+      run: go test -tags=noasm&&go test -no-avx512&&go test -no-avx512 -no-avx2&&go test -no-avx512 -no-avx2 -no-ssse3
+
+    - name: Test Race
+      env:
+        CGO_ENABLED: 1
+      run: go test -cpu="1,4" -short -race -v .
+
+  build-special:
+    env:
+      CGO_ENABLED: 0
+    runs-on: ubuntu-latest
+    steps:
+    - name: Set up Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.17.x
+
+    - name: Checkout code
+      uses: actions/checkout@v2
+
+    - name: fmt
+      run: diff <(gofmt -d .) <(printf "")
+
+    - name: Test 386
+      run: GOOS=linux GOARCH=386 go test -short ./...
+
+    - name: Build examples
+      run: go build examples/simple-decoder.go&&go build examples/simple-encoder.go&&go build examples/stream-decoder.go&&go build examples/stream-encoder.go
+
+    - name: Test Races, noasm, 1 cpu
+      env:
+        CGO_ENABLED: 1
+      run: go test -tags=noasm -cpu=1 -short -race .
+
+    - name: Test Races, noasm, 4 cpu
+      env:
+        CGO_ENABLED: 1
+      run: go test -tags=noasm -cpu=4 -short -race .
+
+    - name: Test Races, no avx512
+      env:
+        CGO_ENABLED: 1
+      run: go test -no-avx512 -short -race .
+
+    - name: Test Races, no avx2
+      env:
+        CGO_ENABLED: 1
+      run: go test -no-avx512 -no-avx2 -short -race .
+
+    - name: Test Races, no ssse3
+      env:
+        CGO_ENABLED: 1
+      run: go test -no-avx512 -no-avx2 -no-ssse3 -short -race .
+
diff --git a/README.md b/README.md
index ff50f43..ee8f2ae 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,6 @@
 # Reed-Solomon
-[![GoDoc][1]][2] [![Build Status][3]][4]
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/reedsolomon.svg)](https://pkg.go.dev/github.com/klauspost/reedsolomon) [![Build Status][3]][4]
 
-[1]: https://godoc.org/github.com/klauspost/reedsolomon?status.svg
-[2]: https://pkg.go.dev/github.com/klauspost/reedsolomon?tab=doc
 [3]: https://travis-ci.org/klauspost/reedsolomon.svg?branch=master
 [4]: https://travis-ci.org/klauspost/reedsolomon
 
@@ -26,6 +24,15 @@ go get -u github.com/klauspost/reedsolomon
 Using Go modules recommended.
 
 # Changes
+## 2021
+
+* Add progressive shard encoding.
+* Wider AVX2 loops
+* Limit concurrency on AVX2, since we are likely memory bound.
+* Allow 0 parity shards.
+* Allow disabling inversion cache.
+* Faster AVX2 encoding.
+
 
 ## May 2020
 
@@ -211,6 +218,49 @@ To join a data set, use the `Join()` function, which will join the shards and wr
    err = enc.Join(io.Discard, data, len(bigfile))
 ```
 
+# Progressive encoding
+
+It is possible to encode individual shards using EncodeIdx:
+
+```Go
+	// EncodeIdx will add parity for a single data shard.
+	// Parity shards should start out as 0. The caller must zero them.
+	// Data shards must be delivered exactly once. There is no check for this.
+	// The parity shards will always be updated and the data shards will remain the same.
+	EncodeIdx(dataShard []byte, idx int, parity [][]byte) error
+```
+
+This allows progressively encoding the parity by sending individual data shards.
+There is no requirement on shards being delivered in order, 
+but when sent in order it allows encoding shards one at a time,
+effectively allowing the operation to be streaming. 
+
+The result will be the same as encoding all shards at once.
+There is a minor speed penalty using this method, so send 
+shards at once if they are available.
+
+## Example
+
+```Go
+func test() {
+    // Create an encoder with 7 data and 3 parity slices.
+    enc, _ := reedsolomon.New(7, 3)
+
+    // This will be our output parity.
+    parity := make([][]byte, 3)
+    for i := range parity {
+        parity[i] = make([]byte, 10000)
+    }
+
+    for i := 0; i < 7; i++ {
+        // Send data shards one at a time.
+        _ = enc.EncodeIdx(make([]byte, 10000), i, parity)
+    }
+
+    // parity now contains parity, as if all data was sent in one call.
+}
+```
+
 # Streaming/Merging
 
 It might seem like a limitation that all data should be in memory, 
diff --git a/_gen/gen.go b/_gen/gen.go
index c544390..36709e8 100644
--- a/_gen/gen.go
+++ b/_gen/gen.go
@@ -1,7 +1,9 @@
-//+build generate
+//go:build generate
+// +build generate
 
 //go:generate go run gen.go -out ../galois_gen_amd64.s -stubs ../galois_gen_amd64.go -pkg=reedsolomon
-//go:generate gofmt -w ../galois_gen_switch_amd64.go
+//go:generate go fmt ../galois_gen_switch_amd64.go
+//go:generate go fmt ../galois_gen_amd64.go
 
 package main
 
@@ -35,14 +37,15 @@ func main() {
 	Constraint(buildtags.Not("nogen").ToConstraint())
 	Constraint(buildtags.Term("gc").ToConstraint())
 
-	const perLoopBits = 5
+	const perLoopBits = 6
 	const perLoop = 1 << perLoopBits
 
 	for i := 1; i <= inputMax; i++ {
 		for j := 1; j <= outputMax; j++ {
-			//genMulAvx2(fmt.Sprintf("mulAvxTwoXor_%dx%d", i, j), i, j, true)
 			genMulAvx2(fmt.Sprintf("mulAvxTwo_%dx%d", i, j), i, j, false)
 			genMulAvx2Sixty64(fmt.Sprintf("mulAvxTwo_%dx%d_64", i, j), i, j, false)
+			genMulAvx2(fmt.Sprintf("mulAvxTwo_%dx%dXor", i, j), i, j, true)
+			genMulAvx2Sixty64(fmt.Sprintf("mulAvxTwo_%dx%d_64Xor", i, j), i, j, true)
 		}
 	}
 	f, err := os.Create("../galois_gen_switch_amd64.go")
@@ -61,19 +64,26 @@ func main() {
 
 package reedsolomon
 
-import "fmt"
+import (
+	"fmt"
+)
 
 `)
 
-	w.WriteString("const avx2CodeGen = true\n")
-	w.WriteString(fmt.Sprintf("const maxAvx2Inputs = %d\nconst maxAvx2Outputs = %d\n", inputMax, outputMax))
+	w.WriteString(fmt.Sprintf(`const (
+avx2CodeGen = true
+maxAvx2Inputs = %d
+maxAvx2Outputs = %d
+minAvx2Size = %d
+avxSizeMask = maxInt - (minAvx2Size-1)
+)`, inputMax, outputMax, perLoop))
 	w.WriteString(`
 
 func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
-	n := stop-start
+	n := (stop-start) & avxSizeMask
+
 `)
 
-	w.WriteString(fmt.Sprintf("n = (n>>%d)<<%d\n\n", perLoopBits, perLoopBits))
 	w.WriteString(`switch len(in) {
 `)
 	for in, defs := range switchDefs[:] {
@@ -87,6 +97,25 @@ func galMulSlicesAvx2(matrix []byte, in, out [][]byte, start, stop int) int {
 	w.WriteString(`}
 	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
 }
+
+func galMulSlicesAvx2Xor(matrix []byte, in, out [][]byte, start, stop int) int {
+	n := (stop-start) & avxSizeMask
+
+`)
+
+	w.WriteString(`switch len(in) {
+`)
+	for in, defs := range switchDefsX[:] {
+		w.WriteString(fmt.Sprintf("		case %d:\n			switch len(out) {\n", in+1))
+		for out, def := range defs[:] {
+			w.WriteString(fmt.Sprintf("				case %d:\n", out+1))
+			w.WriteString(def)
+		}
+		w.WriteString("}\n")
+	}
+	w.WriteString(`}
+	panic(fmt.Sprintf("unhandled size: %dx%d", len(in), len(out)))
+}
 `)
 	Generate()
 }
@@ -128,12 +157,21 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 		}
 	}
 
+	x := ""
+	if xor {
+		x = "Xor"
+	}
+
 	TEXT(name, attr.NOSPLIT, fmt.Sprintf("func(matrix []byte, in [][]byte, out [][]byte, start, n int)"))
 
 	// SWITCH DEFINITION:
-	s := fmt.Sprintf("			mulAvxTwo_%dx%d(matrix, in, out, start, n)\n", inputs, outputs)
+	s := fmt.Sprintf("			mulAvxTwo_%dx%d%s(matrix, in, out, start, n)\n", inputs, outputs, x)
 	s += fmt.Sprintf("\t\t\t\treturn n\n")
-	switchDefs[inputs-1][outputs-1] = s
+	if xor {
+		switchDefsX[inputs-1][outputs-1] = s
+	} else {
+		switchDefs[inputs-1][outputs-1] = s
+	}
 
 	if loadNone {
 		Comment("Loading no tables to registers")
@@ -196,7 +234,6 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 	if err != nil {
 		panic(err)
 	}
-	outBase := addr.Addr
 	outSlicePtr := GP64()
 	MOVQ(addr.Addr, outSlicePtr)
 	for i := range dst {
@@ -240,13 +277,13 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 		SHRQ(U8(perLoopBits), length)
 	}
 	Label(name + "_loop")
-	if xor {
+
+	// Load data before loop or during first iteration?
+	// No clear winner.
+	preloadInput := xor && false
+	if preloadInput {
 		Commentf("Load %d outputs", outputs)
-	} else {
-		Commentf("Clear %d outputs", outputs)
-	}
-	for i := range dst {
-		if xor {
+		for i := range dst {
 			if regDst {
 				VMOVDQU(Mem{Base: dstPtr[i]}, dst[i])
 				if prefetchDst > 0 {
@@ -255,13 +292,11 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 				continue
 			}
 			ptr := GP64()
-			MOVQ(outBase, ptr)
+			MOVQ(Mem{Base: outSlicePtr, Disp: i * 24}, ptr)
 			VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1}, dst[i])
 			if prefetchDst > 0 {
 				PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
 			}
-		} else {
-			VPXOR(dst[i], dst[i], dst[i])
 		}
 	}
 
@@ -278,6 +313,22 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 		VPAND(lowMask, inLow, inLow)
 		VPAND(lowMask, inHigh, inHigh)
 		for j := range dst {
+			//Commentf(" xor:%v i: %v", xor, i)
+			if !preloadInput && xor && i == 0 {
+				if regDst {
+					VMOVDQU(Mem{Base: dstPtr[j]}, dst[j])
+					if prefetchDst > 0 {
+						PREFETCHT0(Mem{Base: dstPtr[j], Disp: prefetchDst})
+					}
+				} else {
+					ptr := GP64()
+					MOVQ(Mem{Base: outSlicePtr, Disp: j * 24}, ptr)
+					VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1}, dst[j])
+					if prefetchDst > 0 {
+						PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
+					}
+				}
+			}
 			if loadNone {
 				VMOVDQU(Mem{Base: matrixBase, Disp: 64 * (i*outputs + j)}, lookLow)
 				VMOVDQU(Mem{Base: matrixBase, Disp: 32 + 64*(i*outputs+j)}, lookHigh)
@@ -287,8 +338,13 @@ func genMulAvx2(name string, inputs int, outputs int, xor bool) {
 				VPSHUFB(inLow, inLo[i*outputs+j], lookLow)
 				VPSHUFB(inHigh, inHi[i*outputs+j], lookHigh)
 			}
-			VPXOR(lookLow, lookHigh, lookLow)
-			VPXOR(lookLow, dst[j], dst[j])
+			if i == 0 && !xor {
+				// We don't have any existing data, write directly.
+				VPXOR(lookLow, lookHigh, dst[j])
+			} else {
+				VPXOR(lookLow, lookHigh, lookLow)
+				VPXOR(lookLow, dst[j], dst[j])
+			}
 		}
 	}
 	Commentf("Store %d outputs", outputs)
@@ -339,35 +395,42 @@ func genMulAvx2Sixty64(name string, inputs int, outputs int, xor bool) {
 	// Load shuffle masks on every use.
 	var loadNone bool
 	// Use registers for destination registers.
-	var regDst = false
+	var regDst = true
 	var reloadLength = false
 
 	// lo, hi, 1 in, 1 out, 2 tmp, 1 mask
-	est := total*2 + outputs + 5
+	est := total*4 + outputs + 7
 	if outputs == 1 {
 		// We don't need to keep a copy of the input if only 1 output.
 		est -= 2
 	}
 
-	if true || est > 16 {
+	if est > 16 {
 		loadNone = true
 		// We run out of GP registers first, now.
 		if inputs+outputs > 13 {
 			regDst = false
 		}
 		// Save one register by reloading length.
-		if true || inputs+outputs > 12 && regDst {
+		if inputs+outputs > 12 && regDst {
 			reloadLength = true
 		}
 	}
 
 	TEXT(name, 0, fmt.Sprintf("func(matrix []byte, in [][]byte, out [][]byte, start, n int)"))
-
+	x := ""
+	if xor {
+		x = "Xor"
+	}
 	// SWITCH DEFINITION:
-	s := fmt.Sprintf("n = (n>>%d)<<%d\n", perLoopBits, perLoopBits)
-	s += fmt.Sprintf("			mulAvxTwo_%dx%d_64(matrix, in, out, start, n)\n", inputs, outputs)
+	//s := fmt.Sprintf("n = (n>>%d)<<%d\n", perLoopBits, perLoopBits)
+	s := fmt.Sprintf("			mulAvxTwo_%dx%d_64%s(matrix, in, out, start, n)\n", inputs, outputs, x)
 	s += fmt.Sprintf("\t\t\t\treturn n\n")
-	switchDefs[inputs-1][outputs-1] = s
+	if xor {
+		switchDefsX[inputs-1][outputs-1] = s
+	} else {
+		switchDefs[inputs-1][outputs-1] = s
+	}
 
 	if loadNone {
 		Comment("Loading no tables to registers")
@@ -473,33 +536,31 @@ func genMulAvx2Sixty64(name string, inputs int, outputs int, xor bool) {
 	VPBROADCASTB(lowMask.AsX(), lowMask)
 
 	if reloadLength {
+		Commentf("Reload length to save a register")
 		length = Load(Param("n"), GP64())
 		SHRQ(U8(perLoopBits), length)
 	}
 	Label(name + "_loop")
+
 	if xor {
 		Commentf("Load %d outputs", outputs)
-	} else {
-		Commentf("Clear %d outputs", outputs)
-	}
-	for i := range dst {
-		if xor {
+		for i := range dst {
 			if regDst {
 				VMOVDQU(Mem{Base: dstPtr[i]}, dst[i])
+				VMOVDQU(Mem{Base: dstPtr[i], Disp: 32}, dst2[i])
 				if prefetchDst > 0 {
 					PREFETCHT0(Mem{Base: dstPtr[i], Disp: prefetchDst})
 				}
 				continue
 			}
 			ptr := GP64()
-			MOVQ(outBase, ptr)
+			MOVQ(Mem{Base: outSlicePtr, Disp: i * 24}, ptr)
 			VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1}, dst[i])
+			VMOVDQU(Mem{Base: ptr, Index: offset, Scale: 1, Disp: 32}, dst2[i])
+
 			if prefetchDst > 0 {
 				PREFETCHT0(Mem{Base: ptr, Disp: prefetchDst, Index: offset, Scale: 1})
 			}
-		} else {
-			VPXOR(dst[i], dst[i], dst[i])
-			VPXOR(dst2[i], dst2[i], dst2[i])
 		}
 	}
 
@@ -535,10 +596,16 @@ func genMulAvx2Sixty64(name string, inputs int, outputs int, xor bool) {
 				VPSHUFB(inHigh, inHi[i*outputs+j], lookHigh)
 				VPSHUFB(in2High, inHi[i*outputs+j], lookHigh2)
 			}
-			VPXOR(lookLow, lookHigh, lookLow)
-			VPXOR(lookLow2, lookHigh2, lookLow2)
-			VPXOR(lookLow, dst[j], dst[j])
-			VPXOR(lookLow2, dst2[j], dst2[j])
+			if i == 0 && !xor {
+				// We don't have any existing data, write directly.
+				VPXOR(lookLow, lookHigh, dst[j])
+				VPXOR(lookLow2, lookHigh2, dst2[j])
+			} else {
+				VPXOR(lookLow, lookHigh, lookLow)
+				VPXOR(lookLow2, lookHigh2, lookLow2)
+				VPXOR(lookLow, dst[j], dst[j])
+				VPXOR(lookLow2, dst2[j], dst2[j])
+			}
 		}
 	}
 	Commentf("Store %d outputs", outputs)
diff --git a/examples/simple-decoder.go b/examples/simple-decoder.go
index c251104..19e91ca 100644
--- a/examples/simple-decoder.go
+++ b/examples/simple-decoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples/simple-encoder.go b/examples/simple-encoder.go
index 1f0ed66..d90904c 100644
--- a/examples/simple-encoder.go
+++ b/examples/simple-encoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples/stream-decoder.go b/examples/stream-decoder.go
index 1e27183..ffa890c 100644
--- a/examples/stream-decoder.go
+++ b/examples/stream-decoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples/stream-encoder.go b/examples/stream-encoder.go
index 9f18d9b..658dc87 100644
--- a/examples/stream-encoder.go
+++ b/examples/stream-encoder.go
@@ -1,4 +1,5 @@
-//+build ignore
+//go:build ignore
+// +build ignore
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 //
diff --git a/examples_test.go b/examples_test.go
index 7ba7407..faf97d4 100644
--- a/examples_test.go
+++ b/examples_test.go
@@ -58,6 +58,63 @@ func ExampleEncoder() {
 	// ok
 }
 
+// Simple example of how to use EncodeIdx, the progressive encoding method.
+// Note that all error checks have been removed to keep it short.
+func ExampleEncoder_EncodeIdx() {
+	const dataShards = 7
+	const erasureShards = 3
+
+	// Create some sample data
+	var data = make([]byte, 250000)
+	fillRandom(data)
+
+	// Create an encoder with 7 data and 3 parity slices.
+	enc, _ := reedsolomon.New(dataShards, erasureShards)
+
+	// Split the data into shards
+	shards, _ := enc.Split(data)
+
+	// Zero erasure shards.
+	for i := 0; i < erasureShards; i++ {
+		clear := shards[dataShards+i]
+		for j := range clear {
+			clear[j] = 0
+		}
+	}
+
+	for i := 0; i < dataShards; i++ {
+		// Encode one shard at a time.
+		// Note how this gives linear access.
+		// There is however no requirement on shards being delivered in order.
+		// All parity shards will be updated on each run.
+		_ = enc.EncodeIdx(shards[i], i, shards[dataShards:])
+	}
+
+	// Verify the parity set
+	ok, err := enc.Verify(shards)
+	if ok {
+		fmt.Println("ok")
+	} else {
+		fmt.Println(err)
+	}
+
+	// Delete two shards
+	shards[dataShards-2], shards[dataShards-1] = nil, nil
+
+	// Reconstruct the shards
+	_ = enc.Reconstruct(shards)
+
+	// Verify the data set
+	ok, err = enc.Verify(shards)
+	if ok {
+		fmt.Println("ok")
+	} else {
+		fmt.Println(err)
+	}
+	// Output: ok
+	// ok
+}
+
 // This demonstrates that shards can be arbitrary sliced and
 // merged and still remain valid.
 func ExampleEncoder_slicing() {
diff --git a/galois.go b/galois.go
index ff93d65..30e9e03 100644
--- a/galois.go
+++ b/galois.go
@@ -76,7 +76,7 @@ func galSub(a, b byte) byte {
 // Table from https://github.com/templexxx/reedsolomon
 var invTable = [256]byte{0x0, 0x1, 0x8e, 0xf4, 0x47, 0xa7, 0x7a, 0xba, 0xad, 0x9d, 0xdd, 0x98, 0x3d, 0xaa, 0x5d, 0x96, 0xd8, 0x72, 0xc0, 0x58, 0xe0, 0x3e, 0x4c, 0x66, 0x90, 0xde, 0x55, 0x80, 0xa0, 0x83, 0x4b, 0x2a, 0x6c, 0xed, 0x39, 0x51, 0x60, 0x56, 0x2c, 0x8a, 0x70, 0xd0, 0x1f, 0x4a, 0x26, 0x8b, 0x33, 0x6e, 0x48, 0x89, 0x6f, 0x2e, 0xa4, 0xc3, 0x40, 0x5e, 0x50, 0x22, 0xcf, 0xa9, 0xab, 0xc, 0x15, 0xe1, 0x36, 0x5f, 0xf8, 0xd5, 0x92, 0x4e, 0xa6, 0x4, 0x30, 0x88, 0x2b, 0x1e, 0x16, 0x67, 0x45, 0x93, 0x38, 0x23, 0x68, 0x8c, 0x81, 0x1a, 0x25, 0x61, 0x13, 0xc1, 0xcb, 0x63, 0x97, 0xe, 0x37, 0x41, 0x24, 0x57, 0xca, 0x5b, 0xb9, 0xc4, 0x17, 0x4d, 0x52, 0x8d, 0xef, 0xb3, 0x20, 0xec, 0x2f, 0x32, 0x28, 0xd1, 0x11, 0xd9, 0xe9, 0xfb, 0xda, 0x79, 0xdb, 0x77, 0x6, 0xbb, 0x84, 0xcd, 0xfe, 0xfc, 0x1b, 0x54, 0xa1, 0x1d, 0x7c, 0xcc, 0xe4, 0xb0, 0x49, 0x31, 0x27, 0x2d, 0x53, 0x69, 0x2, 0xf5, 0x18, 0xdf, 0x44, 0x4f, 0x9b, 0xbc, 0xf, 0x5c, 0xb, 0xdc, 0xbd, 0x94, 0xac, 0x9, 0xc7, 0xa2, 0x1c, 0x82, 0x9f, 0xc6, 0x34, 0xc2, 0x46, 0x5, 0xce, 0x3b, 0xd, 0x3c, 0x9c, 0x8, 0xbe, 0xb7, 0x87, 0xe5, 0xee, 0x6b, 0xeb, 0xf2, 0xbf, 0xaf, 0xc5, 0x64, 0x7, 0x7b, 0x95, 0x9a, 0xae, 0xb6, 0x12, 0x59, 0xa5, 0x35, 0x65, 0xb8, 0xa3, 0x9e, 0xd2, 0xf7, 0x62, 0x5a, 0x85, 0x7d, 0xa8, 0x3a, 0x29, 0x71, 0xc8, 0xf6, 0xf9, 0x43, 0xd7, 0xd6, 0x10, 0x73, 0x76, 0x78, 0x99, 0xa, 0x19, 0x91, 0x14, 0x3f, 0xe6, 0xf0, 0x86, 0xb1, 0xe2, 0xf1, 0xfa, 0x74, 0xf3, 0xb4, 0x6d, 0x21, 0xb2, 0x6a, 0xe3, 0xe7, 0xb5, 0xea, 0x3, 0x8f, 0xd3, 0xc9, 0x42, 0xd4, 0xe8, 0x75, 0x7f, 0xff, 0x7e, 0xfd}
 
-var mulTable = [256][256]uint8{[256]uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+var mulTable = [256][256]uint8{{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
 	{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff},
 	{0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e, 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe, 0x1d, 0x1f, 0x19, 0x1b, 0x15, 0x17, 0x11, 0x13, 0xd, 0xf, 0x9, 0xb, 0x5, 0x7, 0x1, 0x3, 0x3d, 0x3f, 0x39, 0x3b, 0x35, 0x37, 0x31, 0x33, 0x2d, 0x2f, 0x29, 0x2b, 0x25, 0x27, 0x21, 0x23, 0x5d, 0x5f, 0x59, 0x5b, 0x55, 0x57, 0x51, 0x53, 0x4d, 0x4f, 0x49, 0x4b, 0x45, 0x47, 0x41, 0x43, 0x7d, 0x7f, 0x79, 0x7b, 0x75, 0x77, 0x71, 0x73, 0x6d, 0x6f, 0x69, 0x6b, 0x65, 0x67, 0x61, 0x63, 0x9d, 0x9f, 0x99, 0x9b, 0x95, 0x97, 0x91, 0x93, 0x8d, 0x8f, 0x89, 0x8b, 0x85, 0x87, 0x81, 0x83, 0xbd, 0xbf, 0xb9, 0xbb, 0xb5, 0xb7, 0xb1, 0xb3, 0xad, 0xaf, 0xa9, 0xab, 0xa5, 0xa7, 0xa1, 0xa3, 0xdd, 0xdf, 0xd9, 0xdb, 0xd5, 0xd7, 0xd1, 0xd3, 0xcd, 0xcf, 0xc9, 0xcb, 0xc5, 0xc7, 0xc1, 0xc3, 0xfd, 0xff, 0xf9, 0xfb, 0xf5, 0xf7, 0xf1, 0xf3, 0xed, 0xef, 0xe9, 0xeb, 0xe5, 0xe7, 0xe1, 0xe3},
 	{0x0, 0x3, 0x6, 0x5, 0xc, 0xf, 0xa, 0x9, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11, 0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21, 0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71, 0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41, 0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1, 0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1, 0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1, 0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81, 0x9d, 0x9e, 0x9b, 0x98, 0x91, 0x92, 0x97, 0x94, 0x85, 0x86, 0x83, 0x80, 0x89, 0x8a, 0x8f, 0x8c, 0xad, 0xae, 0xab, 0xa8, 0xa1, 0xa2, 0xa7, 0xa4, 0xb5, 0xb6, 0xb3, 0xb0, 0xb9, 0xba, 0xbf, 0xbc, 0xfd, 0xfe, 0xfb, 0xf8, 0xf1, 0xf2, 0xf7, 0xf4, 0xe5, 0xe6, 0xe3, 0xe0, 0xe9, 0xea, 0xef, 0xec, 0xcd, 0xce, 0xcb, 0xc8, 0xc1, 0xc2, 0xc7, 0xc4, 0xd5, 0xd6, 0xd3, 0xd0, 0xd9, 0xda, 0xdf, 0xdc, 0x5d, 0x5e, 0x5b, 0x58, 0x51, 0x52, 0x57, 0x54, 0x45, 0x46, 0x43, 0x40, 0x49, 0x4a, 0x4f, 0x4c, 0x6d, 0x6e, 0x6b, 0x68, 0x61, 0x62, 0x67, 0x64, 0x75, 0x76, 0x73, 0x70, 0x79, 0x7a, 0x7f, 0x7c, 0x3d, 0x3e, 0x3b, 0x38, 0x31, 0x32, 0x37, 0x34, 0x25, 0x26, 0x23, 0x20, 0x29, 0x2a, 0x2f, 0x2c, 0xd, 0xe, 0xb, 0x8, 0x1, 0x2, 0x7, 0x4, 0x15, 0x16, 0x13, 0x10, 0x19, 0x1a, 0x1f, 0x1c},
@@ -901,7 +901,7 @@ func galExp(a byte, n int) byte {
 	return expTable[logResult]
 }
 
-func genAvx2Matrix(matrixRows [][]byte, inputs, outputs int, dst []byte) []byte {
+func genAvx2Matrix(matrixRows [][]byte, inputs, inIdx, outputs int, dst []byte) []byte {
 	if !avx2CodeGen {
 		panic("codegen not enabled")
 	}
@@ -915,7 +915,7 @@ func genAvx2Matrix(matrixRows [][]byte, inputs, outputs int, dst []byte) []byte
 		dst = dst[:wantBytes]
 	}
 	for i, row := range matrixRows[:outputs] {
-		for j, idx := range row[:inputs] {
+		for j, idx := range row[inIdx : inIdx+inputs] {
 			dstIdx := (j*outputs + i) * 64
 			dstPart := dst[dstIdx:]
 			dstPart = dstPart[:64]
diff --git a/galoisAvx512_amd64.go b/galoisAvx512_amd64.go
index 720196f..79207e6 100644
--- a/galoisAvx512_amd64.go
+++ b/galoisAvx512_amd64.go
@@ -1,6 +1,5 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
+//go:build !noasm && !appengine && !gccgo
+// +build !noasm,!appengine,!gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 // Copyright 2019, Minio, Inc.
@@ -226,8 +225,9 @@ func galMulAVX512LastInput(inputOffset int, inputEnd int, outputOffset int, outp
 
 // Perform the same as codeSomeShards, but taking advantage of
 // AVX512 parallelism for up to 4x faster execution as compared to AVX2
-func (r *reedSolomon) codeSomeShardsAvx512(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+func (r *reedSolomon) codeSomeShardsAvx512(matrixRows, inputs, outputs [][]byte, byteCount int) {
 	// Process using no goroutines
+	outputCount := len(outputs)
 	start, end := 0, r.o.perRound
 	if end > byteCount {
 		end = byteCount
@@ -273,7 +273,8 @@ func (r *reedSolomon) codeSomeShardsAvx512(matrixRows, inputs, outputs [][]byte,
 
 // Perform the same as codeSomeShards, but taking advantage of
 // AVX512 parallelism for up to 4x faster execution as compared to AVX2
-func (r *reedSolomon) codeSomeShardsAvx512P(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
+func (r *reedSolomon) codeSomeShardsAvx512P(matrixRows, inputs, outputs [][]byte, byteCount int) {
+	outputCount := len(outputs)
 	var wg sync.WaitGroup
 	do := byteCount / r.o.maxGoroutines
 	if do < r.o.minSplitSize {
diff --git a/galoisAvx512_amd64_test.go b/galoisAvx512_amd64_test.go
index 685302f..6792e98 100644
--- a/galoisAvx512_amd64_test.go
+++ b/galoisAvx512_amd64_test.go
@@ -1,6 +1,5 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
+//go:build !noasm && !appengine && !gccgo
+// +build !noasm,!appengine,!gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 // Copyright 2019, Minio, Inc.
@@ -332,9 +331,9 @@ func testCodeSomeShardsAvx512WithLength(t *testing.T, ds, ps, l int, parallel bo
 	}
 
 	if parallel {
-		r.codeSomeShardsAvx512P(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
+		r.codeSomeShardsAvx512P(r.parity, shards[:r.DataShards], shards[r.DataShards:], len(shards[0]))
 	} else {
-		r.codeSomeShardsAvx512(r.parity, shards[:r.DataShards], shards[r.DataShards:], r.ParityShards, len(shards[0]))
+		r.codeSomeShardsAvx512(r.parity, shards[:r.DataShards], shards[r.DataShards:r.DataShards+r.ParityShards], len(shards[0]))
 	}
 
 	correct, _ := r.Verify(shards)
diff --git a/galois_amd64.go b/galois_amd64.go
index f757f9d..d722e31 100644
--- a/galois_amd64.go
+++ b/galois_amd64.go
@@ -1,6 +1,5 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
+//go:build !noasm && !appengine && !gccgo
+// +build !noasm,!appengine,!gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 
@@ -108,6 +107,9 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 		in = in[done:]
 		out = out[done:]
 	}
+	if len(in) == 0 {
+		return
+	}
 	out = out[:len(in)]
 	mt := mulTable[c][:256]
 	for i := range in {
@@ -115,7 +117,7 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 	}
 }
 
-// slice galois add
+// simple slice xor
 func sliceXor(in, out []byte, o *options) {
 	if o.useSSE2 {
 		if len(in) >= bigSwitchover {
diff --git a/galois_arm64.go b/galois_arm64.go
index 23a1dd2..df79a98 100644
--- a/galois_arm64.go
+++ b/galois_arm64.go
@@ -1,6 +1,5 @@
-//+build !noasm
-//+build !appengine
-//+build !gccgo
+//go:build !noasm && !appengine && !gccgo
+// +build !noasm,!appengine,!gccgo
 
 // Copyright 2015, Klaus Post, see LICENSE for details.
 // Copyright 2017, Minio, Inc.
@@ -52,7 +51,7 @@ func galMulSliceXor(c byte, in, out []byte, o *options) {
 	}
 }
 
-// slice galois add
+// simple slice xor
 func sliceXor(in, out []byte, o *options) {
 
 	galXorNEON(in, out)
diff --git a/galois_gen_amd64.go b/galois_gen_amd64.go
index dbd77aa..817c7ea 100644
--- a/galois_gen_amd64.go
+++ b/galois_gen_amd64.go
@@ -1,9 +1,7 @@
 // Code generated by command: go run gen.go -out ../galois_gen_amd64.s -stubs ../galois_gen_amd64.go -pkg=reedsolomon. DO NOT EDIT.
 
-// +build !appengine
-// +build !noasm
-// +build !nogen
-// +build gc
+//go:build !appengine && !noasm && !nogen && gc
+// +build !appengine,!noasm,!nogen,gc
 
 package reedsolomon
 
@@ -17,6 +15,14 @@ func mulAvxTwo_1x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_1x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x1Xor takes 1 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_1x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x1_64Xor takes 1 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_1x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x2 takes 1 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -27,6 +33,14 @@ func mulAvxTwo_1x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_1x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x2Xor takes 1 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_1x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x2_64Xor takes 1 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_1x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x3 takes 1 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -37,41 +51,77 @@ func mulAvxTwo_1x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_1x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x3Xor takes 1 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_1x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_1x3_64Xor takes 1 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_1x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x4 takes 1 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_1x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x4Xor takes 1 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_1x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x5 takes 1 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_1x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x5Xor takes 1 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_1x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x6 takes 1 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_1x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x6Xor takes 1 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_1x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x7 takes 1 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_1x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x7Xor takes 1 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_1x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x8 takes 1 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_1x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x8Xor takes 1 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_1x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x9 takes 1 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_1x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x9Xor takes 1 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_1x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_1x10 takes 1 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_1x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_1x10Xor takes 1 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_1x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x1 takes 2 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -82,6 +132,14 @@ func mulAvxTwo_2x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_2x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x1Xor takes 2 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_2x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x1_64Xor takes 2 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_2x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x2 takes 2 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -92,6 +150,14 @@ func mulAvxTwo_2x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_2x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x2Xor takes 2 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_2x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x2_64Xor takes 2 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_2x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x3 takes 2 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -102,41 +168,77 @@ func mulAvxTwo_2x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_2x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x3Xor takes 2 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_2x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_2x3_64Xor takes 2 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_2x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x4 takes 2 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_2x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x4Xor takes 2 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_2x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x5 takes 2 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_2x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x5Xor takes 2 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_2x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x6 takes 2 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_2x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x6Xor takes 2 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_2x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x7 takes 2 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_2x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x7Xor takes 2 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_2x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x8 takes 2 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_2x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x8Xor takes 2 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_2x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x9 takes 2 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_2x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x9Xor takes 2 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_2x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_2x10 takes 2 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_2x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_2x10Xor takes 2 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_2x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x1 takes 3 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -147,6 +249,14 @@ func mulAvxTwo_3x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_3x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x1Xor takes 3 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_3x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x1_64Xor takes 3 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_3x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x2 takes 3 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -157,6 +267,14 @@ func mulAvxTwo_3x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_3x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x2Xor takes 3 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_3x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x2_64Xor takes 3 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_3x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x3 takes 3 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -167,41 +285,77 @@ func mulAvxTwo_3x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_3x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x3Xor takes 3 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_3x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_3x3_64Xor takes 3 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_3x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x4 takes 3 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_3x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x4Xor takes 3 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_3x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x5 takes 3 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_3x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x5Xor takes 3 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_3x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x6 takes 3 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_3x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x6Xor takes 3 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_3x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x7 takes 3 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_3x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x7Xor takes 3 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_3x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x8 takes 3 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_3x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x8Xor takes 3 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_3x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x9 takes 3 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_3x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x9Xor takes 3 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_3x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_3x10 takes 3 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_3x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_3x10Xor takes 3 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_3x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x1 takes 4 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -212,6 +366,14 @@ func mulAvxTwo_4x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_4x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x1Xor takes 4 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_4x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_4x1_64Xor takes 4 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_4x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x2 takes 4 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -222,6 +384,14 @@ func mulAvxTwo_4x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_4x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x2Xor takes 4 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_4x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_4x2_64Xor takes 4 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_4x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x3 takes 4 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -232,41 +402,77 @@ func mulAvxTwo_4x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_4x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x3Xor takes 4 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_4x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_4x3_64Xor takes 4 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_4x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x4 takes 4 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_4x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x4Xor takes 4 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_4x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x5 takes 4 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_4x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x5Xor takes 4 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_4x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x6 takes 4 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_4x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x6Xor takes 4 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_4x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x7 takes 4 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_4x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x7Xor takes 4 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_4x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x8 takes 4 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_4x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x8Xor takes 4 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_4x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x9 takes 4 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_4x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x9Xor takes 4 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_4x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_4x10 takes 4 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_4x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_4x10Xor takes 4 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_4x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x1 takes 5 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -277,6 +483,14 @@ func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_5x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x1Xor takes 5 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_5x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_5x1_64Xor takes 5 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_5x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x2 takes 5 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -287,6 +501,14 @@ func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_5x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x2Xor takes 5 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_5x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_5x2_64Xor takes 5 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_5x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x3 takes 5 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -297,41 +519,77 @@ func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_5x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x3Xor takes 5 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_5x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_5x3_64Xor takes 5 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_5x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x4 takes 5 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x4Xor takes 5 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_5x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x5 takes 5 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x5Xor takes 5 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_5x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x6 takes 5 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_5x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x6Xor takes 5 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_5x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x7 takes 5 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x7Xor takes 5 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_5x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x8 takes 5 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x8Xor takes 5 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_5x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x9 takes 5 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_5x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x9Xor takes 5 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_5x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_5x10 takes 5 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_5x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_5x10Xor takes 5 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_5x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x1 takes 6 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -342,6 +600,14 @@ func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_6x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x1Xor takes 6 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_6x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x1_64Xor takes 6 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_6x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x2 takes 6 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -352,6 +618,14 @@ func mulAvxTwo_6x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_6x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x2Xor takes 6 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_6x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x2_64Xor takes 6 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_6x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x3 takes 6 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -362,41 +636,77 @@ func mulAvxTwo_6x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_6x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x3Xor takes 6 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_6x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_6x3_64Xor takes 6 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_6x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x4 takes 6 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_6x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x4Xor takes 6 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_6x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x5 takes 6 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_6x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x5Xor takes 6 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_6x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x6 takes 6 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_6x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x6Xor takes 6 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_6x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x7 takes 6 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_6x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x7Xor takes 6 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_6x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x8 takes 6 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_6x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x8Xor takes 6 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_6x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x9 takes 6 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_6x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x9Xor takes 6 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_6x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_6x10 takes 6 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_6x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_6x10Xor takes 6 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_6x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x1 takes 7 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -407,6 +717,14 @@ func mulAvxTwo_7x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_7x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x1Xor takes 7 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_7x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x1_64Xor takes 7 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_7x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x2 takes 7 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -417,6 +735,14 @@ func mulAvxTwo_7x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_7x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x2Xor takes 7 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_7x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x2_64Xor takes 7 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_7x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x3 takes 7 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -427,41 +753,77 @@ func mulAvxTwo_7x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_7x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x3Xor takes 7 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_7x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_7x3_64Xor takes 7 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_7x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x4 takes 7 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_7x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x4Xor takes 7 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_7x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x5 takes 7 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_7x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x5Xor takes 7 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_7x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x6 takes 7 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_7x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x6Xor takes 7 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_7x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x7 takes 7 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_7x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x7Xor takes 7 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_7x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x8 takes 7 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_7x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x8Xor takes 7 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_7x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x9 takes 7 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_7x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x9Xor takes 7 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_7x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_7x10 takes 7 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_7x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_7x10Xor takes 7 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_7x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x1 takes 8 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -472,6 +834,14 @@ func mulAvxTwo_8x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_8x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x1Xor takes 8 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_8x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x1_64Xor takes 8 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_8x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x2 takes 8 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -482,6 +852,14 @@ func mulAvxTwo_8x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_8x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x2Xor takes 8 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_8x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x2_64Xor takes 8 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_8x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x3 takes 8 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -492,41 +870,77 @@ func mulAvxTwo_8x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_8x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x3Xor takes 8 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_8x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_8x3_64Xor takes 8 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_8x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x4 takes 8 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_8x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x4Xor takes 8 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_8x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x5 takes 8 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_8x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x5Xor takes 8 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_8x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x6 takes 8 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_8x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x6Xor takes 8 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_8x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x7 takes 8 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_8x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x7Xor takes 8 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_8x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x8 takes 8 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_8x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x8Xor takes 8 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_8x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x9 takes 8 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_8x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x9Xor takes 8 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_8x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_8x10 takes 8 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_8x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_8x10Xor takes 8 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_8x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x1 takes 9 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -537,6 +951,14 @@ func mulAvxTwo_9x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_9x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x1Xor takes 9 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_9x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x1_64Xor takes 9 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_9x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x2 takes 9 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -547,6 +969,14 @@ func mulAvxTwo_9x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_9x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x2Xor takes 9 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_9x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x2_64Xor takes 9 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_9x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x3 takes 9 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -557,41 +987,77 @@ func mulAvxTwo_9x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_9x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x3Xor takes 9 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_9x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_9x3_64Xor takes 9 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_9x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x4 takes 9 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_9x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x4Xor takes 9 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_9x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x5 takes 9 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_9x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x5Xor takes 9 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_9x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x6 takes 9 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_9x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x6Xor takes 9 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_9x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x7 takes 9 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_9x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x7Xor takes 9 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_9x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x8 takes 9 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_9x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x8Xor takes 9 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_9x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x9 takes 9 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_9x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x9Xor takes 9 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_9x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_9x10 takes 9 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_9x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_9x10Xor takes 9 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_9x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x1 takes 10 inputs and produces 1 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -602,6 +1068,14 @@ func mulAvxTwo_10x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_10x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x1Xor takes 10 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_10x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x1_64Xor takes 10 inputs and produces 1 outputs.
+//go:noescape
+func mulAvxTwo_10x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x2 takes 10 inputs and produces 2 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -612,6 +1086,14 @@ func mulAvxTwo_10x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_10x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x2Xor takes 10 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_10x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x2_64Xor takes 10 inputs and produces 2 outputs.
+//go:noescape
+func mulAvxTwo_10x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x3 takes 10 inputs and produces 3 outputs.
 // The output is initialized to 0.
 //go:noescape
@@ -622,37 +1104,73 @@ func mulAvxTwo_10x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 //go:noescape
 func mulAvxTwo_10x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x3Xor takes 10 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_10x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x3_64Xor takes 10 inputs and produces 3 outputs.
+//go:noescape
+func mulAvxTwo_10x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x4 takes 10 inputs and produces 4 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_10x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x4Xor takes 10 inputs and produces 4 outputs.
+//go:noescape
+func mulAvxTwo_10x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x5 takes 10 inputs and produces 5 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_10x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x5Xor takes 10 inputs and produces 5 outputs.
+//go:noescape
+func mulAvxTwo_10x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x6 takes 10 inputs and produces 6 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_10x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x6Xor takes 10 inputs and produces 6 outputs.
+//go:noescape
+func mulAvxTwo_10x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x7 takes 10 inputs and produces 7 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_10x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x7Xor takes 10 inputs and produces 7 outputs.
+//go:noescape
+func mulAvxTwo_10x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x8 takes 10 inputs and produces 8 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_10x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x8Xor takes 10 inputs and produces 8 outputs.
+//go:noescape
+func mulAvxTwo_10x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x9 takes 10 inputs and produces 9 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_10x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 
+// mulAvxTwo_10x9Xor takes 10 inputs and produces 9 outputs.
+//go:noescape
+func mulAvxTwo_10x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
 // mulAvxTwo_10x10 takes 10 inputs and produces 10 outputs.
 // The output is initialized to 0.
 //go:noescape
 func mulAvxTwo_10x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+
+// mulAvxTwo_10x10Xor takes 10 inputs and produces 10 outputs.
+//go:noescape
+func mulAvxTwo_10x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
diff --git a/galois_gen_amd64.s b/galois_gen_amd64.s
index ab699ac..36e885f 100644
--- a/galois_gen_amd64.s
+++ b/galois_gen_amd64.s
@@ -36,15 +36,124 @@ TEXT ·mulAvxTwo_1x1(SB), NOSPLIT, $0-88
 	VPBROADCASTB X3, Y3
 
 mulAvxTwo_1x1_loop:
-	// Clear 1 outputs
-	VPXOR Y2, Y2, Y2
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (CX), Y2
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y2, Y4
+	VPAND   Y3, Y2, Y2
+	VPAND   Y3, Y4, Y4
+	VPSHUFB Y2, Y0, Y2
+	VPSHUFB Y4, Y1, Y4
+	VPXOR   Y2, Y4, Y2
+
+	// Store 1 outputs
+	VMOVDQU Y2, (DX)
+	ADDQ    $0x20, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x1_loop
+	VZEROUPPER
+
+mulAvxTwo_1x1_end:
+	RET
+
+// func mulAvxTwo_1x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x1_64(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x1_64_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), DX
+	MOVQ    start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
+
+	// Add start offset to input
+	ADDQ         BX, CX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_1x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y3
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y2, Y6
+	VPSRLQ  $0x04, Y3, Y5
+	VPAND   Y4, Y2, Y2
+	VPAND   Y4, Y3, Y3
+	VPAND   Y4, Y6, Y6
+	VPAND   Y4, Y5, Y5
+	VPSHUFB Y2, Y0, Y2
+	VPSHUFB Y3, Y0, Y3
+	VPSHUFB Y6, Y1, Y6
+	VPSHUFB Y5, Y1, Y5
+	VPXOR   Y2, Y6, Y2
+	VPXOR   Y3, Y5, Y3
+
+	// Store 1 outputs
+	VMOVDQU Y2, (DX)
+	VMOVDQU Y3, 32(DX)
+	ADDQ    $0x40, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_1x1_64_end:
+	RET
+
+// func mulAvxTwo_1x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 6 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), DX
+	MOVQ    start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
+
+	// Add start offset to input
+	ADDQ         BX, CX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X3
+	VPBROADCASTB X3, Y3
 
+mulAvxTwo_1x1Xor_loop:
 	// Load and process 32 bytes from input 0 to 1 outputs
 	VMOVDQU (CX), Y4
 	ADDQ    $0x20, CX
 	VPSRLQ  $0x04, Y4, Y5
 	VPAND   Y3, Y4, Y4
 	VPAND   Y3, Y5, Y5
+	VMOVDQU (DX), Y2
 	VPSHUFB Y4, Y0, Y4
 	VPSHUFB Y5, Y1, Y5
 	VPXOR   Y4, Y5, Y4
@@ -56,75 +165,76 @@ mulAvxTwo_1x1_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x1_loop
+	JNZ  mulAvxTwo_1x1Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x1_end:
+mulAvxTwo_1x1Xor_end:
 	RET
 
-// func mulAvxTwo_1x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_1x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x1_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 6 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_1x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), AX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  start+72(FP), BX
+TEXT ·mulAvxTwo_1x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x1_64Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), DX
+	MOVQ    start+72(FP), BX
+
+	// Add start offset to output
+	ADDQ BX, DX
 
 	// Add start offset to input
-	ADDQ         BX, AX
-	MOVQ         $0x0000000f, SI
-	MOVQ         SI, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), SI
-	SHRQ         $0x06, SI
+	ADDQ         BX, CX
+	MOVQ         $0x0000000f, BX
+	MOVQ         BX, X4
+	VPBROADCASTB X4, Y4
 
-mulAvxTwo_1x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulAvxTwo_1x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (DX), Y2
+	VMOVDQU 32(DX), Y3
 
 	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y7
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y5, Y6
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y5, Y5
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y6, Y6
+	VPAND   Y4, Y8, Y8
+	VPSHUFB Y5, Y0, Y5
+	VPSHUFB Y7, Y0, Y7
+	VPSHUFB Y6, Y1, Y6
+	VPSHUFB Y8, Y1, Y8
 	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
 
 	// Store 1 outputs
-	MOVQ    (DX), DI
-	VMOVDQU Y0, (DI)(BX*1)
-	VMOVDQU Y1, 32(DI)(BX*1)
+	VMOVDQU Y2, (DX)
+	VMOVDQU Y3, 32(DX)
+	ADDQ    $0x40, DX
 
 	// Prepare for next loop
-	ADDQ $0x40, BX
-	DECQ SI
-	JNZ  mulAvxTwo_1x1_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x1_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x1_64_end:
+mulAvxTwo_1x1_64Xor_end:
 	RET
 
 // func mulAvxTwo_1x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -160,20 +270,151 @@ TEXT ·mulAvxTwo_1x2(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_1x2_loop:
-	// Clear 2 outputs
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (CX), Y8
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y9, Y9
+	VPSHUFB Y8, Y0, Y5
+	VPSHUFB Y9, Y1, Y7
+	VPXOR   Y5, Y7, Y4
+	VPSHUFB Y8, Y2, Y5
+	VPSHUFB Y9, Y3, Y7
+	VPXOR   Y5, Y7, Y5
+
+	// Store 2 outputs
+	VMOVDQU Y4, (BX)
+	ADDQ    $0x20, BX
+	VMOVDQU Y5, (DX)
+	ADDQ    $0x20, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x2_loop
+	VZEROUPPER
+
+mulAvxTwo_1x2_end:
+	RET
+
+// func mulAvxTwo_1x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x2_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x2_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), BX
+	MOVQ  start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_1x2_64_loop:
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (DX), Y7
+	VMOVDQU 32(DX), Y9
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y8, Y8
+	VPAND   Y4, Y10, Y10
+	VMOVDQU (CX), Y2
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y9, Y2, Y3
+	VPSHUFB Y7, Y2, Y2
+	VPSHUFB Y10, Y6, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y2, Y6, Y0
+	VPXOR   Y3, Y5, Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y9, Y2, Y3
+	VPSHUFB Y7, Y2, Y2
+	VPSHUFB Y10, Y6, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y2, Y6, Y2
+	VPXOR   Y3, Y5, Y3
+
+	// Store 2 outputs
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (BX)
+	VMOVDQU Y3, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_1x2_64_end:
+	RET
+
+// func mulAvxTwo_1x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x2Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 11 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x2Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), BX
+	MOVQ    24(DX), DX
+	MOVQ    start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+	ADDQ SI, DX
+
+	// Add start offset to input
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X6
+	VPBROADCASTB X6, Y6
 
+mulAvxTwo_1x2Xor_loop:
 	// Load and process 32 bytes from input 0 to 2 outputs
 	VMOVDQU (CX), Y9
 	ADDQ    $0x20, CX
 	VPSRLQ  $0x04, Y9, Y10
 	VPAND   Y6, Y9, Y9
 	VPAND   Y6, Y10, Y10
+	VMOVDQU (BX), Y4
 	VPSHUFB Y9, Y0, Y7
 	VPSHUFB Y10, Y1, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y4, Y4
+	VMOVDQU (DX), Y5
 	VPSHUFB Y9, Y2, Y7
 	VPSHUFB Y10, Y3, Y8
 	VPXOR   Y7, Y8, Y7
@@ -187,48 +428,52 @@ mulAvxTwo_1x2_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x2_loop
+	JNZ  mulAvxTwo_1x2Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x2_end:
+mulAvxTwo_1x2Xor_end:
 	RET
 
-// func mulAvxTwo_1x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_1x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x2_64(SB), $0-88
+TEXT ·mulAvxTwo_1x2_64Xor(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 11 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 17 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_1x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), AX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  start+72(FP), BX
+	JZ    mulAvxTwo_1x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), BX
+	MOVQ  start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
 
 	// Add start offset to input
-	ADDQ         BX, AX
-	MOVQ         $0x0000000f, SI
-	MOVQ         SI, X4
+	ADDQ         DI, DX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), SI
-	SHRQ         $0x06, SI
 
-mulAvxTwo_1x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+mulAvxTwo_1x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (BX), Y2
+	VMOVDQU 32(BX), Y3
 
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -257,20 +502,19 @@ mulAvxTwo_1x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Store 2 outputs
-	MOVQ    (DX), DI
-	VMOVDQU Y0, (DI)(BX*1)
-	VMOVDQU Y1, 32(DI)(BX*1)
-	MOVQ    24(DX), DI
-	VMOVDQU Y2, (DI)(BX*1)
-	VMOVDQU Y3, 32(DI)(BX*1)
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (BX)
+	VMOVDQU Y3, 32(BX)
+	ADDQ    $0x40, BX
 
 	// Prepare for next loop
-	ADDQ $0x40, BX
-	DECQ SI
-	JNZ  mulAvxTwo_1x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x2_64_end:
+mulAvxTwo_1x2_64Xor_end:
 	RET
 
 // func mulAvxTwo_1x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -310,25 +554,178 @@ TEXT ·mulAvxTwo_1x3(SB), NOSPLIT, $0-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_1x3_loop:
-	// Clear 3 outputs
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (CX), Y11
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y9, Y11, Y11
+	VPAND   Y9, Y12, Y12
+	VPSHUFB Y11, Y0, Y8
+	VPSHUFB Y12, Y1, Y10
+	VPXOR   Y8, Y10, Y6
+	VPSHUFB Y11, Y2, Y8
+	VPSHUFB Y12, Y3, Y10
+	VPXOR   Y8, Y10, Y7
+	VPSHUFB Y11, Y4, Y8
+	VPSHUFB Y12, Y5, Y10
+	VPXOR   Y8, Y10, Y8
+
+	// Store 3 outputs
+	VMOVDQU Y6, (BX)
+	ADDQ    $0x20, BX
+	VMOVDQU Y7, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y8, (DX)
+	ADDQ    $0x20, DX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x3_loop
+	VZEROUPPER
+
+mulAvxTwo_1x3_end:
+	RET
+
+// func mulAvxTwo_1x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x3_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x3_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), BX
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, BX
+
+	// Add start offset to input
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_1x3_64_loop:
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y10, Y10
+	VPAND   Y6, Y12, Y12
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y11, Y4, Y5
+	VPSHUFB Y9, Y4, Y4
+	VPSHUFB Y12, Y8, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y4, Y8, Y0
+	VPXOR   Y5, Y7, Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y11, Y4, Y5
+	VPSHUFB Y9, Y4, Y4
+	VPSHUFB Y12, Y8, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y4, Y8, Y2
+	VPXOR   Y5, Y7, Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y11, Y4, Y5
+	VPSHUFB Y9, Y4, Y4
+	VPSHUFB Y12, Y8, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y4, Y8, Y4
+	VPXOR   Y5, Y7, Y5
+
+	// Store 3 outputs
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x3_64_loop
+	VZEROUPPER
+
+mulAvxTwo_1x3_64_end:
+	RET
+
+// func mulAvxTwo_1x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x3Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_1x3Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), CX
+	MOVQ    out_base+48(FP), DX
+	MOVQ    (DX), BX
+	MOVQ    24(DX), SI
+	MOVQ    48(DX), DX
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, BX
+	ADDQ DI, SI
+	ADDQ DI, DX
+
+	// Add start offset to input
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X9
+	VPBROADCASTB X9, Y9
 
+mulAvxTwo_1x3Xor_loop:
 	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (CX), Y12
 	ADDQ    $0x20, CX
 	VPSRLQ  $0x04, Y12, Y13
 	VPAND   Y9, Y12, Y12
 	VPAND   Y9, Y13, Y13
+	VMOVDQU (BX), Y6
 	VPSHUFB Y12, Y0, Y10
 	VPSHUFB Y13, Y1, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y6, Y6
+	VMOVDQU (SI), Y7
 	VPSHUFB Y12, Y2, Y10
 	VPSHUFB Y13, Y3, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y7, Y7
+	VMOVDQU (DX), Y8
 	VPSHUFB Y12, Y4, Y10
 	VPSHUFB Y13, Y5, Y11
 	VPXOR   Y10, Y11, Y10
@@ -344,50 +741,56 @@ mulAvxTwo_1x3_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x3_loop
+	JNZ  mulAvxTwo_1x3Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x3_end:
+mulAvxTwo_1x3Xor_end:
 	RET
 
-// func mulAvxTwo_1x3_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_1x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x3_64(SB), $0-88
+TEXT ·mulAvxTwo_1x3_64Xor(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 14 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_1x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), AX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  out_base+48(FP), DX
-	MOVQ  start+72(FP), BX
+	JZ    mulAvxTwo_1x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), BX
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, SI
+	ADDQ R8, DI
+	ADDQ R8, BX
 
 	// Add start offset to input
-	ADDQ         BX, AX
-	MOVQ         $0x0000000f, SI
-	MOVQ         SI, X6
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), SI
-	SHRQ         $0x06, SI
 
-mulAvxTwo_1x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+mulAvxTwo_1x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (SI), Y0
+	VMOVDQU 32(SI), Y1
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+	VMOVDQU (BX), Y4
+	VMOVDQU 32(BX), Y5
 
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -426,23 +829,22 @@ mulAvxTwo_1x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Store 3 outputs
-	MOVQ    (DX), DI
-	VMOVDQU Y0, (DI)(BX*1)
-	VMOVDQU Y1, 32(DI)(BX*1)
-	MOVQ    24(DX), DI
-	VMOVDQU Y2, (DI)(BX*1)
-	VMOVDQU Y3, 32(DI)(BX*1)
-	MOVQ    48(DX), DI
-	VMOVDQU Y4, (DI)(BX*1)
-	VMOVDQU Y5, 32(DI)(BX*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, BX
-	DECQ SI
-	JNZ  mulAvxTwo_1x3_64_loop
+	VMOVDQU Y0, (SI)
+	VMOVDQU Y1, 32(SI)
+	ADDQ    $0x40, SI
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x3_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x3_64_end:
+mulAvxTwo_1x3_64Xor_end:
 	RET
 
 // func mulAvxTwo_1x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -478,42 +880,32 @@ TEXT ·mulAvxTwo_1x4(SB), NOSPLIT, $0-88
 	VPBROADCASTB X4, Y4
 
 mulAvxTwo_1x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (DX), Y7
+	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y4, Y6, Y6
 	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
+	VPXOR   Y3, Y5, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
+	VPXOR   Y3, Y5, Y1
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
+	VPXOR   Y3, Y5, Y2
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y3, Y5, Y3
 
 	// Store 4 outputs
 	VMOVDQU Y0, (SI)
@@ -533,29 +925,115 @@ mulAvxTwo_1x4_loop:
 mulAvxTwo_1x4_end:
 	RET
 
-// func mulAvxTwo_1x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_1x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x5(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_1x4Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 20 YMM used
+	// Full registers estimated 17 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_1x5_end
+	JZ    mulAvxTwo_1x4Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), DX
 	MOVQ  out_base+48(FP), BX
 	MOVQ  (BX), SI
 	MOVQ  24(BX), DI
 	MOVQ  48(BX), R8
-	MOVQ  72(BX), R9
-	MOVQ  96(BX), BX
-	MOVQ  start+72(FP), R10
+	MOVQ  72(BX), BX
+	MOVQ  start+72(FP), R9
 
 	// Add start offset to output
-	ADDQ R10, SI
+	ADDQ R9, SI
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, BX
+
+	// Add start offset to input
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_1x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (SI), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU (DI), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU (R8), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU (BX), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Store 4 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x4Xor_end:
+	RET
+
+// func mulAvxTwo_1x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x5(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x5_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), BX
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, SI
 	ADDQ R10, DI
 	ADDQ R10, R8
 	ADDQ R10, R9
@@ -568,43 +1046,128 @@ TEXT ·mulAvxTwo_1x5(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_1x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y5, Y7, Y7
+	VPAND   Y5, Y8, Y8
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y2
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y3
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y4, Y4
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y4, Y6, Y4
+
+	// Store 5 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x5_loop
+	VZEROUPPER
+
+mulAvxTwo_1x5_end:
+	RET
+
+// func mulAvxTwo_1x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), BX
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, BX
 
+	// Add start offset to input
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_1x5Xor_loop:
 	// Load and process 32 bytes from input 0 to 5 outputs
 	VMOVDQU (DX), Y8
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y8, Y9
 	VPAND   Y5, Y8, Y8
 	VPAND   Y5, Y9, Y9
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y6
 	VMOVDQU 32(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y0, Y0
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y1, Y1
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y2, Y2
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y3, Y3
+	VMOVDQU (BX), Y4
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
@@ -626,10 +1189,10 @@ mulAvxTwo_1x5_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x5_loop
+	JNZ  mulAvxTwo_1x5Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x5_end:
+mulAvxTwo_1x5Xor_end:
 	RET
 
 // func mulAvxTwo_1x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -669,50 +1232,144 @@ TEXT ·mulAvxTwo_1x6(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_1x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y9, Y9
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y5, Y5
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y5, Y7, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y5, Y5
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y5, Y7, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y5, Y5
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y5, Y7, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y5, Y5
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y5, Y7, Y3
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y5, Y5
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y5, Y7, Y4
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y5, Y5
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y5, Y7, Y5
+
+	// Store 6 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x6_loop
+	VZEROUPPER
+
+mulAvxTwo_1x6_end:
+	RET
+
+// func mulAvxTwo_1x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), BX
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, SI
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, BX
+
+	// Add start offset to input
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X6
+	VPBROADCASTB X6, Y6
 
+mulAvxTwo_1x6Xor_loop:
 	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (DX), Y9
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPAND   Y6, Y9, Y9
 	VPAND   Y6, Y10, Y10
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y7
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y0, Y0
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y1, Y1
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y2, Y2
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y3, Y3
+	VMOVDQU (R10), Y4
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y4, Y4
+	VMOVDQU (BX), Y5
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
@@ -736,10 +1393,10 @@ mulAvxTwo_1x6_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x6_loop
+	JNZ  mulAvxTwo_1x6Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x6_end:
+mulAvxTwo_1x6Xor_end:
 	RET
 
 // func mulAvxTwo_1x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -781,57 +1438,160 @@ TEXT ·mulAvxTwo_1x7(SB), NOSPLIT, $0-88
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_1x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y7, Y9, Y9
+	VPAND   Y7, Y10, Y10
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y4
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y5
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y6, Y6
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y6, Y8, Y6
+
+	// Store 7 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x7_loop
+	VZEROUPPER
+
+mulAvxTwo_1x7_end:
+	RET
+
+// func mulAvxTwo_1x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), BX
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, SI
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, BX
+
+	// Add start offset to input
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X7
+	VPBROADCASTB X7, Y7
 
+mulAvxTwo_1x7Xor_loop:
 	// Load and process 32 bytes from input 0 to 7 outputs
 	VMOVDQU (DX), Y10
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y10, Y11
 	VPAND   Y7, Y10, Y10
 	VPAND   Y7, Y11, Y11
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y8
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
 	VPXOR   Y8, Y9, Y8
 	VPXOR   Y8, Y0, Y0
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
 	VPXOR   Y8, Y9, Y8
 	VPXOR   Y8, Y1, Y1
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
 	VPXOR   Y8, Y9, Y8
 	VPXOR   Y8, Y2, Y2
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
 	VPXOR   Y8, Y9, Y8
 	VPXOR   Y8, Y3, Y3
+	VMOVDQU (R10), Y4
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
 	VPXOR   Y8, Y9, Y8
 	VPXOR   Y8, Y4, Y4
+	VMOVDQU (R11), Y5
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
 	VPXOR   Y8, Y9, Y8
 	VPXOR   Y8, Y5, Y5
+	VMOVDQU (BX), Y6
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
@@ -857,10 +1617,10 @@ mulAvxTwo_1x7_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x7_loop
+	JNZ  mulAvxTwo_1x7Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x7_end:
+mulAvxTwo_1x7Xor_end:
 	RET
 
 // func mulAvxTwo_1x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -904,64 +1664,176 @@ TEXT ·mulAvxTwo_1x8(SB), NOSPLIT, $0-88
 	VPBROADCASTB X8, Y8
 
 mulAvxTwo_1x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y8, Y10, Y10
+	VPAND   Y8, Y11, Y11
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y5
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y6
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y7, Y7
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y7, Y9, Y7
+
+	// Store 8 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y7, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x8_loop
+	VZEROUPPER
+
+mulAvxTwo_1x8_end:
+	RET
+
+// func mulAvxTwo_1x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 29 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), R12
+	MOVQ  168(BX), BX
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, BX
+
+	// Add start offset to input
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X8
+	VPBROADCASTB X8, Y8
 
+mulAvxTwo_1x8Xor_loop:
 	// Load and process 32 bytes from input 0 to 8 outputs
 	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y8, Y11, Y11
 	VPAND   Y8, Y12, Y12
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y9
 	VMOVDQU 32(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y0, Y0
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y9
 	VMOVDQU 96(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y1, Y1
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y9
 	VMOVDQU 160(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y2, Y2
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y9
 	VMOVDQU 224(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y3, Y3
+	VMOVDQU (R10), Y4
 	VMOVDQU 256(CX), Y9
 	VMOVDQU 288(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y4, Y4
+	VMOVDQU (R11), Y5
 	VMOVDQU 320(CX), Y9
 	VMOVDQU 352(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y5, Y5
+	VMOVDQU (R12), Y6
 	VMOVDQU 384(CX), Y9
 	VMOVDQU 416(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y6, Y6
+	VMOVDQU (BX), Y7
 	VMOVDQU 448(CX), Y9
 	VMOVDQU 480(CX), Y10
 	VPSHUFB Y11, Y9, Y9
@@ -989,10 +1861,10 @@ mulAvxTwo_1x8_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x8_loop
+	JNZ  mulAvxTwo_1x8Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x8_end:
+mulAvxTwo_1x8Xor_end:
 	RET
 
 // func mulAvxTwo_1x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -1038,77 +1910,57 @@ TEXT ·mulAvxTwo_1x9(SB), NOSPLIT, $0-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_1x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-
 	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (DX), Y12
+	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y9, Y11, Y11
 	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
+	VPXOR   Y8, Y10, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
+	VPXOR   Y8, Y10, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
+	VPXOR   Y8, Y10, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
+	VPXOR   Y8, Y10, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
+	VPXOR   Y8, Y10, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
+	VPXOR   Y8, Y10, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
+	VPXOR   Y8, Y10, Y6
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
+	VPXOR   Y8, Y10, Y7
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y8, Y8
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y8, Y10, Y8
 
 	// Store 9 outputs
 	VMOVDQU Y0, (SI)
@@ -1138,17 +1990,17 @@ mulAvxTwo_1x9_loop:
 mulAvxTwo_1x9_end:
 	RET
 
-// func mulAvxTwo_1x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_1x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_1x10(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_1x9Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 35 YMM used
+	// Full registers estimated 32 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_1x10_end
+	JZ    mulAvxTwo_1x9Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), DX
 	MOVQ  out_base+48(FP), BX
@@ -1160,9 +2012,150 @@ TEXT ·mulAvxTwo_1x10(SB), NOSPLIT, $0-88
 	MOVQ  120(BX), R11
 	MOVQ  144(BX), R12
 	MOVQ  168(BX), R13
-	MOVQ  192(BX), R14
-	MOVQ  216(BX), BX
-	MOVQ  start+72(FP), R15
+	MOVQ  192(BX), BX
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, SI
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, BX
+
+	// Add start offset to input
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_1x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (SI), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU (DI), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU (R8), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU (R9), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU (R10), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU (R11), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU (R12), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU (R13), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU (BX), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Store 9 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y7, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y8, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_1x9Xor_end:
+	RET
+
+// func mulAvxTwo_1x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), R12
+	MOVQ  168(BX), R13
+	MOVQ  192(BX), R14
+	MOVQ  216(BX), BX
+	MOVQ  start+72(FP), R15
 
 	// Add start offset to output
 	ADDQ R15, SI
@@ -1183,78 +2176,208 @@ TEXT ·mulAvxTwo_1x10(SB), NOSPLIT, $0-88
 	VPBROADCASTB X10, Y10
 
 mulAvxTwo_1x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y10, Y12, Y12
+	VPAND   Y10, Y13, Y13
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y7
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y8
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y9, Y9
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y9, Y11, Y9
+
+	// Store 10 outputs
+	VMOVDQU Y0, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y4, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y5, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y6, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y7, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y8, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y9, (BX)
+	ADDQ    $0x20, BX
 
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_1x10_loop
+	VZEROUPPER
+
+mulAvxTwo_1x10_end:
+	RET
+
+// func mulAvxTwo_1x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_1x10Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_1x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), DX
+	MOVQ  out_base+48(FP), BX
+	MOVQ  (BX), SI
+	MOVQ  24(BX), DI
+	MOVQ  48(BX), R8
+	MOVQ  72(BX), R9
+	MOVQ  96(BX), R10
+	MOVQ  120(BX), R11
+	MOVQ  144(BX), R12
+	MOVQ  168(BX), R13
+	MOVQ  192(BX), R14
+	MOVQ  216(BX), BX
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, SI
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, BX
+
+	// Add start offset to input
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_1x10Xor_loop:
 	// Load and process 32 bytes from input 0 to 10 outputs
 	VMOVDQU (DX), Y13
 	ADDQ    $0x20, DX
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y10, Y13, Y13
 	VPAND   Y10, Y14, Y14
+	VMOVDQU (SI), Y0
 	VMOVDQU (CX), Y11
 	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y0, Y0
+	VMOVDQU (DI), Y1
 	VMOVDQU 64(CX), Y11
 	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y1, Y1
+	VMOVDQU (R8), Y2
 	VMOVDQU 128(CX), Y11
 	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y2, Y2
+	VMOVDQU (R9), Y3
 	VMOVDQU 192(CX), Y11
 	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y3, Y3
+	VMOVDQU (R10), Y4
 	VMOVDQU 256(CX), Y11
 	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y4, Y4
+	VMOVDQU (R11), Y5
 	VMOVDQU 320(CX), Y11
 	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y5, Y5
+	VMOVDQU (R12), Y6
 	VMOVDQU 384(CX), Y11
 	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y6, Y6
+	VMOVDQU (R13), Y7
 	VMOVDQU 448(CX), Y11
 	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y7, Y7
+	VMOVDQU (R14), Y8
 	VMOVDQU 512(CX), Y11
 	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y8, Y8
+	VMOVDQU (BX), Y9
 	VMOVDQU 576(CX), Y11
 	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
@@ -1286,10 +2409,10 @@ mulAvxTwo_1x10_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_1x10_loop
+	JNZ  mulAvxTwo_1x10Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_1x10_end:
+mulAvxTwo_1x10Xor_end:
 	RET
 
 // func mulAvxTwo_2x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -1325,9 +2448,6 @@ TEXT ·mulAvxTwo_2x1(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_2x1_loop:
-	// Clear 1 outputs
-	VPXOR Y4, Y4, Y4
-
 	// Load and process 32 bytes from input 0 to 1 outputs
 	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
@@ -1336,8 +2456,7 @@ mulAvxTwo_2x1_loop:
 	VPAND   Y5, Y7, Y7
 	VPSHUFB Y6, Y0, Y6
 	VPSHUFB Y7, Y1, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y6, Y7, Y4
 
 	// Load and process 32 bytes from input 1 to 1 outputs
 	VMOVDQU (CX), Y6
@@ -1365,91 +2484,243 @@ mulAvxTwo_2x1_end:
 // func mulAvxTwo_2x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_2x1_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 8 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_2x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), AX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  start+72(FP), SI
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x1_64_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), BX
+	MOVQ    start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
 
 	// Add start offset to input
 	ADDQ         SI, DX
-	ADDQ         SI, AX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), DI
-	SHRQ         $0x06, DI
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X6
+	VPBROADCASTB X6, Y6
 
 mulAvxTwo_2x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
+	VMOVDQU (DX), Y7
+	VMOVDQU 32(DX), Y9
 	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y0, Y7
+	VPSHUFB Y9, Y0, Y9
+	VPSHUFB Y8, Y1, Y8
+	VPSHUFB Y10, Y1, Y10
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y9
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y2, Y7
+	VPSHUFB Y9, Y2, Y9
+	VPSHUFB Y8, Y3, Y8
+	VPSHUFB Y10, Y3, Y10
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
 
 	// Store 1 outputs
-	MOVQ    (BX), R8
-	VMOVDQU Y0, (R8)(SI*1)
-	VMOVDQU Y1, 32(R8)(SI*1)
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
 
 	// Prepare for next loop
-	ADDQ $0x40, SI
-	DECQ DI
+	DECQ AX
 	JNZ  mulAvxTwo_2x1_64_loop
 	VZEROUPPER
 
 mulAvxTwo_2x1_64_end:
 	RET
 
+// func mulAvxTwo_2x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 8 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), BX
+	MOVQ    start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+
+	// Add start offset to input
+	ADDQ         SI, DX
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_2x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y5, Y6, Y6
+	VPAND   Y5, Y7, Y7
+	VMOVDQU (BX), Y4
+	VPSHUFB Y6, Y0, Y6
+	VPSHUFB Y7, Y1, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (CX), Y6
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y5, Y6, Y6
+	VPAND   Y5, Y7, Y7
+	VPSHUFB Y6, Y2, Y6
+	VPSHUFB Y7, Y3, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Store 1 outputs
+	VMOVDQU Y4, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x1Xor_end:
+	RET
+
+// func mulAvxTwo_2x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x1_64Xor(SB), $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x06, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x1_64Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), BX
+	MOVQ    start+72(FP), SI
+
+	// Add start offset to output
+	ADDQ SI, BX
+
+	// Add start offset to input
+	ADDQ         SI, DX
+	ADDQ         SI, CX
+	MOVQ         $0x0000000f, SI
+	MOVQ         SI, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_2x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (BX), Y4
+	VMOVDQU 32(BX), Y5
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y7
+	VMOVDQU 32(DX), Y9
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y0, Y7
+	VPSHUFB Y9, Y0, Y9
+	VPSHUFB Y8, Y1, Y8
+	VPSHUFB Y10, Y1, Y10
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y9
+	ADDQ    $0x40, CX
+	VPSRLQ  $0x04, Y7, Y8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y7, Y7
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y8, Y8
+	VPAND   Y6, Y10, Y10
+	VPSHUFB Y7, Y2, Y7
+	VPSHUFB Y9, Y2, Y9
+	VPSHUFB Y8, Y3, Y8
+	VPSHUFB Y10, Y3, Y10
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Store 1 outputs
+	VMOVDQU Y4, (BX)
+	VMOVDQU Y5, 32(BX)
+	ADDQ    $0x40, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x1_64Xor_end:
+	RET
+
 // func mulAvxTwo_2x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_2x2(SB), NOSPLIT, $0-88
@@ -1489,10 +2760,6 @@ TEXT ·mulAvxTwo_2x2(SB), NOSPLIT, $0-88
 	VPBROADCASTB X10, Y10
 
 mulAvxTwo_2x2_loop:
-	// Clear 2 outputs
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
-
 	// Load and process 32 bytes from input 0 to 2 outputs
 	VMOVDQU (DX), Y13
 	ADDQ    $0x20, DX
@@ -1501,12 +2768,10 @@ mulAvxTwo_2x2_loop:
 	VPAND   Y10, Y14, Y14
 	VPSHUFB Y13, Y0, Y11
 	VPSHUFB Y14, Y1, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	VPXOR   Y11, Y12, Y8
 	VPSHUFB Y13, Y2, Y11
 	VPSHUFB Y14, Y3, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPXOR   Y11, Y12, Y9
 
 	// Load and process 32 bytes from input 1 to 2 outputs
 	VMOVDQU (CX), Y13
@@ -1541,40 +2806,38 @@ mulAvxTwo_2x2_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_2x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 15 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 25 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_2x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), AX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  start+72(FP), SI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), SI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+	ADDQ R8, SI
 
 	// Add start offset to input
-	ADDQ         SI, DX
-	ADDQ         SI, AX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X4
+	ADDQ         R8, BX
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), DI
-	SHRQ         $0x06, DI
 
 mulAvxTwo_2x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -1587,10 +2850,207 @@ mulAvxTwo_2x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Store 2 outputs
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (SI)
+	VMOVDQU Y3, 32(SI)
+	ADDQ    $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_2x2_64_end:
+	RET
+
+// func mulAvxTwo_2x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x2Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 15 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_2x2Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), CX
+	MOVQ    out_base+48(FP), BX
+	MOVQ    (BX), SI
+	MOVQ    24(BX), BX
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+	ADDQ DI, BX
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_2x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (SI), Y8
+	VPSHUFB Y13, Y0, Y11
+	VPSHUFB Y14, Y1, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU (BX), Y9
+	VPSHUFB Y13, Y2, Y11
+	VPSHUFB Y14, Y3, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (CX), Y13
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VPSHUFB Y13, Y4, Y11
+	VPSHUFB Y14, Y5, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VPSHUFB Y13, Y6, Y11
+	VPSHUFB Y14, Y7, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Store 2 outputs
+	VMOVDQU Y8, (SI)
+	ADDQ    $0x20, SI
+	VMOVDQU Y9, (BX)
+	ADDQ    $0x20, BX
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x2Xor_end:
+	RET
+
+// func mulAvxTwo_2x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 25 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), SI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+	ADDQ R8, SI
+
+	// Add start offset to input
+	ADDQ         R8, BX
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_2x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (DI), Y0
+	VMOVDQU 32(DI), Y1
+	VMOVDQU (SI), Y2
+	VMOVDQU 32(SI), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
@@ -1603,9 +3063,9 @@ mulAvxTwo_2x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -1634,20 +3094,19 @@ mulAvxTwo_2x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Store 2 outputs
-	MOVQ    (BX), R8
-	VMOVDQU Y0, (R8)(SI*1)
-	VMOVDQU Y1, 32(R8)(SI*1)
-	MOVQ    24(BX), R8
-	VMOVDQU Y2, (R8)(SI*1)
-	VMOVDQU Y3, 32(R8)(SI*1)
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (SI)
+	VMOVDQU Y3, 32(SI)
+	ADDQ    $0x40, SI
 
 	// Prepare for next loop
-	ADDQ $0x40, SI
-	DECQ DI
-	JNZ  mulAvxTwo_2x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_2x2_64_end:
+mulAvxTwo_2x2_64Xor_end:
 	RET
 
 // func mulAvxTwo_2x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -1683,11 +3142,6 @@ TEXT ·mulAvxTwo_2x3(SB), NOSPLIT, $0-88
 	VPBROADCASTB X3, Y3
 
 mulAvxTwo_2x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-
 	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
@@ -1698,20 +3152,17 @@ mulAvxTwo_2x3_loop:
 	VMOVDQU 32(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	VPXOR   Y4, Y5, Y0
 	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	VPXOR   Y4, Y5, Y1
 	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y4, Y5, Y2
 
 	// Load and process 32 bytes from input 1 to 3 outputs
 	VMOVDQU (DX), Y6
@@ -1758,39 +3209,72 @@ mulAvxTwo_2x3_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_2x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 20 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_2x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), AX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  out_base+48(FP), BX
-	MOVQ  start+72(FP), SI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), SI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
 
 	// Add start offset to input
-	ADDQ         SI, DX
-	ADDQ         SI, AX
-	MOVQ         $0x0000000f, DI
-	MOVQ         DI, X6
+	ADDQ         R9, BX
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), DI
-	SHRQ         $0x06, DI
 
 mulAvxTwo_2x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
+
+	// Load and process 64 bytes from input 1 to 3 outputs
 	VMOVDQU (DX), Y11
 	VMOVDQU 32(DX), Y13
 	ADDQ    $0x40, DX
@@ -1800,6 +3284,210 @@ mulAvxTwo_2x3_64_loop:
 	VPAND   Y6, Y13, Y13
 	VPAND   Y6, Y12, Y12
 	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Store 3 outputs
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y4, (SI)
+	VMOVDQU Y5, 32(SI)
+	ADDQ    $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x3_64_loop
+	VZEROUPPER
+
+mulAvxTwo_2x3_64_end:
+	RET
+
+// func mulAvxTwo_2x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x3Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 20 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x3Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), SI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_2x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU (SI), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Store 3 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x3Xor_end:
+	RET
+
+// func mulAvxTwo_2x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 34 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), SI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, DI
+	ADDQ R9, R8
+	ADDQ R9, SI
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_2x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (DI), Y0
+	VMOVDQU 32(DI), Y1
+	VMOVDQU (R8), Y2
+	VMOVDQU 32(R8), Y3
+	VMOVDQU (SI), Y4
+	VMOVDQU 32(SI), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU (CX), Y7
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y13, Y7, Y9
@@ -1832,9 +3520,9 @@ mulAvxTwo_2x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -1873,23 +3561,22 @@ mulAvxTwo_2x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Store 3 outputs
-	MOVQ    (BX), R8
-	VMOVDQU Y0, (R8)(SI*1)
-	VMOVDQU Y1, 32(R8)(SI*1)
-	MOVQ    24(BX), R8
-	VMOVDQU Y2, (R8)(SI*1)
-	VMOVDQU Y3, 32(R8)(SI*1)
-	MOVQ    48(BX), R8
-	VMOVDQU Y4, (R8)(SI*1)
-	VMOVDQU Y5, 32(R8)(SI*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, SI
-	DECQ DI
-	JNZ  mulAvxTwo_2x3_64_loop
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y4, (SI)
+	VMOVDQU Y5, 32(SI)
+	ADDQ    $0x40, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x3_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_2x3_64_end:
+mulAvxTwo_2x3_64Xor_end:
 	RET
 
 // func mulAvxTwo_2x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -1927,12 +3614,6 @@ TEXT ·mulAvxTwo_2x4(SB), NOSPLIT, $0-88
 	VPBROADCASTB X4, Y4
 
 mulAvxTwo_2x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 32 bytes from input 0 to 4 outputs
 	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
@@ -1943,26 +3624,22 @@ mulAvxTwo_2x4_loop:
 	VMOVDQU 32(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	VPXOR   Y5, Y6, Y0
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y5, Y6, Y1
 	VMOVDQU 128(CX), Y5
 	VMOVDQU 160(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	VPXOR   Y5, Y6, Y2
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y5, Y6, Y3
 
 	// Load and process 32 bytes from input 1 to 4 outputs
 	VMOVDQU (DX), Y7
@@ -2013,17 +3690,17 @@ mulAvxTwo_2x4_loop:
 mulAvxTwo_2x4_end:
 	RET
 
-// func mulAvxTwo_2x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_2x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_2x5(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_2x4Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 30 YMM used
+	// Full registers estimated 25 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_2x5_end
+	JZ    mulAvxTwo_2x4Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), DX
@@ -2031,62 +3708,305 @@ TEXT ·mulAvxTwo_2x5(SB), NOSPLIT, $0-88
 	MOVQ  (SI), DI
 	MOVQ  24(SI), R8
 	MOVQ  48(SI), R9
-	MOVQ  72(SI), R10
-	MOVQ  96(SI), SI
-	MOVQ  start+72(FP), R11
+	MOVQ  72(SI), SI
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R11, DI
-	ADDQ R11, R8
-	ADDQ R11, R9
-	ADDQ R11, R10
-	ADDQ R11, SI
+	ADDQ R10, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, SI
 
 	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X5
-	VPBROADCASTB X5, Y5
-
-mulAvxTwo_2x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	ADDQ         R10, BX
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X4
+	VPBROADCASTB X4, Y4
 
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (BX), Y8
+mulAvxTwo_2x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU (SI), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Store 4 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x4Xor_end:
+	RET
+
+// func mulAvxTwo_2x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x5(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x5_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), SI
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, SI
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_2x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Store 5 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x5_loop
+	VZEROUPPER
+
+mulAvxTwo_2x5_end:
+	RET
+
+// func mulAvxTwo_2x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), SI
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, DI
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, SI
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_2x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y1, Y1
+	VMOVDQU (R9), Y2
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y2, Y2
+	VMOVDQU (R10), Y3
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y3, Y3
+	VMOVDQU (SI), Y4
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
@@ -2145,10 +4065,10 @@ mulAvxTwo_2x5_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_2x5_loop
+	JNZ  mulAvxTwo_2x5Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_2x5_end:
+mulAvxTwo_2x5Xor_end:
 	RET
 
 // func mulAvxTwo_2x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -2190,50 +4110,189 @@ TEXT ·mulAvxTwo_2x6(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_2x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y5
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Store 6 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x6_loop
+	VZEROUPPER
+
+mulAvxTwo_2x6_end:
+	RET
+
+// func mulAvxTwo_2x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 35 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), SI
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, SI
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X6
+	VPBROADCASTB X6, Y6
 
+mulAvxTwo_2x6Xor_loop:
 	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPAND   Y6, Y9, Y9
 	VPAND   Y6, Y10, Y10
+	VMOVDQU (DI), Y0
 	VMOVDQU (CX), Y7
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y0, Y0
+	VMOVDQU (R8), Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y1, Y1
+	VMOVDQU (R9), Y2
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y2, Y2
+	VMOVDQU (R10), Y3
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y3, Y3
+	VMOVDQU (R11), Y4
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y4, Y4
+	VMOVDQU (SI), Y5
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
@@ -2300,10 +4359,10 @@ mulAvxTwo_2x6_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_2x6_loop
+	JNZ  mulAvxTwo_2x6Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_2x6_end:
+mulAvxTwo_2x6Xor_end:
 	RET
 
 // func mulAvxTwo_2x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -2347,15 +4406,6 @@ TEXT ·mulAvxTwo_2x7(SB), NOSPLIT, $0-88
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_2x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-
 	// Load and process 32 bytes from input 0 to 7 outputs
 	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
@@ -2366,44 +4416,37 @@ mulAvxTwo_2x7_loop:
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	VPXOR   Y8, Y9, Y0
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	VPXOR   Y8, Y9, Y1
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	VPXOR   Y8, Y9, Y2
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	VPXOR   Y8, Y9, Y3
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	VPXOR   Y8, Y9, Y4
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	VPXOR   Y8, Y9, Y5
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPXOR   Y8, Y9, Y6
 
 	// Load and process 32 bytes from input 1 to 7 outputs
 	VMOVDQU (DX), Y10
@@ -2478,17 +4521,17 @@ mulAvxTwo_2x7_loop:
 mulAvxTwo_2x7_end:
 	RET
 
-// func mulAvxTwo_2x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_2x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_2x8(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_2x7Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 45 YMM used
+	// Full registers estimated 40 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_2x8_end
+	JZ    mulAvxTwo_2x7Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), DX
@@ -2499,93 +4542,245 @@ TEXT ·mulAvxTwo_2x8(SB), NOSPLIT, $0-88
 	MOVQ  72(SI), R10
 	MOVQ  96(SI), R11
 	MOVQ  120(SI), R12
-	MOVQ  144(SI), R13
-	MOVQ  168(SI), SI
-	MOVQ  start+72(FP), R14
+	MOVQ  144(SI), SI
+	MOVQ  start+72(FP), R13
 
 	// Add start offset to output
-	ADDQ R14, DI
-	ADDQ R14, R8
-	ADDQ R14, R9
-	ADDQ R14, R10
-	ADDQ R14, R11
-	ADDQ R14, R12
-	ADDQ R14, R13
-	ADDQ R14, SI
+	ADDQ R13, DI
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, SI
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X8
-	VPBROADCASTB X8, Y8
-
-mulAvxTwo_2x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	ADDQ         R13, BX
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X7
+	VPBROADCASTB X7, Y7
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_2x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU (SI), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
-
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Store 7 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x7Xor_end:
+	RET
+
+// func mulAvxTwo_2x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x8(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 45 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), SI
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, SI
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_2x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
 	// Load and process 32 bytes from input 1 to 8 outputs
 	VMOVDQU (DX), Y11
 	ADDQ    $0x20, DX
@@ -2667,6 +4862,193 @@ mulAvxTwo_2x8_loop:
 mulAvxTwo_2x8_end:
 	RET
 
+// func mulAvxTwo_2x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 45 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), SI
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, SI
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_2x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU (R13), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU (SI), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Store 8 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_2x8Xor_end:
+	RET
+
 // func mulAvxTwo_2x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_2x9(SB), NOSPLIT, $0-88
@@ -2712,71 +5094,255 @@ TEXT ·mulAvxTwo_2x9(SB), NOSPLIT, $0-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_2x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Store 9 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y8, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x9_loop
+	VZEROUPPER
+
+mulAvxTwo_2x9_end:
+	RET
+
+// func mulAvxTwo_2x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x9Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), SI
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, DI
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, SI
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X9
+	VPBROADCASTB X9, Y9
 
+mulAvxTwo_2x9Xor_loop:
 	// Load and process 32 bytes from input 0 to 9 outputs
 	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y12, Y13
 	VPAND   Y9, Y12, Y12
 	VPAND   Y9, Y13, Y13
+	VMOVDQU (DI), Y0
 	VMOVDQU (CX), Y10
 	VMOVDQU 32(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y0, Y0
+	VMOVDQU (R8), Y1
 	VMOVDQU 64(CX), Y10
 	VMOVDQU 96(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y1, Y1
+	VMOVDQU (R9), Y2
 	VMOVDQU 128(CX), Y10
 	VMOVDQU 160(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y2, Y2
+	VMOVDQU (R10), Y3
 	VMOVDQU 192(CX), Y10
 	VMOVDQU 224(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y3, Y3
+	VMOVDQU (R11), Y4
 	VMOVDQU 256(CX), Y10
 	VMOVDQU 288(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y4, Y4
+	VMOVDQU (R12), Y5
 	VMOVDQU 320(CX), Y10
 	VMOVDQU 352(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y5, Y5
+	VMOVDQU (R13), Y6
 	VMOVDQU 384(CX), Y10
 	VMOVDQU 416(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y6, Y6
+	VMOVDQU (R14), Y7
 	VMOVDQU 448(CX), Y10
 	VMOVDQU 480(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
 	VPXOR   Y10, Y7, Y7
+	VMOVDQU (SI), Y8
 	VMOVDQU 512(CX), Y10
 	VMOVDQU 544(CX), Y11
 	VPSHUFB Y12, Y10, Y10
@@ -2867,10 +5433,10 @@ mulAvxTwo_2x9_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_2x9_loop
+	JNZ  mulAvxTwo_2x9Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_2x9_end:
+mulAvxTwo_2x9Xor_end:
 	RET
 
 // func mulAvxTwo_2x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -2920,18 +5486,6 @@ TEXT ·mulAvxTwo_2x10(SB), NOSPLIT, $8-88
 	VPBROADCASTB X10, Y10
 
 mulAvxTwo_2x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
-
 	// Load and process 32 bytes from input 0 to 10 outputs
 	VMOVDQU (BX), Y13
 	ADDQ    $0x20, BX
@@ -2942,56 +5496,267 @@ mulAvxTwo_2x10_loop:
 	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Store 10 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y8, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y9, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_2x10_loop
+	VZEROUPPER
+
+mulAvxTwo_2x10_end:
+	RET
+
+// func mulAvxTwo_2x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_2x10Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 55 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_2x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), DX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), R15
+	MOVQ  216(SI), SI
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_2x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU (R8), Y1
 	VMOVDQU 64(CX), Y11
 	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y1, Y1
+	VMOVDQU (R9), Y2
 	VMOVDQU 128(CX), Y11
 	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y2, Y2
+	VMOVDQU (R10), Y3
 	VMOVDQU 192(CX), Y11
 	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y3, Y3
+	VMOVDQU (R11), Y4
 	VMOVDQU 256(CX), Y11
 	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y4, Y4
+	VMOVDQU (R12), Y5
 	VMOVDQU 320(CX), Y11
 	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y5, Y5
+	VMOVDQU (R13), Y6
 	VMOVDQU 384(CX), Y11
 	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y6, Y6
+	VMOVDQU (R14), Y7
 	VMOVDQU 448(CX), Y11
 	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y7, Y7
+	VMOVDQU (R15), Y8
 	VMOVDQU 512(CX), Y11
 	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
 	VPXOR   Y11, Y8, Y8
+	VMOVDQU (SI), Y9
 	VMOVDQU 576(CX), Y11
 	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
@@ -3090,10 +5855,10 @@ mulAvxTwo_2x10_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_2x10_loop
+	JNZ  mulAvxTwo_2x10Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_2x10_end:
+mulAvxTwo_2x10Xor_end:
 	RET
 
 // func mulAvxTwo_3x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -3133,9 +5898,6 @@ TEXT ·mulAvxTwo_3x1(SB), NOSPLIT, $0-88
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_3x1_loop:
-	// Clear 1 outputs
-	VPXOR Y6, Y6, Y6
-
 	// Load and process 32 bytes from input 0 to 1 outputs
 	VMOVDQU (DX), Y8
 	ADDQ    $0x20, DX
@@ -3144,8 +5906,7 @@ mulAvxTwo_3x1_loop:
 	VPAND   Y7, Y9, Y9
 	VPSHUFB Y8, Y0, Y8
 	VPSHUFB Y9, Y1, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPXOR   Y8, Y9, Y6
 
 	// Load and process 32 bytes from input 1 to 1 outputs
 	VMOVDQU (BX), Y8
@@ -3185,37 +5946,75 @@ mulAvxTwo_3x1_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_3x1_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 10 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_3x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  out_base+48(FP), SI
-	MOVQ  start+72(FP), DI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), DI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
 
 	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, BX
-	ADDQ         DI, AX
+	ADDQ         R8, BX
+	ADDQ         R8, SI
+	ADDQ         R8, DX
 	MOVQ         $0x0000000f, R8
 	MOVQ         R8, X2
 	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R8
-	SHRQ         $0x06, R8
 
 mulAvxTwo_3x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 2 to 1 outputs
 	VMOVDQU (DX), Y6
 	VMOVDQU 32(DX), Y5
 	ADDQ    $0x40, DX
@@ -3225,8 +6024,8 @@ mulAvxTwo_3x1_64_loop:
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y7, Y7
 	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
 	VPSHUFB Y5, Y3, Y5
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
@@ -3236,7 +6035,139 @@ mulAvxTwo_3x1_64_loop:
 	VPXOR   Y3, Y0, Y0
 	VPXOR   Y5, Y1, Y1
 
-	// Load and process 64 bytes from input 1 to 1 outputs
+	// Store 1 outputs
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x1_64_loop
+	VZEROUPPER
+
+mulAvxTwo_3x1_64_end:
+	RET
+
+// func mulAvxTwo_3x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 10 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_3x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), CX
+	MOVQ    out_base+48(FP), SI
+	MOVQ    (SI), SI
+	MOVQ    start+72(FP), DI
+
+	// Add start offset to output
+	ADDQ DI, SI
+
+	// Add start offset to input
+	ADDQ         DI, DX
+	ADDQ         DI, BX
+	ADDQ         DI, CX
+	MOVQ         $0x0000000f, DI
+	MOVQ         DI, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_3x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VMOVDQU (SI), Y6
+	VPSHUFB Y8, Y0, Y8
+	VPSHUFB Y9, Y1, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VPSHUFB Y8, Y2, Y8
+	VPSHUFB Y9, Y3, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (CX), Y8
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y7, Y8, Y8
+	VPAND   Y7, Y9, Y9
+	VPSHUFB Y8, Y4, Y8
+	VPSHUFB Y9, Y5, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Store 1 outputs
+	VMOVDQU Y6, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x1Xor_end:
+	RET
+
+// func mulAvxTwo_3x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x1_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 18 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), DI
+	MOVQ  start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+
+	// Add start offset to input
+	ADDQ         R8, BX
+	ADDQ         R8, SI
+	ADDQ         R8, DX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_3x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (DI), Y0
+	VMOVDQU 32(DI), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
 	VMOVDQU (BX), Y6
 	VMOVDQU 32(BX), Y5
 	ADDQ    $0x40, BX
@@ -3246,6 +6177,27 @@ mulAvxTwo_3x1_64_loop:
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y7, Y7
 	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
 	VPSHUFB Y5, Y3, Y5
@@ -3258,9 +6210,9 @@ mulAvxTwo_3x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -3279,17 +6231,16 @@ mulAvxTwo_3x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Store 1 outputs
-	MOVQ    (SI), R9
-	VMOVDQU Y0, (R9)(DI*1)
-	VMOVDQU Y1, 32(R9)(DI*1)
+	VMOVDQU Y0, (DI)
+	VMOVDQU Y1, 32(DI)
+	ADDQ    $0x40, DI
 
 	// Prepare for next loop
-	ADDQ $0x40, DI
-	DECQ R8
-	JNZ  mulAvxTwo_3x1_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x1_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_3x1_64_end:
+mulAvxTwo_3x1_64Xor_end:
 	RET
 
 // func mulAvxTwo_3x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -3325,10 +6276,6 @@ TEXT ·mulAvxTwo_3x2(SB), NOSPLIT, $0-88
 	VPBROADCASTB X2, Y2
 
 mulAvxTwo_3x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-
 	// Load and process 32 bytes from input 0 to 2 outputs
 	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
@@ -3339,14 +6286,12 @@ mulAvxTwo_3x2_loop:
 	VMOVDQU 32(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	VPXOR   Y3, Y4, Y0
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
 	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y3, Y4, Y1
 
 	// Load and process 32 bytes from input 1 to 2 outputs
 	VMOVDQU (SI), Y5
@@ -3404,42 +6349,40 @@ mulAvxTwo_3x2_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_3x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 19 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_3x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  out_base+48(FP), SI
-	MOVQ  start+72(FP), DI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), DI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+	ADDQ R9, DI
 
 	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, BX
-	ADDQ         DI, AX
-	MOVQ         $0x0000000f, R8
-	MOVQ         R8, X4
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R8
-	SHRQ         $0x06, R8
 
 mulAvxTwo_3x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -3452,25 +6395,21 @@ mulAvxTwo_3x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -3499,9 +6438,9 @@ mulAvxTwo_3x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -3530,62 +6469,311 @@ mulAvxTwo_3x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Store 2 outputs
-	MOVQ    (SI), R9
-	VMOVDQU Y0, (R9)(DI*1)
-	VMOVDQU Y1, 32(R9)(DI*1)
-	MOVQ    24(SI), R9
-	VMOVDQU Y2, (R9)(DI*1)
-	VMOVDQU Y3, 32(R9)(DI*1)
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
 
 	// Prepare for next loop
-	ADDQ $0x40, DI
-	DECQ R8
+	DECQ AX
 	JNZ  mulAvxTwo_3x2_64_loop
 	VZEROUPPER
 
 mulAvxTwo_3x2_64_end:
 	RET
 
-// func mulAvxTwo_3x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_3x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x3(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_3x2Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 26 YMM used
+	// Full registers estimated 19 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x3_end
+	JZ    mulAvxTwo_3x2Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DX
 	MOVQ  out_base+48(FP), DI
 	MOVQ  (DI), R8
-	MOVQ  24(DI), R9
-	MOVQ  48(DI), DI
-	MOVQ  start+72(FP), R10
+	MOVQ  24(DI), DI
+	MOVQ  start+72(FP), R9
 
 	// Add start offset to output
-	ADDQ R10, R8
-	ADDQ R10, R9
-	ADDQ R10, DI
+	ADDQ R9, R8
+	ADDQ R9, DI
 
 	// Add start offset to input
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X3
-	VPBROADCASTB X3, Y3
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X2
+	VPBROADCASTB X2, Y2
 
-mulAvxTwo_3x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
+mulAvxTwo_3x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU (DI), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x2Xor_end:
+	RET
+
+// func mulAvxTwo_3x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), DI
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+	ADDQ R9, DI
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_3x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R8), Y0
+	VMOVDQU 32(R8), Y1
+	VMOVDQU (DI), Y2
+	VMOVDQU 32(DI), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (DI)
+	VMOVDQU Y3, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x3(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x3_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), DI
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X3
+	VPBROADCASTB X3, Y3
 
+mulAvxTwo_3x3_loop:
 	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
@@ -3596,20 +6784,17 @@ mulAvxTwo_3x3_loop:
 	VMOVDQU 32(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	VPXOR   Y4, Y5, Y0
 	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	VPXOR   Y4, Y5, Y1
 	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y4, Y5, Y2
 
 	// Load and process 32 bytes from input 1 to 3 outputs
 	VMOVDQU (SI), Y6
@@ -3681,44 +6866,42 @@ mulAvxTwo_3x3_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_3x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 26 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 46 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_3x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  out_base+48(FP), SI
-	MOVQ  start+72(FP), DI
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), DI
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
 
 	// Add start offset to input
-	ADDQ         DI, DX
-	ADDQ         DI, BX
-	ADDQ         DI, AX
-	MOVQ         $0x0000000f, R8
-	MOVQ         R8, X6
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R8
-	SHRQ         $0x06, R8
 
 mulAvxTwo_3x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -3731,35 +6914,29 @@ mulAvxTwo_3x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -3798,9 +6975,9 @@ mulAvxTwo_3x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -3839,36 +7016,35 @@ mulAvxTwo_3x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Store 3 outputs
-	MOVQ    (SI), R9
-	VMOVDQU Y0, (R9)(DI*1)
-	VMOVDQU Y1, 32(R9)(DI*1)
-	MOVQ    24(SI), R9
-	VMOVDQU Y2, (R9)(DI*1)
-	VMOVDQU Y3, 32(R9)(DI*1)
-	MOVQ    48(SI), R9
-	VMOVDQU Y4, (R9)(DI*1)
-	VMOVDQU Y5, 32(R9)(DI*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, DI
-	DECQ R8
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y4, (DI)
+	VMOVDQU Y5, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
 	JNZ  mulAvxTwo_3x3_64_loop
 	VZEROUPPER
 
 mulAvxTwo_3x3_64_end:
 	RET
 
-// func mulAvxTwo_3x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_3x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x4(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_3x3Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 33 YMM used
+	// Full registers estimated 26 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x4_end
+	JZ    mulAvxTwo_3x3Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -3876,168 +7052,625 @@ TEXT ·mulAvxTwo_3x4(SB), NOSPLIT, $0-88
 	MOVQ  out_base+48(FP), DI
 	MOVQ  (DI), R8
 	MOVQ  24(DI), R9
-	MOVQ  48(DI), R10
-	MOVQ  72(DI), DI
-	MOVQ  start+72(FP), R11
+	MOVQ  48(DI), DI
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R11, R8
-	ADDQ R11, R9
-	ADDQ R11, R10
-	ADDQ R11, DI
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
 
 	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_3x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_3x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU (DI), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
 
-	// Load and process 32 bytes from input 1 to 4 outputs
-	VMOVDQU (SI), Y7
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 256(CX), Y5
-	VMOVDQU 288(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 320(CX), Y5
-	VMOVDQU 352(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 384(CX), Y5
-	VMOVDQU 416(CX), Y6
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 448(CX), Y5
-	VMOVDQU 480(CX), Y6
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
 
-	// Load and process 32 bytes from input 2 to 4 outputs
-	VMOVDQU (DX), Y7
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DX), Y6
 	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU 512(CX), Y5
-	VMOVDQU 544(CX), Y6
-	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
-	VMOVDQU 576(CX), Y5
-	VMOVDQU 608(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
-	VMOVDQU 640(CX), Y5
-	VMOVDQU 672(CX), Y6
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
-	VMOVDQU 704(CX), Y5
-	VMOVDQU 736(CX), Y6
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
 
-	// Store 4 outputs
+	// Store 3 outputs
 	VMOVDQU Y0, (R8)
 	ADDQ    $0x20, R8
 	VMOVDQU Y1, (R9)
 	ADDQ    $0x20, R9
-	VMOVDQU Y2, (R10)
-	ADDQ    $0x20, R10
-	VMOVDQU Y3, (DI)
+	VMOVDQU Y2, (DI)
 	ADDQ    $0x20, DI
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_3x4_loop
+	JNZ  mulAvxTwo_3x3Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_3x4_end:
+mulAvxTwo_3x3Xor_end:
 	RET
 
-// func mulAvxTwo_3x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_3x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x5(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_3x3_64Xor(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 40 YMM used
+	// Full registers estimated 46 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x5_end
+	JZ    mulAvxTwo_3x3_64Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DX
 	MOVQ  out_base+48(FP), DI
+	MOVQ  out_base+48(FP), DI
 	MOVQ  (DI), R8
 	MOVQ  24(DI), R9
-	MOVQ  48(DI), R10
-	MOVQ  72(DI), R11
-	MOVQ  96(DI), DI
-	MOVQ  start+72(FP), R12
+	MOVQ  48(DI), DI
+	MOVQ  start+72(FP), R10
 
 	// Add start offset to output
-	ADDQ R12, R8
-	ADDQ R12, R9
+	ADDQ R10, R8
+	ADDQ R10, R9
+	ADDQ R10, DI
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_3x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R8), Y0
+	VMOVDQU 32(R8), Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 32(R9), Y3
+	VMOVDQU (DI), Y4
+	VMOVDQU 32(DI), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y4, (DI)
+	VMOVDQU Y5, 32(DI)
+	ADDQ    $0x40, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_3x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), DI
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DI
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_3x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y1
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x4_loop
+	VZEROUPPER
+
+mulAvxTwo_3x4_end:
+	RET
+
+// func mulAvxTwo_3x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x4Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 33 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x4Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), DI
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R8
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, DI
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_3x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU (DI), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x4Xor_end:
+	RET
+
+// func mulAvxTwo_3x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x5(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 40 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x5_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), DI
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R8
+	ADDQ R12, R9
 	ADDQ R12, R10
 	ADDQ R12, R11
 	ADDQ R12, DI
@@ -4051,13 +7684,6 @@ TEXT ·mulAvxTwo_3x5(SB), NOSPLIT, $0-88
 	VPBROADCASTB X5, Y5
 
 mulAvxTwo_3x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-
 	// Load and process 32 bytes from input 0 to 5 outputs
 	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
@@ -4068,32 +7694,27 @@ mulAvxTwo_3x5_loop:
 	VMOVDQU 32(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
+	VPXOR   Y6, Y7, Y0
 	VMOVDQU 64(CX), Y6
 	VMOVDQU 96(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	VPXOR   Y6, Y7, Y1
 	VMOVDQU 128(CX), Y6
 	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
+	VPXOR   Y6, Y7, Y2
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
+	VPXOR   Y6, Y7, Y3
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y6, Y7, Y4
 
 	// Load and process 32 bytes from input 1 to 5 outputs
 	VMOVDQU (SI), Y8
@@ -4189,17 +7810,17 @@ mulAvxTwo_3x5_loop:
 mulAvxTwo_3x5_end:
 	RET
 
-// func mulAvxTwo_3x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_3x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x6(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_3x5Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 47 YMM used
+	// Full registers estimated 40 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x6_end
+	JZ    mulAvxTwo_3x5Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -4209,71 +7830,430 @@ TEXT ·mulAvxTwo_3x6(SB), NOSPLIT, $0-88
 	MOVQ  24(DI), R9
 	MOVQ  48(DI), R10
 	MOVQ  72(DI), R11
-	MOVQ  96(DI), R12
-	MOVQ  120(DI), DI
-	MOVQ  start+72(FP), R13
+	MOVQ  96(DI), DI
+	MOVQ  start+72(FP), R12
 
 	// Add start offset to output
-	ADDQ R13, R8
-	ADDQ R13, R9
-	ADDQ R13, R10
-	ADDQ R13, R11
-	ADDQ R13, R12
-	ADDQ R13, DI
+	ADDQ R12, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, DI
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X6
-	VPBROADCASTB X6, Y6
-
-mulAvxTwo_3x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X5
+	VPBROADCASTB X5, Y5
 
-	// Load and process 32 bytes from input 0 to 6 outputs
-	VMOVDQU (BX), Y9
+mulAvxTwo_3x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPAND   Y6, Y9, Y9
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU (DI), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x5Xor_end:
+	RET
+
+// func mulAvxTwo_3x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x6(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x6_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), DI
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DI
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_3x6_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y0
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y2
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y3
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y4
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y5
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x6_loop
+	VZEROUPPER
+
+mulAvxTwo_3x6_end:
+	RET
+
+// func mulAvxTwo_3x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x6Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 47 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x6Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), DI
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R8
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, DI
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_3x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
 	VPAND   Y6, Y10, Y10
+	VMOVDQU (R8), Y0
 	VMOVDQU (CX), Y7
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y0, Y0
+	VMOVDQU (R9), Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y1, Y1
+	VMOVDQU (R10), Y2
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y2, Y2
+	VMOVDQU (R11), Y3
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y3, Y3
+	VMOVDQU (R12), Y4
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
 	VPXOR   Y7, Y8, Y7
 	VPXOR   Y7, Y4, Y4
+	VMOVDQU (DI), Y5
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
@@ -4383,10 +8363,10 @@ mulAvxTwo_3x6_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_3x6_loop
+	JNZ  mulAvxTwo_3x6Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_3x6_end:
+mulAvxTwo_3x6Xor_end:
 	RET
 
 // func mulAvxTwo_3x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -4432,15 +8412,6 @@ TEXT ·mulAvxTwo_3x7(SB), NOSPLIT, $0-88
 	VPBROADCASTB X7, Y7
 
 mulAvxTwo_3x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-
 	// Load and process 32 bytes from input 0 to 7 outputs
 	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
@@ -4451,44 +8422,37 @@ mulAvxTwo_3x7_loop:
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	VPXOR   Y8, Y9, Y0
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	VPXOR   Y8, Y9, Y1
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	VPXOR   Y8, Y9, Y2
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	VPXOR   Y8, Y9, Y3
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	VPXOR   Y8, Y9, Y4
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	VPXOR   Y8, Y9, Y5
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPXOR   Y8, Y9, Y6
 
 	// Load and process 32 bytes from input 1 to 7 outputs
 	VMOVDQU (SI), Y10
@@ -4612,17 +8576,17 @@ mulAvxTwo_3x7_loop:
 mulAvxTwo_3x7_end:
 	RET
 
-// func mulAvxTwo_3x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_3x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x8(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_3x7Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 61 YMM used
+	// Full registers estimated 54 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x8_end
+	JZ    mulAvxTwo_3x7Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -4634,104 +8598,307 @@ TEXT ·mulAvxTwo_3x8(SB), NOSPLIT, $0-88
 	MOVQ  72(DI), R11
 	MOVQ  96(DI), R12
 	MOVQ  120(DI), R13
-	MOVQ  144(DI), R14
-	MOVQ  168(DI), DI
-	MOVQ  start+72(FP), R15
+	MOVQ  144(DI), DI
+	MOVQ  start+72(FP), R14
 
 	// Add start offset to output
-	ADDQ R15, R8
-	ADDQ R15, R9
-	ADDQ R15, R10
-	ADDQ R15, R11
-	ADDQ R15, R12
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, DI
+	ADDQ R14, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, DI
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X8
-	VPBROADCASTB X8, Y8
-
-mulAvxTwo_3x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X7
+	VPBROADCASTB X7, Y7
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (BX), Y11
+mulAvxTwo_3x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU (DI), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (SI), Y11
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Store 7 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x7Xor_end:
+	RET
+
+// func mulAvxTwo_3x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x8(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 61 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x8_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), DI
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, DI
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_3x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y0, Y0
 	VMOVDQU 576(CX), Y9
@@ -4858,6 +9025,250 @@ mulAvxTwo_3x8_loop:
 mulAvxTwo_3x8_end:
 	RET
 
+// func mulAvxTwo_3x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x8Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers; the 3x8x2 nibble lookup tables are streamed from matrix memory (CX) on every use
+	// Destination kept in GP registers: one pointer per output slice (R8..R14, DI)
+	// Full registers estimated 61 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), DI
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output pointers so processing begins at byte 'start' of each slice
+	ADDQ R15, R8
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, DI
+
+	// Add start offset to input pointers, then build the 0x0f low-nibble mask broadcast into Y8
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_3x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs; Xor variant: each accumulator is seeded with the existing output bytes (VMOVDQU from the output pointer) before XOR-accumulating the nibble-table products
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU (R14), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU (DI), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs (tables at matrix offsets 512..992)
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 2 to 8 outputs (tables at matrix offsets 1024..1504)
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Store 8 outputs and advance every output pointer by 32 bytes
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y7, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop: AX counts remaining 32-byte chunks (n >> 5)
+	DECQ AX
+	JNZ  mulAvxTwo_3x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x8Xor_end:
+	RET
+
 // func mulAvxTwo_3x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_3x9(SB), NOSPLIT, $8-88
@@ -4905,17 +9316,6 @@ TEXT ·mulAvxTwo_3x9(SB), NOSPLIT, $8-88
 	VPBROADCASTB X9, Y9
 
 mulAvxTwo_3x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-
 	// Load and process 32 bytes from input 0 to 9 outputs
 	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
@@ -4926,56 +9326,47 @@ mulAvxTwo_3x9_loop:
 	VMOVDQU 32(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	VPXOR   Y10, Y11, Y0
 	VMOVDQU 64(CX), Y10
 	VMOVDQU 96(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	VPXOR   Y10, Y11, Y1
 	VMOVDQU 128(CX), Y10
 	VMOVDQU 160(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	VPXOR   Y10, Y11, Y2
 	VMOVDQU 192(CX), Y10
 	VMOVDQU 224(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	VPXOR   Y10, Y11, Y3
 	VMOVDQU 256(CX), Y10
 	VMOVDQU 288(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	VPXOR   Y10, Y11, Y4
 	VMOVDQU 320(CX), Y10
 	VMOVDQU 352(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	VPXOR   Y10, Y11, Y5
 	VMOVDQU 384(CX), Y10
 	VMOVDQU 416(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	VPXOR   Y10, Y11, Y6
 	VMOVDQU 448(CX), Y10
 	VMOVDQU 480(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	VPXOR   Y10, Y11, Y7
 	VMOVDQU 512(CX), Y10
 	VMOVDQU 544(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y10, Y11, Y8
 
 	// Load and process 32 bytes from input 1 to 9 outputs
 	VMOVDQU (SI), Y12
@@ -5127,36 +9518,34 @@ mulAvxTwo_3x9_loop:
 mulAvxTwo_3x9_end:
 	RET
 
-// func mulAvxTwo_3x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_3x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_3x10(SB), NOSPLIT, $8-88
+TEXT ·mulAvxTwo_3x9Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 75 YMM used
+	// Full registers estimated 68 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_3x10_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), AX
-	MOVQ  out_base+48(FP), SI
-	MOVQ  (SI), DI
-	MOVQ  24(SI), R8
-	MOVQ  48(SI), R9
-	MOVQ  72(SI), R10
-	MOVQ  96(SI), R11
-	MOVQ  120(SI), R12
-	MOVQ  144(SI), R13
-	MOVQ  168(SI), R14
-	MOVQ  192(SI), R15
-	MOVQ  216(SI), SI
+	JZ    mulAvxTwo_3x9Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), R15
+	MOVQ  192(DI), DI
 	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ BP, DI
 	ADDQ BP, R8
 	ADDQ BP, R9
 	ADDQ BP, R10
@@ -5165,97 +9554,344 @@ TEXT ·mulAvxTwo_3x10(SB), NOSPLIT, $8-88
 	ADDQ BP, R13
 	ADDQ BP, R14
 	ADDQ BP, R15
-	ADDQ BP, SI
+	ADDQ BP, DI
 
 	// Add start offset to input
-	ADDQ         BP, DX
 	ADDQ         BP, BX
-	ADDQ         BP, AX
+	ADDQ         BP, SI
+	ADDQ         BP, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X10
-	VPBROADCASTB X10, Y10
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_3x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	MOVQ         BP, X9
+	VPBROADCASTB X9, Y9
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (DX), Y13
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
+mulAvxTwo_3x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU (R14), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU (R15), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU (DI), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Store 9 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y7, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y8, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_3x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x9Xor_end:
+	RET
+
+// func mulAvxTwo_3x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_3x10(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 75 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_3x10_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), AX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), R15
+	MOVQ  216(SI), SI
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X10
+	VPBROADCASTB X10, Y10
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_3x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
 	VMOVDQU 32(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
+	VPXOR   Y11, Y12, Y0
 	VMOVDQU 64(CX), Y11
 	VMOVDQU 96(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
+	VPXOR   Y11, Y12, Y1
 	VMOVDQU 128(CX), Y11
 	VMOVDQU 160(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
+	VPXOR   Y11, Y12, Y2
 	VMOVDQU 192(CX), Y11
 	VMOVDQU 224(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
+	VPXOR   Y11, Y12, Y3
 	VMOVDQU 256(CX), Y11
 	VMOVDQU 288(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
+	VPXOR   Y11, Y12, Y4
 	VMOVDQU 320(CX), Y11
 	VMOVDQU 352(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
+	VPXOR   Y11, Y12, Y5
 	VMOVDQU 384(CX), Y11
 	VMOVDQU 416(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
+	VPXOR   Y11, Y12, Y6
 	VMOVDQU 448(CX), Y11
 	VMOVDQU 480(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
+	VPXOR   Y11, Y12, Y7
 	VMOVDQU 512(CX), Y11
 	VMOVDQU 544(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
+	VPXOR   Y11, Y12, Y8
 	VMOVDQU 576(CX), Y11
 	VMOVDQU 608(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPXOR   Y11, Y12, Y9
 
 	// Load and process 32 bytes from input 1 to 10 outputs
 	VMOVDQU (BX), Y13
@@ -5421,146 +10057,432 @@ mulAvxTwo_3x10_loop:
 mulAvxTwo_3x10_end:
 	RET
 
-// func mulAvxTwo_4x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x1(SB), NOSPLIT, $0-88
-	// Loading all tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 12 YMM used
-	MOVQ    n+80(FP), AX
-	MOVQ    matrix_base+0(FP), CX
-	SHRQ    $0x05, AX
-	TESTQ   AX, AX
-	JZ      mulAvxTwo_4x1_end
-	VMOVDQU (CX), Y0
-	VMOVDQU 32(CX), Y1
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	MOVQ    in_base+24(FP), CX
-	MOVQ    (CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    48(CX), SI
-	MOVQ    72(CX), CX
-	MOVQ    out_base+48(FP), DI
-	MOVQ    (DI), DI
-	MOVQ    start+72(FP), R8
-
-	// Add start offset to output
-	ADDQ R8, DI
-
-	// Add start offset to input
-	ADDQ         R8, DX
-	ADDQ         R8, BX
-	ADDQ         R8, SI
-	ADDQ         R8, CX
-	MOVQ         $0x0000000f, R8
-	MOVQ         R8, X9
-	VPBROADCASTB X9, Y9
-
-mulAvxTwo_4x1_loop:
-	// Clear 1 outputs
-	VPXOR Y8, Y8, Y8
-
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y10
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y9, Y10, Y10
-	VPAND   Y9, Y11, Y11
-	VPSHUFB Y10, Y0, Y10
-	VPSHUFB Y11, Y1, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y10
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y9, Y10, Y10
-	VPAND   Y9, Y11, Y11
-	VPSHUFB Y10, Y2, Y10
-	VPSHUFB Y11, Y3, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y10
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y9, Y10, Y10
-	VPAND   Y9, Y11, Y11
-	VPSHUFB Y10, Y4, Y10
-	VPSHUFB Y11, Y5, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (CX), Y10
-	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y9, Y10, Y10
-	VPAND   Y9, Y11, Y11
-	VPSHUFB Y10, Y6, Y10
-	VPSHUFB Y11, Y7, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
-
-	// Store 1 outputs
-	VMOVDQU Y8, (DI)
-	ADDQ    $0x20, DI
-
-	// Prepare for next loop
-	DECQ AX
-	JNZ  mulAvxTwo_4x1_loop
-	VZEROUPPER
-
-mulAvxTwo_4x1_end:
-	RET
-
-// func mulAvxTwo_4x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_3x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x1_64(SB), $0-88
+TEXT ·mulAvxTwo_3x10Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 12 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 75 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x1_64_end
+	JZ    mulAvxTwo_3x10Xor_end
 	MOVQ  in_base+24(FP), AX
 	MOVQ  (AX), DX
 	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), AX
-	MOVQ  out_base+48(FP), DI
-	MOVQ  out_base+48(FP), DI
-	MOVQ  start+72(FP), R8
+	MOVQ  48(AX), AX
+	MOVQ  out_base+48(FP), SI
+	MOVQ  (SI), DI
+	MOVQ  24(SI), R8
+	MOVQ  48(SI), R9
+	MOVQ  72(SI), R10
+	MOVQ  96(SI), R11
+	MOVQ  120(SI), R12
+	MOVQ  144(SI), R13
+	MOVQ  168(SI), R14
+	MOVQ  192(SI), R15
+	MOVQ  216(SI), SI
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, DI
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, SI
 
 	// Add start offset to input
-	ADDQ         R8, DX
-	ADDQ         R8, BX
-	ADDQ         R8, SI
-	ADDQ         R8, AX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R9
-	SHRQ         $0x06, R9
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X10
+	VPBROADCASTB X10, Y10
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
 
-mulAvxTwo_4x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulAvxTwo_3x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (DI), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU (R10), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU (R11), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU (R12), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU (R13), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU (R14), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU (R15), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU (SI), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
 
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (AX), Y13
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Store 10 outputs
+	VMOVDQU Y0, (DI)
+	ADDQ    $0x20, DI
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y3, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y4, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y5, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y6, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y7, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y8, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y9, (SI)
+	ADDQ    $0x20, SI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_3x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_3x10Xor_end:
+	RET
+
+// func mulAvxTwo_4x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x1(SB), NOSPLIT, $0-88
+	// Loading all tables to registers: 4 inputs x 1 output x 2 nibble tables = 8 YMM tables held in Y0-Y7
+	// Destination kept in GP registers (single output pointer in DI)
+	// Full registers estimated 12 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_4x1_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), CX
+	MOVQ    out_base+48(FP), DI
+	MOVQ    (DI), DI
+	MOVQ    start+72(FP), R8
+
+	// Add start offset to output so processing begins at byte 'start'
+	ADDQ R8, DI
+
+	// Add start offset to input pointers, then build the 0x0f low-nibble mask broadcast into Y9
+	ADDQ         R8, DX
+	ADDQ         R8, BX
+	ADDQ         R8, SI
+	ADDQ         R8, CX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_4x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs; the first product initializes accumulator Y8 directly, so no separate clear pass is needed
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y0, Y10
+	VPSHUFB Y11, Y1, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 1 outputs (tables Y2/Y3), XOR-accumulated into Y8
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y2, Y10
+	VPSHUFB Y11, Y3, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 2 to 1 outputs (tables Y4/Y5)
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y4, Y10
+	VPSHUFB Y11, Y5, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 3 to 1 outputs (tables Y6/Y7)
+	VMOVDQU (CX), Y10
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y6, Y10
+	VPSHUFB Y11, Y7, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Store 1 outputs (32 bytes) and advance the output pointer
+	VMOVDQU Y8, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop: AX counts remaining 32-byte chunks (n >> 5)
+	DECQ AX
+	JNZ  mulAvxTwo_4x1_loop
+	VZEROUPPER
+
+mulAvxTwo_4x1_end:
+	RET
+
+// func mulAvxTwo_4x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 22 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R8
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_4x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -5573,15 +10495,13 @@ mulAvxTwo_4x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
 
 	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -5600,9 +10520,9 @@ mulAvxTwo_4x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -5621,9 +10541,9 @@ mulAvxTwo_4x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -5642,95 +10562,321 @@ mulAvxTwo_4x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Store 1 outputs
-	MOVQ    (DI), R10
-	VMOVDQU Y0, (R10)(R8*1)
-	VMOVDQU Y1, 32(R10)(R8*1)
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
 
 	// Prepare for next loop
-	ADDQ $0x40, R8
-	DECQ R9
+	DECQ AX
 	JNZ  mulAvxTwo_4x1_64_loop
 	VZEROUPPER
 
 mulAvxTwo_4x1_64_end:
 	RET
 
-// func mulAvxTwo_4x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_4x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x2(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_4x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 12 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_4x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), CX
+	MOVQ    out_base+48(FP), DI
+	MOVQ    (DI), DI
+	MOVQ    start+72(FP), R8
+
+	// Add start offset to output
+	ADDQ R8, DI
+
+	// Add start offset to input
+	ADDQ         R8, DX
+	ADDQ         R8, BX
+	ADDQ         R8, SI
+	ADDQ         R8, CX
+	MOVQ         $0x0000000f, R8
+	MOVQ         R8, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_4x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VMOVDQU (DI), Y8
+	VPSHUFB Y10, Y0, Y10
+	VPSHUFB Y11, Y1, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y2, Y10
+	VPSHUFB Y11, Y3, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y4, Y10
+	VPSHUFB Y11, Y5, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (CX), Y10
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y9, Y10, Y10
+	VPAND   Y9, Y11, Y11
+	VPSHUFB Y10, Y6, Y10
+	VPSHUFB Y11, Y7, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Store 1 outputs
+	VMOVDQU Y8, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x1Xor_end:
+	RET
+
+// func mulAvxTwo_4x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x1_64Xor(SB), $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 23 YMM used
+	// Full registers estimated 22 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
+	SHRQ  $0x06, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x2_end
+	JZ    mulAvxTwo_4x1_64Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
 	MOVQ  48(DX), DI
 	MOVQ  72(DX), DX
 	MOVQ  out_base+48(FP), R8
-	MOVQ  (R8), R9
-	MOVQ  24(R8), R8
-	MOVQ  start+72(FP), R10
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R8
+	MOVQ  start+72(FP), R9
 
 	// Add start offset to output
-	ADDQ R10, R9
-	ADDQ R10, R8
+	ADDQ R9, R8
 
 	// Add start offset to input
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, DX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X2
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X2
 	VPBROADCASTB X2, Y2
 
-mulAvxTwo_4x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulAvxTwo_4x1_64Xor_loop:
+	// Load 1 outputs
+	VMOVDQU (R8), Y0
+	VMOVDQU 32(R8), Y1
 
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU (CX), Y3
 	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
 	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
 
-	// Load and process 32 bytes from input 1 to 2 outputs
-	VMOVDQU (SI), Y5
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 128(CX), Y3
 	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
 	VPXOR   Y3, Y0, Y0
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R8)
+	VMOVDQU Y1, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_4x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x2(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x2_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R8
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_4x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
 
 	// Load and process 32 bytes from input 2 to 2 outputs
 	VMOVDQU (DI), Y5
@@ -5788,41 +10934,128 @@ mulAvxTwo_4x2_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_4x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 23 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 41 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_4x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), AX
-	MOVQ  out_base+48(FP), DI
-	MOVQ  out_base+48(FP), DI
-	MOVQ  start+72(FP), R8
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R8
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
 
 	// Add start offset to input
-	ADDQ         R8, DX
-	ADDQ         R8, BX
-	ADDQ         R8, SI
-	ADDQ         R8, AX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X4
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R9
-	SHRQ         $0x06, R9
 
 mulAvxTwo_4x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 3 to 2 outputs
 	VMOVDQU (DX), Y9
 	VMOVDQU 32(DX), Y11
 	ADDQ    $0x40, DX
@@ -5832,6 +11065,222 @@ mulAvxTwo_4x2_64_loop:
 	VPAND   Y4, Y11, Y11
 	VPAND   Y4, Y10, Y10
 	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x2_64_loop
+	VZEROUPPER
+
+mulAvxTwo_4x2_64_end:
+	RET
+
+// func mulAvxTwo_4x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x2Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 23 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x2Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R8
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_4x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU (R8), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x2Xor_end:
+	RET
+
+// func mulAvxTwo_4x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x2_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 41 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R8
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+	ADDQ R10, R8
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_4x2_64Xor_loop:
+	// Load 2 outputs
+	VMOVDQU (R9), Y0
+	VMOVDQU 32(R9), Y1
+	VMOVDQU (R8), Y2
+	VMOVDQU 32(R8), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
 	VMOVDQU (CX), Y5
 	VMOVDQU 32(CX), Y6
 	VPSHUFB Y11, Y5, Y7
@@ -5854,9 +11303,9 @@ mulAvxTwo_4x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -5885,9 +11334,9 @@ mulAvxTwo_4x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -5916,9 +11365,9 @@ mulAvxTwo_4x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -5947,20 +11396,19 @@ mulAvxTwo_4x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Store 2 outputs
-	MOVQ    (DI), R10
-	VMOVDQU Y0, (R10)(R8*1)
-	VMOVDQU Y1, 32(R10)(R8*1)
-	MOVQ    24(DI), R10
-	VMOVDQU Y2, (R10)(R8*1)
-	VMOVDQU Y3, 32(R10)(R8*1)
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R8)
+	VMOVDQU Y3, 32(R8)
+	ADDQ    $0x40, R8
 
 	// Prepare for next loop
-	ADDQ $0x40, R8
-	DECQ R9
-	JNZ  mulAvxTwo_4x2_64_loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x2_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_4x2_64_end:
+mulAvxTwo_4x2_64Xor_end:
 	RET
 
 // func mulAvxTwo_4x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -6000,11 +11448,6 @@ TEXT ·mulAvxTwo_4x3(SB), NOSPLIT, $0-88
 	VPBROADCASTB X3, Y3
 
 mulAvxTwo_4x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-
 	// Load and process 32 bytes from input 0 to 3 outputs
 	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
@@ -6015,20 +11458,17 @@ mulAvxTwo_4x3_loop:
 	VMOVDQU 32(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
+	VPXOR   Y4, Y5, Y0
 	VMOVDQU 64(CX), Y4
 	VMOVDQU 96(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
+	VPXOR   Y4, Y5, Y1
 	VMOVDQU 128(CX), Y4
 	VMOVDQU 160(CX), Y5
 	VPSHUFB Y6, Y4, Y4
 	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y4, Y5, Y2
 
 	// Load and process 32 bytes from input 1 to 3 outputs
 	VMOVDQU (SI), Y6
@@ -6125,46 +11565,44 @@ mulAvxTwo_4x3_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_4x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 32 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 58 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_4x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), AX
-	MOVQ  out_base+48(FP), DI
-	MOVQ  out_base+48(FP), DI
-	MOVQ  start+72(FP), R8
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R8
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
 
 	// Add start offset to input
-	ADDQ         R8, DX
-	ADDQ         R8, BX
-	ADDQ         R8, SI
-	ADDQ         R8, AX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X6
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R9
-	SHRQ         $0x06, R9
 
 mulAvxTwo_4x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -6177,36 +11615,30 @@ mulAvxTwo_4x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y11, Y12
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
 	VPAND   Y6, Y13, Y13
@@ -6244,6 +11676,350 @@ mulAvxTwo_4x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R10)
+	VMOVDQU Y3, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x3_64_loop
+	VZEROUPPER
+
+mulAvxTwo_4x3_64_end:
+	RET
+
+// func mulAvxTwo_4x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x3Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 32 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x3Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R8
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_4x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU (R8), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x3Xor_end:
+	RET
+
+// func mulAvxTwo_4x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 58 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R8
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R9
+	ADDQ R11, R10
+	ADDQ R11, R8
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_4x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R9), Y0
+	VMOVDQU 32(R9), Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 32(R10), Y3
+	VMOVDQU (R8), Y4
+	VMOVDQU 32(R8), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 1 to 3 outputs
 	VMOVDQU (SI), Y11
 	VMOVDQU 32(SI), Y13
 	ADDQ    $0x40, SI
@@ -6253,6 +12029,47 @@ mulAvxTwo_4x3_64_loop:
 	VPAND   Y6, Y13, Y13
 	VPAND   Y6, Y12, Y12
 	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
 	VMOVDQU 384(CX), Y7
 	VMOVDQU 416(CX), Y8
 	VPSHUFB Y13, Y7, Y9
@@ -6285,9 +12102,9 @@ mulAvxTwo_4x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Load and process 64 bytes from input 3 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -6326,23 +12143,22 @@ mulAvxTwo_4x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Store 3 outputs
-	MOVQ    (DI), R10
-	VMOVDQU Y0, (R10)(R8*1)
-	VMOVDQU Y1, 32(R10)(R8*1)
-	MOVQ    24(DI), R10
-	VMOVDQU Y2, (R10)(R8*1)
-	VMOVDQU Y3, 32(R10)(R8*1)
-	MOVQ    48(DI), R10
-	VMOVDQU Y4, (R10)(R8*1)
-	VMOVDQU Y5, 32(R10)(R8*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, R8
-	DECQ R9
-	JNZ  mulAvxTwo_4x3_64_loop
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+	VMOVDQU Y2, (R10)
+	VMOVDQU Y3, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y4, (R8)
+	VMOVDQU Y5, 32(R8)
+	ADDQ    $0x40, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x3_64Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_4x3_64_end:
+mulAvxTwo_4x3_64Xor_end:
 	RET
 
 // func mulAvxTwo_4x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -6384,12 +12200,6 @@ TEXT ·mulAvxTwo_4x4(SB), NOSPLIT, $0-88
 	VPBROADCASTB X4, Y4
 
 mulAvxTwo_4x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 32 bytes from input 0 to 4 outputs
 	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
@@ -6400,26 +12210,22 @@ mulAvxTwo_4x4_loop:
 	VMOVDQU 32(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	VPXOR   Y5, Y6, Y0
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y5, Y6, Y1
 	VMOVDQU 128(CX), Y5
 	VMOVDQU 160(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	VPXOR   Y5, Y6, Y2
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y5, Y6, Y3
 
 	// Load and process 32 bytes from input 1 to 4 outputs
 	VMOVDQU (SI), Y7
@@ -6532,17 +12338,17 @@ mulAvxTwo_4x4_loop:
 mulAvxTwo_4x4_end:
 	RET
 
-// func mulAvxTwo_4x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_4x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x5(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_4x4Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 50 YMM used
+	// Full registers estimated 41 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x5_end
+	JZ    mulAvxTwo_4x4Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -6552,89 +12358,262 @@ TEXT ·mulAvxTwo_4x5(SB), NOSPLIT, $0-88
 	MOVQ  (R8), R9
 	MOVQ  24(R8), R10
 	MOVQ  48(R8), R11
-	MOVQ  72(R8), R12
-	MOVQ  96(R8), R8
-	MOVQ  start+72(FP), R13
+	MOVQ  72(R8), R8
+	MOVQ  start+72(FP), R12
 
 	// Add start offset to output
-	ADDQ R13, R9
-	ADDQ R13, R10
-	ADDQ R13, R11
-	ADDQ R13, R12
-	ADDQ R13, R8
+	ADDQ R12, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R8
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X5
-	VPBROADCASTB X5, Y5
-
-mulAvxTwo_4x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X4
+	VPBROADCASTB X4, Y4
 
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (BX), Y8
+mulAvxTwo_4x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
-	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
-	VMOVDQU 96(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 128(CX), Y6
-	VMOVDQU 160(CX), Y7
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y2, Y2
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y3, Y3
-	VMOVDQU 256(CX), Y6
-	VMOVDQU 288(CX), Y7
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU (R8), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y4, Y4
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
 
-	// Load and process 32 bytes from input 1 to 5 outputs
-	VMOVDQU (SI), Y8
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU 320(CX), Y6
-	VMOVDQU 352(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 384(CX), Y6
-	VMOVDQU 416(CX), Y7
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x4Xor_end:
+	RET
+
+// func mulAvxTwo_4x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x5(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x5_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R8
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R8
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_4x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
 	VMOVDQU 448(CX), Y6
 	VMOVDQU 480(CX), Y7
 	VPSHUFB Y8, Y6, Y6
@@ -6748,6 +12727,220 @@ mulAvxTwo_4x5_loop:
 mulAvxTwo_4x5_end:
 	RET
 
+// func mulAvxTwo_4x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers; lookup tables are streamed from matrix (CX) inside the loop
+	// Destination kept in GP registers: one output pointer per register (R9-R12, R8)
+	// Full registers estimated 50 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R8
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to the 5 output pointers
+	ADDQ R13, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R8
+
+	// Add start offset to the 4 input pointers, then broadcast the 0x0f nibble mask into Y5
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_4x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs; Xor variant: each output register is seeded from memory before accumulating
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU (R8), Y4
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs, accumulating into Y0-Y4
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 2 to 5 outputs, accumulating into Y0-Y4
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 3 to 5 outputs, accumulating into Y0-Y4
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Store 5 outputs and advance each output pointer by 32 bytes
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop; AX counts remaining 32-byte blocks (n >> 5)
+	DECQ AX
+	JNZ  mulAvxTwo_4x5Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x5Xor_end:
+	RET
+
 // func mulAvxTwo_4x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_4x6(SB), NOSPLIT, $0-88
@@ -6791,14 +12984,6 @@ TEXT ·mulAvxTwo_4x6(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_4x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
@@ -6809,38 +12994,32 @@ mulAvxTwo_4x6_loop:
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPXOR   Y7, Y8, Y0
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPXOR   Y7, Y8, Y2
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y7, Y8, Y3
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPXOR   Y7, Y8, Y4
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPXOR   Y7, Y8, Y5
 
 	// Load and process 32 bytes from input 1 to 6 outputs
 	VMOVDQU (SI), Y9
@@ -6993,17 +13172,17 @@ mulAvxTwo_4x6_loop:
 mulAvxTwo_4x6_end:
 	RET
 
-// func mulAvxTwo_4x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_4x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x7(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_4x6Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 68 YMM used
+	// Full registers estimated 59 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x7_end
+	JZ    mulAvxTwo_4x6Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -7015,87 +13194,314 @@ TEXT ·mulAvxTwo_4x7(SB), NOSPLIT, $0-88
 	MOVQ  48(R8), R11
 	MOVQ  72(R8), R12
 	MOVQ  96(R8), R13
-	MOVQ  120(R8), R14
-	MOVQ  144(R8), R8
-	MOVQ  start+72(FP), R15
+	MOVQ  120(R8), R8
+	MOVQ  start+72(FP), R14
 
 	// Add start offset to output
-	ADDQ R15, R9
-	ADDQ R15, R10
-	ADDQ R15, R11
-	ADDQ R15, R12
-	ADDQ R15, R13
-	ADDQ R15, R14
-	ADDQ R15, R8
+	ADDQ R14, R9
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R8
 
 	// Add start offset to input
-	ADDQ         R15, BX
-	ADDQ         R15, SI
-	ADDQ         R15, DI
-	ADDQ         R15, DX
-	MOVQ         $0x0000000f, R15
-	MOVQ         R15, X7
-	VPBROADCASTB X7, Y7
-
-mulAvxTwo_4x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X6
+	VPBROADCASTB X6, Y6
 
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (BX), Y10
+mulAvxTwo_4x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y10, Y11
-	VPAND   Y7, Y10, Y10
-	VPAND   Y7, Y11, Y11
-	VMOVDQU (CX), Y8
-	VMOVDQU 32(CX), Y9
-	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
-	VMOVDQU 64(CX), Y8
-	VMOVDQU 96(CX), Y9
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
-	VMOVDQU 128(CX), Y8
-	VMOVDQU 160(CX), Y9
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
-	VMOVDQU 192(CX), Y8
-	VMOVDQU 224(CX), Y9
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
-	VMOVDQU 320(CX), Y8
-	VMOVDQU 352(CX), Y9
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
-	VMOVDQU 384(CX), Y8
-	VMOVDQU 416(CX), Y9
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU (R8), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
-
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x6Xor_end:
+	RET
+
+// func mulAvxTwo_4x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x7(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 68 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R8
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to output
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R8
+
+	// Add start offset to input
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_4x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y0
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y1
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y2
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y3
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y4
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y5
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y6
+
 	// Load and process 32 bytes from input 1 to 7 outputs
 	VMOVDQU (SI), Y10
 	ADDQ    $0x20, SI
@@ -7267,6 +13673,278 @@ mulAvxTwo_4x7_loop:
 mulAvxTwo_4x7_end:
 	RET
 
+// func mulAvxTwo_4x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x7Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers; lookup tables are streamed from matrix (CX) inside the loop
+	// Destination kept in GP registers: one output pointer per register (R9-R14, R8)
+	// Full registers estimated 68 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R8
+	MOVQ  start+72(FP), R15
+
+	// Add start offset to the 7 output pointers
+	ADDQ R15, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R8
+
+	// Add start offset to the 4 input pointers, then broadcast the 0x0f nibble mask into Y7
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_4x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs; Xor variant: each output register is seeded from memory before accumulating
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU (R14), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU (R8), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 1 to 7 outputs, accumulating into Y0-Y6
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 2 to 7 outputs, accumulating into Y0-Y6
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 3 to 7 outputs, accumulating into Y0-Y6
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Store 7 outputs and advance each output pointer by 32 bytes
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y6, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop; AX counts remaining 32-byte blocks (n >> 5)
+	DECQ AX
+	JNZ  mulAvxTwo_4x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x7Xor_end:
+	RET
+
 // func mulAvxTwo_4x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_4x8(SB), NOSPLIT, $8-88
@@ -7314,16 +13992,6 @@ TEXT ·mulAvxTwo_4x8(SB), NOSPLIT, $8-88
 	VPBROADCASTB X8, Y8
 
 mulAvxTwo_4x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-
 	// Load and process 32 bytes from input 0 to 8 outputs
 	VMOVDQU (BX), Y11
 	ADDQ    $0x20, BX
@@ -7334,50 +14002,42 @@ mulAvxTwo_4x8_loop:
 	VMOVDQU 32(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
+	VPXOR   Y9, Y10, Y0
 	VMOVDQU 64(CX), Y9
 	VMOVDQU 96(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 128(CX), Y9
 	VMOVDQU 160(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
+	VPXOR   Y9, Y10, Y2
 	VMOVDQU 192(CX), Y9
 	VMOVDQU 224(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 256(CX), Y9
 	VMOVDQU 288(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
+	VPXOR   Y9, Y10, Y4
 	VMOVDQU 320(CX), Y9
 	VMOVDQU 352(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y9, Y10, Y5
 	VMOVDQU 384(CX), Y9
 	VMOVDQU 416(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
+	VPXOR   Y9, Y10, Y6
 	VMOVDQU 448(CX), Y9
 	VMOVDQU 480(CX), Y10
 	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPXOR   Y9, Y10, Y7
 
 	// Load and process 32 bytes from input 1 to 8 outputs
 	VMOVDQU (SI), Y11
@@ -7570,9 +14230,310 @@ mulAvxTwo_4x8_loop:
 mulAvxTwo_4x8_end:
 	RET
 
-// func mulAvxTwo_4x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_4x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x9(SB), NOSPLIT, $8-88
+TEXT ·mulAvxTwo_4x8Xor(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 77 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x8Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R15
+	MOVQ  168(R8), R8
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
+
+mulAvxTwo_4x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU (R14), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU (R15), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU (R8), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Store 8 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y6, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y7, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_4x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x8Xor_end:
+	RET
+
+// func mulAvxTwo_4x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x9(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
 	// Full registers estimated 86 YMM used
@@ -7621,17 +14582,6 @@ TEXT ·mulAvxTwo_4x9(SB), NOSPLIT, $8-88
 	SHRQ         $0x05, BP
 
 mulAvxTwo_4x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-
 	// Load and process 32 bytes from input 0 to 9 outputs
 	VMOVDQU (DX), Y12
 	ADDQ    $0x20, DX
@@ -7642,56 +14592,47 @@ mulAvxTwo_4x9_loop:
 	VMOVDQU 32(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
+	VPXOR   Y10, Y11, Y0
 	VMOVDQU 64(CX), Y10
 	VMOVDQU 96(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
+	VPXOR   Y10, Y11, Y1
 	VMOVDQU 128(CX), Y10
 	VMOVDQU 160(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
+	VPXOR   Y10, Y11, Y2
 	VMOVDQU 192(CX), Y10
 	VMOVDQU 224(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
+	VPXOR   Y10, Y11, Y3
 	VMOVDQU 256(CX), Y10
 	VMOVDQU 288(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
+	VPXOR   Y10, Y11, Y4
 	VMOVDQU 320(CX), Y10
 	VMOVDQU 352(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
+	VPXOR   Y10, Y11, Y5
 	VMOVDQU 384(CX), Y10
 	VMOVDQU 416(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
+	VPXOR   Y10, Y11, Y6
 	VMOVDQU 448(CX), Y10
 	VMOVDQU 480(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
+	VPXOR   Y10, Y11, Y7
 	VMOVDQU 512(CX), Y10
 	VMOVDQU 544(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y10, Y11, Y8
 
 	// Load and process 32 bytes from input 1 to 9 outputs
 	VMOVDQU (BX), Y12
@@ -7904,134 +14845,444 @@ mulAvxTwo_4x9_loop:
 mulAvxTwo_4x9_end:
 	RET
 
-// func mulAvxTwo_4x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_4x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_4x10(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_4x9Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 95 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 86 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_4x10_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), DX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  start+72(FP), R9
+	JZ    mulAvxTwo_4x9Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), AX
+	MOVQ  out_base+48(FP), DI
+	MOVQ  (DI), R8
+	MOVQ  24(DI), R9
+	MOVQ  48(DI), R10
+	MOVQ  72(DI), R11
+	MOVQ  96(DI), R12
+	MOVQ  120(DI), R13
+	MOVQ  144(DI), R14
+	MOVQ  168(DI), R15
+	MOVQ  192(DI), DI
+	MOVQ  start+72(FP), BP
 
-	// Add start offset to input
-	ADDQ         R9, BX
-	ADDQ         R9, SI
-	ADDQ         R9, DI
-	ADDQ         R9, DX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X10
-	VPBROADCASTB X10, Y10
+	// Add start offset to output
+	ADDQ BP, R8
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, DI
 
-mulAvxTwo_4x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X9
+	VPBROADCASTB X9, Y9
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
+mulAvxTwo_4x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (R8), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU (R10), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU (R11), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU (R12), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU (R13), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU (R14), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU (R15), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU (DI), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
 
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (AX), Y12
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Store 9 outputs
+	VMOVDQU Y0, (R8)
+	ADDQ    $0x20, R8
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y2, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y3, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y4, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y5, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y6, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y7, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y8, (DI)
+	ADDQ    $0x20, DI
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_4x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x9Xor_end:
+	RET
+
+// func mulAvxTwo_4x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_4x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 95 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_4x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  start+72(FP), R9
+
+	// Add start offset to input
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_4x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
 	VPXOR   Y11, Y12, Y11
@@ -8244,167 +15495,509 @@ mulAvxTwo_4x10_loop:
 mulAvxTwo_4x10_end:
 	RET
 
-// func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x1(SB), NOSPLIT, $0-88
-	// Loading all tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 14 YMM used
-	MOVQ    n+80(FP), AX
-	MOVQ    matrix_base+0(FP), CX
-	SHRQ    $0x05, AX
-	TESTQ   AX, AX
-	JZ      mulAvxTwo_5x1_end
-	VMOVDQU (CX), Y0
-	VMOVDQU 32(CX), Y1
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	MOVQ    in_base+24(FP), CX
-	MOVQ    (CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    48(CX), SI
-	MOVQ    72(CX), DI
-	MOVQ    96(CX), CX
-	MOVQ    out_base+48(FP), R8
-	MOVQ    (R8), R8
-	MOVQ    start+72(FP), R9
-
-	// Add start offset to output
-	ADDQ R9, R8
-
-	// Add start offset to input
-	ADDQ         R9, DX
-	ADDQ         R9, BX
-	ADDQ         R9, SI
-	ADDQ         R9, DI
-	ADDQ         R9, CX
-	MOVQ         $0x0000000f, R9
-	MOVQ         R9, X11
-	VPBROADCASTB X11, Y11
-
-mulAvxTwo_5x1_loop:
-	// Clear 1 outputs
-	VPXOR Y10, Y10, Y10
-
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y12
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y0, Y12
-	VPSHUFB Y13, Y1, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y12
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y2, Y12
-	VPSHUFB Y13, Y3, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y12
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y4, Y12
-	VPSHUFB Y13, Y5, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y12
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y6, Y12
-	VPSHUFB Y13, Y7, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (CX), Y12
-	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y11, Y12, Y12
-	VPAND   Y11, Y13, Y13
-	VPSHUFB Y12, Y8, Y12
-	VPSHUFB Y13, Y9, Y13
-	VPXOR   Y12, Y13, Y12
-	VPXOR   Y12, Y10, Y10
-
-	// Store 1 outputs
-	VMOVDQU Y10, (R8)
-	ADDQ    $0x20, R8
-
-	// Prepare for next loop
-	DECQ AX
-	JNZ  mulAvxTwo_5x1_loop
-	VZEROUPPER
-
-mulAvxTwo_5x1_end:
-	RET
-
-// func mulAvxTwo_5x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_4x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x1_64(SB), $0-88
+TEXT ·mulAvxTwo_4x10Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 14 YMM used
+	// Full registers estimated 95 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
+	JZ    mulAvxTwo_4x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), DX
 	MOVQ  out_base+48(FP), R8
 	MOVQ  start+72(FP), R9
 
 	// Add start offset to input
-	ADDQ         R9, DX
 	ADDQ         R9, BX
 	ADDQ         R9, SI
 	ADDQ         R9, DI
-	ADDQ         R9, AX
+	ADDQ         R9, DX
 	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R10
-	SHRQ         $0x06, R10
+	MOVQ         R10, X10
+	VPBROADCASTB X10, Y10
 
-mulAvxTwo_5x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulAvxTwo_4x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	MOVQ    (R8), R10
+	VMOVDQU (R10)(R9*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	MOVQ    24(R8), R10
+	VMOVDQU (R10)(R9*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	MOVQ    48(R8), R10
+	VMOVDQU (R10)(R9*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	MOVQ    72(R8), R10
+	VMOVDQU (R10)(R9*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	MOVQ    96(R8), R10
+	VMOVDQU (R10)(R9*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	MOVQ    120(R8), R10
+	VMOVDQU (R10)(R9*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	MOVQ    144(R8), R10
+	VMOVDQU (R10)(R9*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	MOVQ    168(R8), R10
+	VMOVDQU (R10)(R9*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	MOVQ    192(R8), R10
+	VMOVDQU (R10)(R9*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	MOVQ    216(R8), R10
+	VMOVDQU (R10)(R9*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
 
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
+
+	// Store 10 outputs
+	MOVQ    (R8), R10
+	VMOVDQU Y0, (R10)(R9*1)
+	MOVQ    24(R8), R10
+	VMOVDQU Y1, (R10)(R9*1)
+	MOVQ    48(R8), R10
+	VMOVDQU Y2, (R10)(R9*1)
+	MOVQ    72(R8), R10
+	VMOVDQU Y3, (R10)(R9*1)
+	MOVQ    96(R8), R10
+	VMOVDQU Y4, (R10)(R9*1)
+	MOVQ    120(R8), R10
+	VMOVDQU Y5, (R10)(R9*1)
+	MOVQ    144(R8), R10
+	VMOVDQU Y6, (R10)(R9*1)
+	MOVQ    168(R8), R10
+	VMOVDQU Y7, (R10)(R9*1)
+	MOVQ    192(R8), R10
+	VMOVDQU Y8, (R10)(R9*1)
+	MOVQ    216(R8), R10
+	VMOVDQU Y9, (R10)(R9*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R9
+	DECQ AX
+	JNZ  mulAvxTwo_4x10Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_4x10Xor_end:
+	RET
+
+// func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x1(SB), NOSPLIT, $0-88
+	// Loading all tables to registers (10 x 32-byte tables: 5 inputs x low/high nibble)
+	// Destination kept in GP registers
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX // AX = n / 32: number of 32-byte loop iterations
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_5x1_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), CX
+	MOVQ    out_base+48(FP), R8
+	MOVQ    (R8), R8
+	MOVQ    start+72(FP), R9
+
+	// Add start offset to output
+	ADDQ R9, R8
+
+	// Add start offset to input
+	ADDQ         R9, DX
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, CX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X11
+	VPBROADCASTB X11, Y11 // Y11 = 0x0f in every byte: nibble mask for the table lookups
+
+mulAvxTwo_5x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs (input 0 initializes the accumulator Y10)
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13 // Y13 = high nibbles, Y12 keeps low nibbles after masking
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y0, Y12
+	VPSHUFB Y13, Y1, Y13
+	VPXOR   Y12, Y13, Y10
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y2, Y12
+	VPSHUFB Y13, Y3, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y4, Y12
+	VPSHUFB Y13, Y5, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y6, Y12
+	VPSHUFB Y13, Y7, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (CX), Y12
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y8, Y12
+	VPSHUFB Y13, Y9, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Store 1 outputs (non-Xor variant: destination is overwritten, not accumulated)
+	VMOVDQU Y10, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x1_loop
+	VZEROUPPER
+
+mulAvxTwo_5x1_end:
+	RET
+
+// func mulAvxTwo_5x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R9
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_5x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y7, Y7
 	VPAND   Y2, Y8, Y8
 	VMOVDQU (CX), Y3
@@ -8413,15 +16006,13 @@ mulAvxTwo_5x1_64_loop:
 	VPSHUFB Y6, Y3, Y3
 	VPSHUFB Y8, Y4, Y6
 	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
 
 	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -8440,9 +16031,9 @@ mulAvxTwo_5x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -8461,9 +16052,9 @@ mulAvxTwo_5x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -8482,9 +16073,9 @@ mulAvxTwo_5x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y6, Y7
 	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
@@ -8503,83 +16094,347 @@ mulAvxTwo_5x1_64_loop:
 	VPXOR   Y5, Y1, Y1
 
 	// Store 1 outputs
-	MOVQ    (R8), R11
-	VMOVDQU Y0, (R11)(R9*1)
-	VMOVDQU Y1, 32(R11)(R9*1)
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
-	ADDQ $0x40, R9
-	DECQ R10
+	DECQ AX
 	JNZ  mulAvxTwo_5x1_64_loop
 	VZEROUPPER
 
 mulAvxTwo_5x1_64_end:
 	RET
 
-// func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x2(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+TEXT ·mulAvxTwo_5x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 27 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_5x2_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), DX
-	MOVQ  out_base+48(FP), R9
-	MOVQ  (R9), R10
-	MOVQ  24(R9), R9
-	MOVQ  start+72(FP), R11
+	// Full registers estimated 14 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_5x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), CX
+	MOVQ    out_base+48(FP), R8
+	MOVQ    (R8), R8
+	MOVQ    start+72(FP), R9
 
 	// Add start offset to output
-	ADDQ R11, R10
-	ADDQ R11, R9
+	ADDQ R9, R8
 
 	// Add start offset to input
-	ADDQ         R11, BX
-	ADDQ         R11, SI
-	ADDQ         R11, DI
-	ADDQ         R11, R8
-	ADDQ         R11, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X2
-	VPBROADCASTB X2, Y2
+	ADDQ         R9, DX
+	ADDQ         R9, BX
+	ADDQ         R9, SI
+	ADDQ         R9, DI
+	ADDQ         R9, CX
+	MOVQ         $0x0000000f, R9
+	MOVQ         R9, X11
+	VPBROADCASTB X11, Y11
 
-mulAvxTwo_5x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+mulAvxTwo_5x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VMOVDQU (R8), Y10
+	VPSHUFB Y12, Y0, Y12
+	VPSHUFB Y13, Y1, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
 
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 1 to 2 outputs
-	VMOVDQU (SI), Y5
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y5, Y6
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y2, Y12
+	VPSHUFB Y13, Y3, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y4, Y12
+	VPSHUFB Y13, Y5, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y6, Y12
+	VPSHUFB Y13, Y7, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (CX), Y12
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y11, Y12, Y12
+	VPAND   Y11, Y13, Y13
+	VPSHUFB Y12, Y8, Y12
+	VPSHUFB Y13, Y9, Y13
+	VPXOR   Y12, Y13, Y12
+	VPXOR   Y12, Y10, Y10
+
+	// Store 1 outputs
+	VMOVDQU Y10, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x1Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x1Xor_end:
+	RET
+
+// func mulAvxTwo_5x1_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x1_64Xor(SB), $0-88
+	// Loading no tables to registers (tables are re-read from the matrix inside the loop)
+	// Destination kept in GP registers
+	// Full registers estimated 26 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX // AX = n / 64: this variant processes 64 bytes per iteration
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x1_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9 // NOTE(review): duplicate of the previous load; redundant but harmless generator artifact
+	MOVQ  (R9), R9
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X2
+	VPBROADCASTB X2, Y2 // Y2 = 0x0f in every byte: nibble mask for the table lookups
+
+mulAvxTwo_5x1_64Xor_loop:
+	// Load 1 outputs (Xor variant: accumulate onto the existing destination bytes)
+	VMOVDQU (R9), Y0
+	VMOVDQU 32(R9), Y1
+
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Store 1 outputs
+	VMOVDQU Y0, (R9)
+	VMOVDQU Y1, 32(R9)
+	ADDQ    $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x1_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x1_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x2(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 27 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x2_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+	ADDQ R11, R9
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_5x2_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y1
+
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y5, Y6
 	VPAND   Y2, Y5, Y5
 	VPAND   Y2, Y6, Y6
 	VMOVDQU 128(CX), Y3
@@ -8670,46 +16525,44 @@ mulAvxTwo_5x2_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_5x2_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 27 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 49 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_5x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  out_base+48(FP), R8
-	MOVQ  start+72(FP), R9
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+	ADDQ R11, R9
 
 	// Add start offset to input
-	ADDQ         R9, DX
-	ADDQ         R9, BX
-	ADDQ         R9, SI
-	ADDQ         R9, DI
-	ADDQ         R9, AX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X4
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
 	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R10
-	SHRQ         $0x06, R10
 
 mulAvxTwo_5x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-
 	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -8722,25 +16575,21 @@ mulAvxTwo_5x2_64_loop:
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y5, Y6, Y0
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y11, Y5, Y7
 	VPSHUFB Y9, Y5, Y5
 	VPSHUFB Y12, Y6, Y8
 	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y5, Y6, Y2
+	VPXOR   Y7, Y8, Y3
 
 	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -8769,9 +16618,9 @@ mulAvxTwo_5x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -8800,9 +16649,9 @@ mulAvxTwo_5x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 3 to 2 outputs
-	VMOVDQU (DI), Y9
-	VMOVDQU 32(DI), Y11
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -8831,9 +16680,9 @@ mulAvxTwo_5x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Load and process 64 bytes from input 4 to 2 outputs
-	VMOVDQU (AX), Y9
-	VMOVDQU 32(AX), Y11
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y9, Y10
 	VPSRLQ  $0x04, Y11, Y12
 	VPAND   Y4, Y9, Y9
@@ -8862,33 +16711,32 @@ mulAvxTwo_5x2_64_loop:
 	VPXOR   Y7, Y3, Y3
 
 	// Store 2 outputs
-	MOVQ    (R8), R11
-	VMOVDQU Y0, (R11)(R9*1)
-	VMOVDQU Y1, 32(R11)(R9*1)
-	MOVQ    24(R8), R11
-	VMOVDQU Y2, (R11)(R9*1)
-	VMOVDQU Y3, 32(R11)(R9*1)
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
 
 	// Prepare for next loop
-	ADDQ $0x40, R9
-	DECQ R10
+	DECQ AX
 	JNZ  mulAvxTwo_5x2_64_loop
 	VZEROUPPER
 
 mulAvxTwo_5x2_64_end:
 	RET
 
-// func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x2Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x3(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_5x2Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 38 YMM used
+	// Full registers estimated 27 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x3_end
+	JZ    mulAvxTwo_5x2Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -8897,129 +16745,484 @@ TEXT ·mulAvxTwo_5x3(SB), NOSPLIT, $0-88
 	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
 	MOVQ  (R9), R10
-	MOVQ  24(R9), R11
-	MOVQ  48(R9), R9
-	MOVQ  start+72(FP), R12
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
 
 	// Add start offset to output
-	ADDQ R12, R10
-	ADDQ R12, R11
-	ADDQ R12, R9
+	ADDQ R11, R10
+	ADDQ R11, R9
 
 	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X3
-	VPBROADCASTB X3, Y3
-
-mulAvxTwo_5x3_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X2
+	VPBROADCASTB X2, Y2
 
-	// Load and process 32 bytes from input 0 to 3 outputs
-	VMOVDQU (BX), Y6
+mulAvxTwo_5x2Xor_loop:
+	// Load and process 32 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y5
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU (CX), Y4
-	VMOVDQU 32(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 64(CX), Y4
-	VMOVDQU 96(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y3
+	VMOVDQU 32(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU (R9), Y1
+	VMOVDQU 64(CX), Y3
+	VMOVDQU 96(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
 
-	// Load and process 32 bytes from input 1 to 3 outputs
-	VMOVDQU (SI), Y6
+	// Load and process 32 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y5
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 192(CX), Y4
-	VMOVDQU 224(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 256(CX), Y4
-	VMOVDQU 288(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 128(CX), Y3
+	VMOVDQU 160(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 320(CX), Y4
-	VMOVDQU 352(CX), Y5
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 192(CX), Y3
+	VMOVDQU 224(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
 
-	// Load and process 32 bytes from input 2 to 3 outputs
-	VMOVDQU (DI), Y6
+	// Load and process 32 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y5
 	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 384(CX), Y4
-	VMOVDQU 416(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 448(CX), Y4
-	VMOVDQU 480(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 256(CX), Y3
+	VMOVDQU 288(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 512(CX), Y4
-	VMOVDQU 544(CX), Y5
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y2, Y2
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
 
-	// Load and process 32 bytes from input 3 to 3 outputs
-	VMOVDQU (R8), Y6
+	// Load and process 32 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y5
 	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPAND   Y3, Y6, Y6
-	VPAND   Y3, Y7, Y7
-	VMOVDQU 576(CX), Y4
-	VMOVDQU 608(CX), Y5
-	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y0, Y0
-	VMOVDQU 640(CX), Y4
-	VMOVDQU 672(CX), Y5
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 384(CX), Y3
+	VMOVDQU 416(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
-	VPXOR   Y4, Y1, Y1
-	VMOVDQU 704(CX), Y4
-	VMOVDQU 736(CX), Y5
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 448(CX), Y3
+	VMOVDQU 480(CX), Y4
+	VPSHUFB Y5, Y3, Y3
 	VPSHUFB Y6, Y4, Y4
-	VPSHUFB Y7, Y5, Y5
-	VPXOR   Y4, Y5, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Load and process 32 bytes from input 4 to 2 outputs
+	VMOVDQU (DX), Y5
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y5, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y6, Y6
+	VMOVDQU 512(CX), Y3
+	VMOVDQU 544(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y0, Y0
+	VMOVDQU 576(CX), Y3
+	VMOVDQU 608(CX), Y4
+	VPSHUFB Y5, Y3, Y3
+	VPSHUFB Y6, Y4, Y4
+	VPXOR   Y3, Y4, Y3
+	VPXOR   Y3, Y1, Y1
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x2Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x2Xor_end:
+	RET
+
+// func mulAvxTwo_5x2_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x2_64Xor(SB), $0-88
+	// Loading no tables to registers (tables are re-read from the matrix inside the loop)
+	// Destination kept in GP registers
+	// Full registers estimated 49 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX // AX = n / 64: this variant processes 64 bytes per iteration
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x2_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9 // NOTE(review): duplicate of the previous load; redundant but harmless generator artifact
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R9
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+	ADDQ R11, R9
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X4
+	VPBROADCASTB X4, Y4 // Y4 = 0x0f in every byte: nibble mask for the table lookups
+
+mulAvxTwo_5x2_64Xor_loop:
+	// Load 2 outputs (Xor variant: accumulate onto the existing destination bytes)
+	VMOVDQU (R10), Y0
+	VMOVDQU 32(R10), Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 32(R9), Y3
+
+	// Load and process 64 bytes from input 0 to 2 outputs
+	VMOVDQU (BX), Y9
+	VMOVDQU 32(BX), Y11
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 1 to 2 outputs
+	VMOVDQU (SI), Y9
+	VMOVDQU 32(SI), Y11
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 2 to 2 outputs
+	VMOVDQU (DI), Y9
+	VMOVDQU 32(DI), Y11
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 3 to 2 outputs
+	VMOVDQU (R8), Y9
+	VMOVDQU 32(R8), Y11
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Load and process 64 bytes from input 4 to 2 outputs
+	VMOVDQU (DX), Y9
+	VMOVDQU 32(DX), Y11
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y4, Y9, Y9
+	VPAND   Y4, Y11, Y11
+	VPAND   Y4, Y10, Y10
+	VPAND   Y4, Y12, Y12
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y0, Y0
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y11, Y5, Y7
+	VPSHUFB Y9, Y5, Y5
+	VPSHUFB Y12, Y6, Y8
+	VPSHUFB Y10, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y5, Y2, Y2
+	VPXOR   Y7, Y3, Y3
+
+	// Store 2 outputs
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R9)
+	VMOVDQU Y3, 32(R9)
+	ADDQ    $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x2_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x2_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x3(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 38 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x3_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X3
+	VPBROADCASTB X3, Y3
+
+mulAvxTwo_5x3_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y0
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y1
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
 	VPXOR   Y4, Y2, Y2
 
 	// Load and process 32 bytes from input 4 to 3 outputs
@@ -9067,48 +17270,46 @@ mulAvxTwo_5x3_end:
 // Requires: AVX, AVX2, SSE2
 TEXT ·mulAvxTwo_5x3_64(SB), $0-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 38 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 70 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x06, AX
 	TESTQ AX, AX
 	JZ    mulAvxTwo_5x3_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  out_base+48(FP), R8
-	MOVQ  start+72(FP), R9
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
 
 	// Add start offset to input
-	ADDQ         R9, DX
-	ADDQ         R9, BX
-	ADDQ         R9, SI
-	ADDQ         R9, DI
-	ADDQ         R9, AX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X6
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X6
 	VPBROADCASTB X6, Y6
-	MOVQ         n+80(FP), R10
-	SHRQ         $0x06, R10
 
 mulAvxTwo_5x3_64_loop:
-	// Clear 3 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 64 bytes from input 0 to 3 outputs
-	VMOVDQU (DX), Y11
-	VMOVDQU 32(DX), Y13
-	ADDQ    $0x40, DX
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -9121,35 +17322,29 @@ mulAvxTwo_5x3_64_loop:
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y0, Y0
-	VPXOR   Y9, Y1, Y1
+	VPXOR   Y7, Y8, Y0
+	VPXOR   Y9, Y10, Y1
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y2, Y2
-	VPXOR   Y9, Y3, Y3
+	VPXOR   Y7, Y8, Y2
+	VPXOR   Y9, Y10, Y3
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y13, Y7, Y9
 	VPSHUFB Y11, Y7, Y7
 	VPSHUFB Y14, Y8, Y10
 	VPSHUFB Y12, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y7, Y4, Y4
-	VPXOR   Y9, Y5, Y5
+	VPXOR   Y7, Y8, Y4
+	VPXOR   Y9, Y10, Y5
 
 	// Load and process 64 bytes from input 1 to 3 outputs
-	VMOVDQU (BX), Y11
-	VMOVDQU 32(BX), Y13
-	ADDQ    $0x40, BX
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -9188,9 +17383,9 @@ mulAvxTwo_5x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Load and process 64 bytes from input 2 to 3 outputs
-	VMOVDQU (SI), Y11
-	VMOVDQU 32(SI), Y13
-	ADDQ    $0x40, SI
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -9229,9 +17424,9 @@ mulAvxTwo_5x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Load and process 64 bytes from input 3 to 3 outputs
-	VMOVDQU (DI), Y11
-	VMOVDQU 32(DI), Y13
-	ADDQ    $0x40, DI
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -9270,9 +17465,9 @@ mulAvxTwo_5x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Load and process 64 bytes from input 4 to 3 outputs
-	VMOVDQU (AX), Y11
-	VMOVDQU 32(AX), Y13
-	ADDQ    $0x40, AX
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
 	VPSRLQ  $0x04, Y11, Y12
 	VPSRLQ  $0x04, Y13, Y14
 	VPAND   Y6, Y11, Y11
@@ -9311,36 +17506,35 @@ mulAvxTwo_5x3_64_loop:
 	VPXOR   Y9, Y5, Y5
 
 	// Store 3 outputs
-	MOVQ    (R8), R11
-	VMOVDQU Y0, (R11)(R9*1)
-	VMOVDQU Y1, 32(R11)(R9*1)
-	MOVQ    24(R8), R11
-	VMOVDQU Y2, (R11)(R9*1)
-	VMOVDQU Y3, 32(R11)(R9*1)
-	MOVQ    48(R8), R11
-	VMOVDQU Y4, (R11)(R9*1)
-	VMOVDQU Y5, 32(R11)(R9*1)
-
-	// Prepare for next loop
-	ADDQ $0x40, R9
-	DECQ R10
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R11)
+	VMOVDQU Y3, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y4, (R9)
+	VMOVDQU Y5, 32(R9)
+	ADDQ    $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
 	JNZ  mulAvxTwo_5x3_64_loop
 	VZEROUPPER
 
 mulAvxTwo_5x3_64_end:
 	RET
 
-// func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x3Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x4(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_5x3Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 49 YMM used
+	// Full registers estimated 38 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x4_end
+	JZ    mulAvxTwo_5x3Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -9350,63 +17544,508 @@ TEXT ·mulAvxTwo_5x4(SB), NOSPLIT, $0-88
 	MOVQ  out_base+48(FP), R9
 	MOVQ  (R9), R10
 	MOVQ  24(R9), R11
-	MOVQ  48(R9), R12
-	MOVQ  72(R9), R9
-	MOVQ  start+72(FP), R13
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
 
 	// Add start offset to output
-	ADDQ R13, R10
-	ADDQ R13, R11
-	ADDQ R13, R12
-	ADDQ R13, R9
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
 
 	// Add start offset to input
-	ADDQ         R13, BX
-	ADDQ         R13, SI
-	ADDQ         R13, DI
-	ADDQ         R13, R8
-	ADDQ         R13, DX
-	MOVQ         $0x0000000f, R13
-	MOVQ         R13, X4
-	VPBROADCASTB X4, Y4
-
-mulAvxTwo_5x4_loop:
-	// Clear 4 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X3
+	VPBROADCASTB X3, Y3
 
-	// Load and process 32 bytes from input 0 to 4 outputs
-	VMOVDQU (BX), Y7
+mulAvxTwo_5x3Xor_loop:
+	// Load and process 32 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y6
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y7, Y8
-	VPAND   Y4, Y7, Y7
-	VPAND   Y4, Y8, Y8
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y4
+	VMOVDQU 32(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y4
+	VMOVDQU 96(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU (R9), Y2
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y6
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 192(CX), Y4
+	VMOVDQU 224(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 256(CX), Y4
+	VMOVDQU 288(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 320(CX), Y4
+	VMOVDQU 352(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y6
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 384(CX), Y4
+	VMOVDQU 416(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 448(CX), Y4
+	VMOVDQU 480(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 512(CX), Y4
+	VMOVDQU 544(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y6
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 576(CX), Y4
+	VMOVDQU 608(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 640(CX), Y4
+	VMOVDQU 672(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 704(CX), Y4
+	VMOVDQU 736(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Load and process 32 bytes from input 4 to 3 outputs
+	VMOVDQU (DX), Y6
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPAND   Y3, Y6, Y6
+	VPAND   Y3, Y7, Y7
+	VMOVDQU 768(CX), Y4
+	VMOVDQU 800(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y0, Y0
+	VMOVDQU 832(CX), Y4
+	VMOVDQU 864(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y1, Y1
+	VMOVDQU 896(CX), Y4
+	VMOVDQU 928(CX), Y5
+	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y7, Y5, Y5
+	VPXOR   Y4, Y5, Y4
+	VPXOR   Y4, Y2, Y2
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x3Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x3Xor_end:
+	RET
+
+// func mulAvxTwo_5x3_64Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x3_64Xor(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 70 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x3_64Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R9
+	MOVQ  start+72(FP), R12
+
+	// Add start offset to output
+	ADDQ R12, R10
+	ADDQ R12, R11
+	ADDQ R12, R9
+
+	// Add start offset to input
+	ADDQ         R12, BX
+	ADDQ         R12, SI
+	ADDQ         R12, DI
+	ADDQ         R12, R8
+	ADDQ         R12, DX
+	MOVQ         $0x0000000f, R12
+	MOVQ         R12, X6
+	VPBROADCASTB X6, Y6
+
+mulAvxTwo_5x3_64Xor_loop:
+	// Load 3 outputs
+	VMOVDQU (R10), Y0
+	VMOVDQU 32(R10), Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 32(R11), Y3
+	VMOVDQU (R9), Y4
+	VMOVDQU 32(R9), Y5
+
+	// Load and process 64 bytes from input 0 to 3 outputs
+	VMOVDQU (BX), Y11
+	VMOVDQU 32(BX), Y13
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 1 to 3 outputs
+	VMOVDQU (SI), Y11
+	VMOVDQU 32(SI), Y13
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 2 to 3 outputs
+	VMOVDQU (DI), Y11
+	VMOVDQU 32(DI), Y13
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 3 to 3 outputs
+	VMOVDQU (R8), Y11
+	VMOVDQU 32(R8), Y13
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Load and process 64 bytes from input 4 to 3 outputs
+	VMOVDQU (DX), Y11
+	VMOVDQU 32(DX), Y13
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y6, Y11, Y11
+	VPAND   Y6, Y13, Y13
+	VPAND   Y6, Y12, Y12
+	VPAND   Y6, Y14, Y14
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y0, Y0
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y2, Y2
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y13, Y7, Y9
+	VPSHUFB Y11, Y7, Y7
+	VPSHUFB Y14, Y8, Y10
+	VPSHUFB Y12, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y7, Y4, Y4
+	VPXOR   Y9, Y5, Y5
+
+	// Store 3 outputs
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
+	VMOVDQU Y2, (R11)
+	VMOVDQU Y3, 32(R11)
+	ADDQ    $0x40, R11
+	VMOVDQU Y4, (R9)
+	VMOVDQU Y5, 32(R9)
+	ADDQ    $0x40, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x3_64Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x3_64Xor_end:
+	RET
+
+// func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x4(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 49 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x4_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R9
+	MOVQ  start+72(FP), R13
+
+	// Add start offset to output
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R9
+
+	// Add start offset to input
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X4
+	VPBROADCASTB X4, Y4
+
+mulAvxTwo_5x4_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y0, Y0
+	VPXOR   Y5, Y6, Y0
 	VMOVDQU 64(CX), Y5
 	VMOVDQU 96(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y1, Y1
+	VPXOR   Y5, Y6, Y1
 	VMOVDQU 128(CX), Y5
 	VMOVDQU 160(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y2, Y2
+	VPXOR   Y5, Y6, Y2
 	VMOVDQU 192(CX), Y5
 	VMOVDQU 224(CX), Y6
 	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y5, Y3, Y3
+	VPXOR   Y5, Y6, Y3
 
 	// Load and process 32 bytes from input 1 to 4 outputs
 	VMOVDQU (SI), Y7
@@ -9550,17 +18189,17 @@ mulAvxTwo_5x4_loop:
 mulAvxTwo_5x4_end:
 	RET
 
-// func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x4Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x5(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_5x4Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 60 YMM used
+	// Full registers estimated 49 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x5_end
+	JZ    mulAvxTwo_5x4Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -9571,65 +18210,524 @@ TEXT ·mulAvxTwo_5x5(SB), NOSPLIT, $0-88
 	MOVQ  (R9), R10
 	MOVQ  24(R9), R11
 	MOVQ  48(R9), R12
-	MOVQ  72(R9), R13
-	MOVQ  96(R9), R9
-	MOVQ  start+72(FP), R14
+	MOVQ  72(R9), R9
+	MOVQ  start+72(FP), R13
 
 	// Add start offset to output
-	ADDQ R14, R10
-	ADDQ R14, R11
-	ADDQ R14, R12
-	ADDQ R14, R13
-	ADDQ R14, R9
+	ADDQ R13, R10
+	ADDQ R13, R11
+	ADDQ R13, R12
+	ADDQ R13, R9
 
 	// Add start offset to input
-	ADDQ         R14, BX
-	ADDQ         R14, SI
-	ADDQ         R14, DI
-	ADDQ         R14, R8
-	ADDQ         R14, DX
-	MOVQ         $0x0000000f, R14
-	MOVQ         R14, X5
-	VPBROADCASTB X5, Y5
-
-mulAvxTwo_5x5_loop:
-	// Clear 5 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
+	ADDQ         R13, BX
+	ADDQ         R13, SI
+	ADDQ         R13, DI
+	ADDQ         R13, R8
+	ADDQ         R13, DX
+	MOVQ         $0x0000000f, R13
+	MOVQ         R13, X4
+	VPBROADCASTB X4, Y4
 
-	// Load and process 32 bytes from input 0 to 5 outputs
-	VMOVDQU (BX), Y8
+mulAvxTwo_5x4Xor_loop:
+	// Load and process 32 bytes from input 0 to 4 outputs
+	VMOVDQU (BX), Y7
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y8, Y9
-	VPAND   Y5, Y8, Y8
-	VPAND   Y5, Y9, Y9
-	VMOVDQU (CX), Y6
-	VMOVDQU 32(CX), Y7
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y5
+	VMOVDQU 32(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y0, Y0
-	VMOVDQU 64(CX), Y6
-	VMOVDQU 96(CX), Y7
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y5
+	VMOVDQU 96(CX), Y6
+	VPSHUFB Y7, Y5, Y5
 	VPSHUFB Y8, Y6, Y6
-	VPSHUFB Y9, Y7, Y7
-	VPXOR   Y6, Y7, Y6
-	VPXOR   Y6, Y1, Y1
-	VMOVDQU 128(CX), Y6
-	VMOVDQU 160(CX), Y7
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y5
+	VMOVDQU 160(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU (R9), Y3
+	VMOVDQU 192(CX), Y5
+	VMOVDQU 224(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 1 to 4 outputs
+	VMOVDQU (SI), Y7
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 256(CX), Y5
+	VMOVDQU 288(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 320(CX), Y5
+	VMOVDQU 352(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 384(CX), Y5
+	VMOVDQU 416(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 448(CX), Y5
+	VMOVDQU 480(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 2 to 4 outputs
+	VMOVDQU (DI), Y7
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 512(CX), Y5
+	VMOVDQU 544(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 576(CX), Y5
+	VMOVDQU 608(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 640(CX), Y5
+	VMOVDQU 672(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 704(CX), Y5
+	VMOVDQU 736(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 3 to 4 outputs
+	VMOVDQU (R8), Y7
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 768(CX), Y5
+	VMOVDQU 800(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 832(CX), Y5
+	VMOVDQU 864(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 896(CX), Y5
+	VMOVDQU 928(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 960(CX), Y5
+	VMOVDQU 992(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Load and process 32 bytes from input 4 to 4 outputs
+	VMOVDQU (DX), Y7
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y7, Y8
+	VPAND   Y4, Y7, Y7
+	VPAND   Y4, Y8, Y8
+	VMOVDQU 1024(CX), Y5
+	VMOVDQU 1056(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y0, Y0
+	VMOVDQU 1088(CX), Y5
+	VMOVDQU 1120(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y1, Y1
+	VMOVDQU 1152(CX), Y5
+	VMOVDQU 1184(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y2, Y2
+	VMOVDQU 1216(CX), Y5
+	VMOVDQU 1248(CX), Y6
+	VPSHUFB Y7, Y5, Y5
+	VPSHUFB Y8, Y6, Y6
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y5, Y3, Y3
+
+	// Store 4 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x4Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x4Xor_end:
+	RET
+
+// func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x5(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 60 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x5_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R9
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R9
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_5x5_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y0
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y1
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y2
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y3
+	VMOVDQU 256(CX), Y6
+	VMOVDQU 288(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y4
+
+	// Load and process 32 bytes from input 1 to 5 outputs
+	VMOVDQU (SI), Y8
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 320(CX), Y6
+	VMOVDQU 352(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 384(CX), Y6
+	VMOVDQU 416(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 448(CX), Y6
+	VMOVDQU 480(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 512(CX), Y6
+	VMOVDQU 544(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 576(CX), Y6
+	VMOVDQU 608(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 2 to 5 outputs
+	VMOVDQU (DI), Y8
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 640(CX), Y6
+	VMOVDQU 672(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 704(CX), Y6
+	VMOVDQU 736(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 768(CX), Y6
+	VMOVDQU 800(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 832(CX), Y6
+	VMOVDQU 864(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 896(CX), Y6
+	VMOVDQU 928(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 3 to 5 outputs
+	VMOVDQU (R8), Y8
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 960(CX), Y6
+	VMOVDQU 992(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 1024(CX), Y6
+	VMOVDQU 1056(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 1088(CX), Y6
+	VMOVDQU 1120(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 1152(CX), Y6
+	VMOVDQU 1184(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 1216(CX), Y6
+	VMOVDQU 1248(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Load and process 32 bytes from input 4 to 5 outputs
+	VMOVDQU (DX), Y8
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU 1280(CX), Y6
+	VMOVDQU 1312(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU 1344(CX), Y6
+	VMOVDQU 1376(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU 1408(CX), Y6
+	VMOVDQU 1440(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y2, Y2
+	VMOVDQU 1472(CX), Y6
+	VMOVDQU 1504(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y3, Y3
+	VMOVDQU 1536(CX), Y6
+	VMOVDQU 1568(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y4, Y4
+
+	// Store 5 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x5_loop
+	VZEROUPPER
+
+mulAvxTwo_5x5_end:
+	RET
+
+// func mulAvxTwo_5x5Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x5Xor(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 60 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x5Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R9
+	MOVQ  start+72(FP), R14
+
+	// Add start offset to output
+	ADDQ R14, R10
+	ADDQ R14, R11
+	ADDQ R14, R12
+	ADDQ R14, R13
+	ADDQ R14, R9
+
+	// Add start offset to input
+	ADDQ         R14, BX
+	ADDQ         R14, SI
+	ADDQ         R14, DI
+	ADDQ         R14, R8
+	ADDQ         R14, DX
+	MOVQ         $0x0000000f, R14
+	MOVQ         R14, X5
+	VPBROADCASTB X5, Y5
+
+mulAvxTwo_5x5Xor_loop:
+	// Load and process 32 bytes from input 0 to 5 outputs
+	VMOVDQU (BX), Y8
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y8, Y9
+	VPAND   Y5, Y8, Y8
+	VPAND   Y5, Y9, Y9
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y6
+	VMOVDQU 32(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y0, Y0
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y6
+	VMOVDQU 96(CX), Y7
+	VPSHUFB Y8, Y6, Y6
+	VPSHUFB Y9, Y7, Y7
+	VPXOR   Y6, Y7, Y6
+	VPXOR   Y6, Y1, Y1
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y6
+	VMOVDQU 160(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y2, Y2
+	VMOVDQU (R13), Y3
 	VMOVDQU 192(CX), Y6
 	VMOVDQU 224(CX), Y7
 	VPSHUFB Y8, Y6, Y6
 	VPSHUFB Y9, Y7, Y7
 	VPXOR   Y6, Y7, Y6
 	VPXOR   Y6, Y3, Y3
+	VMOVDQU (R9), Y4
 	VMOVDQU 256(CX), Y6
 	VMOVDQU 288(CX), Y7
 	VPSHUFB Y8, Y6, Y6
@@ -9799,10 +18897,10 @@ mulAvxTwo_5x5_loop:
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_5x5_loop
+	JNZ  mulAvxTwo_5x5Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_5x5_end:
+mulAvxTwo_5x5Xor_end:
 	RET
 
 // func mulAvxTwo_5x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
@@ -9850,14 +18948,6 @@ TEXT ·mulAvxTwo_5x6(SB), NOSPLIT, $0-88
 	VPBROADCASTB X6, Y6
 
 mulAvxTwo_5x6_loop:
-	// Clear 6 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-
 	// Load and process 32 bytes from input 0 to 6 outputs
 	VMOVDQU (BX), Y9
 	ADDQ    $0x20, BX
@@ -9868,38 +18958,32 @@ mulAvxTwo_5x6_loop:
 	VMOVDQU 32(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y0, Y0
+	VPXOR   Y7, Y8, Y0
 	VMOVDQU 64(CX), Y7
 	VMOVDQU 96(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y1, Y1
+	VPXOR   Y7, Y8, Y1
 	VMOVDQU 128(CX), Y7
 	VMOVDQU 160(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y2, Y2
+	VPXOR   Y7, Y8, Y2
 	VMOVDQU 192(CX), Y7
 	VMOVDQU 224(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y3, Y3
+	VPXOR   Y7, Y8, Y3
 	VMOVDQU 256(CX), Y7
 	VMOVDQU 288(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y4, Y4
+	VPXOR   Y7, Y8, Y4
 	VMOVDQU 320(CX), Y7
 	VMOVDQU 352(CX), Y8
 	VPSHUFB Y9, Y7, Y7
 	VPSHUFB Y10, Y8, Y8
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y7, Y5, Y5
+	VPXOR   Y7, Y8, Y5
 
 	// Load and process 32 bytes from input 1 to 6 outputs
 	VMOVDQU (SI), Y9
@@ -10095,17 +19179,17 @@ mulAvxTwo_5x6_loop:
 mulAvxTwo_5x6_end:
 	RET
 
-// func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x6Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x7(SB), NOSPLIT, $8-88
+TEXT ·mulAvxTwo_5x6Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 82 YMM used
+	// Full registers estimated 71 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x7_end
+	JZ    mulAvxTwo_5x6Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -10118,42 +19202,321 @@ TEXT ·mulAvxTwo_5x7(SB), NOSPLIT, $8-88
 	MOVQ  48(R9), R12
 	MOVQ  72(R9), R13
 	MOVQ  96(R9), R14
-	MOVQ  120(R9), R15
-	MOVQ  144(R9), R9
-	MOVQ  start+72(FP), BP
+	MOVQ  120(R9), R9
+	MOVQ  start+72(FP), R15
 
 	// Add start offset to output
-	ADDQ BP, R10
-	ADDQ BP, R11
-	ADDQ BP, R12
-	ADDQ BP, R13
-	ADDQ BP, R14
-	ADDQ BP, R15
-	ADDQ BP, R9
+	ADDQ R15, R10
+	ADDQ R15, R11
+	ADDQ R15, R12
+	ADDQ R15, R13
+	ADDQ R15, R14
+	ADDQ R15, R9
 
 	// Add start offset to input
-	ADDQ         BP, BX
-	ADDQ         BP, SI
-	ADDQ         BP, DI
-	ADDQ         BP, R8
-	ADDQ         BP, DX
-	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X7
-	VPBROADCASTB X7, Y7
-
-mulAvxTwo_5x7_loop:
-	// Clear 7 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
+	ADDQ         R15, BX
+	ADDQ         R15, SI
+	ADDQ         R15, DI
+	ADDQ         R15, R8
+	ADDQ         R15, DX
+	MOVQ         $0x0000000f, R15
+	MOVQ         R15, X6
+	VPBROADCASTB X6, Y6
 
-	// Load and process 32 bytes from input 0 to 7 outputs
-	VMOVDQU (BX), Y10
-	ADDQ    $0x20, BX
+mulAvxTwo_5x6Xor_loop:
+	// Load and process 32 bytes from input 0 to 6 outputs
+	VMOVDQU (BX), Y9
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y7
+	VMOVDQU 32(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y7
+	VMOVDQU 96(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y7
+	VMOVDQU 160(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU (R13), Y3
+	VMOVDQU 192(CX), Y7
+	VMOVDQU 224(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU (R14), Y4
+	VMOVDQU 256(CX), Y7
+	VMOVDQU 288(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU (R9), Y5
+	VMOVDQU 320(CX), Y7
+	VMOVDQU 352(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 1 to 6 outputs
+	VMOVDQU (SI), Y9
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 384(CX), Y7
+	VMOVDQU 416(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 448(CX), Y7
+	VMOVDQU 480(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 512(CX), Y7
+	VMOVDQU 544(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 576(CX), Y7
+	VMOVDQU 608(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 640(CX), Y7
+	VMOVDQU 672(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 704(CX), Y7
+	VMOVDQU 736(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 2 to 6 outputs
+	VMOVDQU (DI), Y9
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 768(CX), Y7
+	VMOVDQU 800(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 832(CX), Y7
+	VMOVDQU 864(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 896(CX), Y7
+	VMOVDQU 928(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 960(CX), Y7
+	VMOVDQU 992(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 1024(CX), Y7
+	VMOVDQU 1056(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 1088(CX), Y7
+	VMOVDQU 1120(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 3 to 6 outputs
+	VMOVDQU (R8), Y9
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1152(CX), Y7
+	VMOVDQU 1184(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 1216(CX), Y7
+	VMOVDQU 1248(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 1280(CX), Y7
+	VMOVDQU 1312(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 1344(CX), Y7
+	VMOVDQU 1376(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 1408(CX), Y7
+	VMOVDQU 1440(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 1472(CX), Y7
+	VMOVDQU 1504(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Load and process 32 bytes from input 4 to 6 outputs
+	VMOVDQU (DX), Y9
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y9, Y10
+	VPAND   Y6, Y9, Y9
+	VPAND   Y6, Y10, Y10
+	VMOVDQU 1536(CX), Y7
+	VMOVDQU 1568(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y0, Y0
+	VMOVDQU 1600(CX), Y7
+	VMOVDQU 1632(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y1, Y1
+	VMOVDQU 1664(CX), Y7
+	VMOVDQU 1696(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y2, Y2
+	VMOVDQU 1728(CX), Y7
+	VMOVDQU 1760(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y3, Y3
+	VMOVDQU 1792(CX), Y7
+	VMOVDQU 1824(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y4, Y4
+	VMOVDQU 1856(CX), Y7
+	VMOVDQU 1888(CX), Y8
+	VPSHUFB Y9, Y7, Y7
+	VPSHUFB Y10, Y8, Y8
+	VPXOR   Y7, Y8, Y7
+	VPXOR   Y7, Y5, Y5
+
+	// Store 6 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y5, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x6Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x6Xor_end:
+	RET
+
+// func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x7(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 82 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x7_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R14
+	MOVQ  120(R9), R15
+	MOVQ  144(R9), R9
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R9
+
+	// Add start offset to input
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, R8
+	ADDQ         BP, DX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
+
+mulAvxTwo_5x7_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
 	VPSRLQ  $0x04, Y10, Y11
 	VPAND   Y7, Y10, Y10
 	VPAND   Y7, Y11, Y11
@@ -10161,44 +19524,37 @@ mulAvxTwo_5x7_loop:
 	VMOVDQU 32(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y0, Y0
+	VPXOR   Y8, Y9, Y0
 	VMOVDQU 64(CX), Y8
 	VMOVDQU 96(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y1, Y1
+	VPXOR   Y8, Y9, Y1
 	VMOVDQU 128(CX), Y8
 	VMOVDQU 160(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y2, Y2
+	VPXOR   Y8, Y9, Y2
 	VMOVDQU 192(CX), Y8
 	VMOVDQU 224(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y3, Y3
+	VPXOR   Y8, Y9, Y3
 	VMOVDQU 256(CX), Y8
 	VMOVDQU 288(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y4, Y4
+	VPXOR   Y8, Y9, Y4
 	VMOVDQU 320(CX), Y8
 	VMOVDQU 352(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y5, Y5
+	VPXOR   Y8, Y9, Y5
 	VMOVDQU 384(CX), Y8
 	VMOVDQU 416(CX), Y9
 	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPXOR   Y8, Y9, Y8
-	VPXOR   Y8, Y6, Y6
+	VPXOR   Y8, Y9, Y6
 
 	// Load and process 32 bytes from input 1 to 7 outputs
 	VMOVDQU (SI), Y10
@@ -10420,168 +19776,473 @@ mulAvxTwo_5x7_loop:
 mulAvxTwo_5x7_end:
 	RET
 
-// func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x7Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x8(SB), NOSPLIT, $8-88
+TEXT ·mulAvxTwo_5x7Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 93 YMM used
+	// Full registers estimated 82 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x8_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), AX
-	MOVQ  out_base+48(FP), R8
-	MOVQ  (R8), R9
-	MOVQ  24(R8), R10
-	MOVQ  48(R8), R11
-	MOVQ  72(R8), R12
-	MOVQ  96(R8), R13
-	MOVQ  120(R8), R14
-	MOVQ  144(R8), R15
-	MOVQ  168(R8), R8
+	JZ    mulAvxTwo_5x7Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  (R9), R10
+	MOVQ  24(R9), R11
+	MOVQ  48(R9), R12
+	MOVQ  72(R9), R13
+	MOVQ  96(R9), R14
+	MOVQ  120(R9), R15
+	MOVQ  144(R9), R9
 	MOVQ  start+72(FP), BP
 
 	// Add start offset to output
-	ADDQ BP, R9
 	ADDQ BP, R10
 	ADDQ BP, R11
 	ADDQ BP, R12
 	ADDQ BP, R13
 	ADDQ BP, R14
 	ADDQ BP, R15
-	ADDQ BP, R8
+	ADDQ BP, R9
 
 	// Add start offset to input
-	ADDQ         BP, DX
 	ADDQ         BP, BX
 	ADDQ         BP, SI
 	ADDQ         BP, DI
-	ADDQ         BP, AX
+	ADDQ         BP, R8
+	ADDQ         BP, DX
 	MOVQ         $0x0000000f, BP
-	MOVQ         BP, X8
-	VPBROADCASTB X8, Y8
-	MOVQ         n+80(FP), BP
-	SHRQ         $0x05, BP
-
-mulAvxTwo_5x8_loop:
-	// Clear 8 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
+	MOVQ         BP, X7
+	VPBROADCASTB X7, Y7
 
-	// Load and process 32 bytes from input 0 to 8 outputs
-	VMOVDQU (DX), Y11
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU (CX), Y9
-	VMOVDQU 32(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 64(CX), Y9
-	VMOVDQU 96(CX), Y10
+mulAvxTwo_5x7Xor_loop:
+	// Load and process 32 bytes from input 0 to 7 outputs
+	VMOVDQU (BX), Y10
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU (R10), Y0
+	VMOVDQU (CX), Y8
+	VMOVDQU 32(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 128(CX), Y9
-	VMOVDQU 160(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU (R11), Y1
+	VMOVDQU 64(CX), Y8
+	VMOVDQU 96(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 192(CX), Y9
-	VMOVDQU 224(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU (R12), Y2
+	VMOVDQU 128(CX), Y8
+	VMOVDQU 160(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 256(CX), Y9
-	VMOVDQU 288(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU (R13), Y3
+	VMOVDQU 192(CX), Y8
+	VMOVDQU 224(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 320(CX), Y9
-	VMOVDQU 352(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU (R14), Y4
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 384(CX), Y9
-	VMOVDQU 416(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU (R15), Y5
+	VMOVDQU 320(CX), Y8
+	VMOVDQU 352(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y6, Y6
-	VMOVDQU 448(CX), Y9
-	VMOVDQU 480(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU (R9), Y6
+	VMOVDQU 384(CX), Y8
+	VMOVDQU 416(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y7, Y7
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
 
-	// Load and process 32 bytes from input 1 to 8 outputs
-	VMOVDQU (BX), Y11
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y8, Y11, Y11
-	VPAND   Y8, Y12, Y12
-	VMOVDQU 512(CX), Y9
-	VMOVDQU 544(CX), Y10
-	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y0, Y0
-	VMOVDQU 576(CX), Y9
-	VMOVDQU 608(CX), Y10
+	// Load and process 32 bytes from input 1 to 7 outputs
+	VMOVDQU (SI), Y10
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 448(CX), Y8
+	VMOVDQU 480(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y1, Y1
-	VMOVDQU 640(CX), Y9
-	VMOVDQU 672(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 512(CX), Y8
+	VMOVDQU 544(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y2, Y2
-	VMOVDQU 704(CX), Y9
-	VMOVDQU 736(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 576(CX), Y8
+	VMOVDQU 608(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y3, Y3
-	VMOVDQU 768(CX), Y9
-	VMOVDQU 800(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 640(CX), Y8
+	VMOVDQU 672(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y4, Y4
-	VMOVDQU 832(CX), Y9
-	VMOVDQU 864(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 704(CX), Y8
+	VMOVDQU 736(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
-	VPXOR   Y9, Y10, Y9
-	VPXOR   Y9, Y5, Y5
-	VMOVDQU 896(CX), Y9
-	VMOVDQU 928(CX), Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 768(CX), Y8
+	VMOVDQU 800(CX), Y9
+	VPSHUFB Y10, Y8, Y8
 	VPSHUFB Y11, Y9, Y9
-	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 832(CX), Y8
+	VMOVDQU 864(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 2 to 7 outputs
+	VMOVDQU (DI), Y10
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 896(CX), Y8
+	VMOVDQU 928(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 960(CX), Y8
+	VMOVDQU 992(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 1024(CX), Y8
+	VMOVDQU 1056(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 1088(CX), Y8
+	VMOVDQU 1120(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 1152(CX), Y8
+	VMOVDQU 1184(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 1216(CX), Y8
+	VMOVDQU 1248(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 1280(CX), Y8
+	VMOVDQU 1312(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 3 to 7 outputs
+	VMOVDQU (R8), Y10
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1344(CX), Y8
+	VMOVDQU 1376(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 1408(CX), Y8
+	VMOVDQU 1440(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 1472(CX), Y8
+	VMOVDQU 1504(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 1536(CX), Y8
+	VMOVDQU 1568(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 1600(CX), Y8
+	VMOVDQU 1632(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 1664(CX), Y8
+	VMOVDQU 1696(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 1728(CX), Y8
+	VMOVDQU 1760(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Load and process 32 bytes from input 4 to 7 outputs
+	VMOVDQU (DX), Y10
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y10, Y11
+	VPAND   Y7, Y10, Y10
+	VPAND   Y7, Y11, Y11
+	VMOVDQU 1792(CX), Y8
+	VMOVDQU 1824(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y0, Y0
+	VMOVDQU 1856(CX), Y8
+	VMOVDQU 1888(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y1, Y1
+	VMOVDQU 1920(CX), Y8
+	VMOVDQU 1952(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y2, Y2
+	VMOVDQU 1984(CX), Y8
+	VMOVDQU 2016(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y3, Y3
+	VMOVDQU 2048(CX), Y8
+	VMOVDQU 2080(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y4, Y4
+	VMOVDQU 2112(CX), Y8
+	VMOVDQU 2144(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y5, Y5
+	VMOVDQU 2176(CX), Y8
+	VMOVDQU 2208(CX), Y9
+	VPSHUFB Y10, Y8, Y8
+	VPSHUFB Y11, Y9, Y9
+	VPXOR   Y8, Y9, Y8
+	VPXOR   Y8, Y6, Y6
+
+	// Store 7 outputs
+	VMOVDQU Y0, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y1, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y2, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y3, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y4, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y5, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y6, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_5x7Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x7Xor_end:
+	RET
+
+// func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x8(SB), NOSPLIT, $8-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 93 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x8_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), AX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R15
+	MOVQ  168(R8), R8
+	MOVQ  start+72(FP), BP
+
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
+
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
+
+mulAvxTwo_5x8_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y0
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y1
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y2
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y3
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y4
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y5
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y6
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y7
+
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
 	VPXOR   Y9, Y10, Y9
 	VPXOR   Y9, Y6, Y6
 	VMOVDQU 960(CX), Y9
@@ -10782,129 +20443,469 @@ mulAvxTwo_5x8_loop:
 mulAvxTwo_5x8_end:
 	RET
 
-// func mulAvxTwo_5x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x8Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x9(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_5x8Xor(SB), NOSPLIT, $8-88
 	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 104 YMM used
+	// Destination kept in GP registers
+	// Full registers estimated 93 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x9_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), DX
-	MOVQ  out_base+48(FP), R9
-	MOVQ  start+72(FP), R10
+	JZ    mulAvxTwo_5x8Xor_end
+	MOVQ  in_base+24(FP), AX
+	MOVQ  (AX), DX
+	MOVQ  24(AX), BX
+	MOVQ  48(AX), SI
+	MOVQ  72(AX), DI
+	MOVQ  96(AX), AX
+	MOVQ  out_base+48(FP), R8
+	MOVQ  (R8), R9
+	MOVQ  24(R8), R10
+	MOVQ  48(R8), R11
+	MOVQ  72(R8), R12
+	MOVQ  96(R8), R13
+	MOVQ  120(R8), R14
+	MOVQ  144(R8), R15
+	MOVQ  168(R8), R8
+	MOVQ  start+72(FP), BP
 
-	// Add start offset to input
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, DX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X9
-	VPBROADCASTB X9, Y9
+	// Add start offset to output
+	ADDQ BP, R9
+	ADDQ BP, R10
+	ADDQ BP, R11
+	ADDQ BP, R12
+	ADDQ BP, R13
+	ADDQ BP, R14
+	ADDQ BP, R15
+	ADDQ BP, R8
 
-mulAvxTwo_5x9_loop:
-	// Clear 9 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
+	// Add start offset to input
+	ADDQ         BP, DX
+	ADDQ         BP, BX
+	ADDQ         BP, SI
+	ADDQ         BP, DI
+	ADDQ         BP, AX
+	MOVQ         $0x0000000f, BP
+	MOVQ         BP, X8
+	VPBROADCASTB X8, Y8
+	MOVQ         n+80(FP), BP
+	SHRQ         $0x05, BP
 
-	// Load and process 32 bytes from input 0 to 9 outputs
-	VMOVDQU (BX), Y12
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU (CX), Y10
-	VMOVDQU 32(CX), Y11
-	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 64(CX), Y10
-	VMOVDQU 96(CX), Y11
+mulAvxTwo_5x8Xor_loop:
+	// Load and process 32 bytes from input 0 to 8 outputs
+	VMOVDQU (DX), Y11
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU (R9), Y0
+	VMOVDQU (CX), Y9
+	VMOVDQU 32(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 128(CX), Y10
-	VMOVDQU 160(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU (R10), Y1
+	VMOVDQU 64(CX), Y9
+	VMOVDQU 96(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y2, Y2
-	VMOVDQU 192(CX), Y10
-	VMOVDQU 224(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU (R11), Y2
+	VMOVDQU 128(CX), Y9
+	VMOVDQU 160(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y3, Y3
-	VMOVDQU 256(CX), Y10
-	VMOVDQU 288(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU (R12), Y3
+	VMOVDQU 192(CX), Y9
+	VMOVDQU 224(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y4, Y4
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU (R13), Y4
+	VMOVDQU 256(CX), Y9
+	VMOVDQU 288(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y5, Y5
-	VMOVDQU 384(CX), Y10
-	VMOVDQU 416(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU (R14), Y5
+	VMOVDQU 320(CX), Y9
+	VMOVDQU 352(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y6, Y6
-	VMOVDQU 448(CX), Y10
-	VMOVDQU 480(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU (R15), Y6
+	VMOVDQU 384(CX), Y9
+	VMOVDQU 416(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y7, Y7
-	VMOVDQU 512(CX), Y10
-	VMOVDQU 544(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU (R8), Y7
+	VMOVDQU 448(CX), Y9
+	VMOVDQU 480(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y8, Y8
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
 
-	// Load and process 32 bytes from input 1 to 9 outputs
-	VMOVDQU (SI), Y12
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y12, Y13
-	VPAND   Y9, Y12, Y12
-	VPAND   Y9, Y13, Y13
-	VMOVDQU 576(CX), Y10
-	VMOVDQU 608(CX), Y11
+	// Load and process 32 bytes from input 1 to 8 outputs
+	VMOVDQU (BX), Y11
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 512(CX), Y9
+	VMOVDQU 544(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y0, Y0
-	VMOVDQU 640(CX), Y10
-	VMOVDQU 672(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 576(CX), Y9
+	VMOVDQU 608(CX), Y10
+	VPSHUFB Y11, Y9, Y9
 	VPSHUFB Y12, Y10, Y10
-	VPSHUFB Y13, Y11, Y11
-	VPXOR   Y10, Y11, Y10
-	VPXOR   Y10, Y1, Y1
-	VMOVDQU 704(CX), Y10
-	VMOVDQU 736(CX), Y11
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 640(CX), Y9
+	VMOVDQU 672(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 704(CX), Y9
+	VMOVDQU 736(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 768(CX), Y9
+	VMOVDQU 800(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 832(CX), Y9
+	VMOVDQU 864(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 896(CX), Y9
+	VMOVDQU 928(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 960(CX), Y9
+	VMOVDQU 992(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 2 to 8 outputs
+	VMOVDQU (SI), Y11
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1024(CX), Y9
+	VMOVDQU 1056(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 1088(CX), Y9
+	VMOVDQU 1120(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 1152(CX), Y9
+	VMOVDQU 1184(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 1216(CX), Y9
+	VMOVDQU 1248(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 1280(CX), Y9
+	VMOVDQU 1312(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 1344(CX), Y9
+	VMOVDQU 1376(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 1408(CX), Y9
+	VMOVDQU 1440(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 1472(CX), Y9
+	VMOVDQU 1504(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 3 to 8 outputs
+	VMOVDQU (DI), Y11
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 1536(CX), Y9
+	VMOVDQU 1568(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 1600(CX), Y9
+	VMOVDQU 1632(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 1664(CX), Y9
+	VMOVDQU 1696(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 1728(CX), Y9
+	VMOVDQU 1760(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 1792(CX), Y9
+	VMOVDQU 1824(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 1856(CX), Y9
+	VMOVDQU 1888(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 1920(CX), Y9
+	VMOVDQU 1952(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 1984(CX), Y9
+	VMOVDQU 2016(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Load and process 32 bytes from input 4 to 8 outputs
+	VMOVDQU (AX), Y11
+	ADDQ    $0x20, AX
+	VPSRLQ  $0x04, Y11, Y12
+	VPAND   Y8, Y11, Y11
+	VPAND   Y8, Y12, Y12
+	VMOVDQU 2048(CX), Y9
+	VMOVDQU 2080(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y0, Y0
+	VMOVDQU 2112(CX), Y9
+	VMOVDQU 2144(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y1, Y1
+	VMOVDQU 2176(CX), Y9
+	VMOVDQU 2208(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y2, Y2
+	VMOVDQU 2240(CX), Y9
+	VMOVDQU 2272(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y3, Y3
+	VMOVDQU 2304(CX), Y9
+	VMOVDQU 2336(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y4, Y4
+	VMOVDQU 2368(CX), Y9
+	VMOVDQU 2400(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y5, Y5
+	VMOVDQU 2432(CX), Y9
+	VMOVDQU 2464(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y6, Y6
+	VMOVDQU 2496(CX), Y9
+	VMOVDQU 2528(CX), Y10
+	VPSHUFB Y11, Y9, Y9
+	VPSHUFB Y12, Y10, Y10
+	VPXOR   Y9, Y10, Y9
+	VPXOR   Y9, Y7, Y7
+
+	// Store 8 outputs
+	VMOVDQU Y0, (R9)
+	ADDQ    $0x20, R9
+	VMOVDQU Y1, (R10)
+	ADDQ    $0x20, R10
+	VMOVDQU Y2, (R11)
+	ADDQ    $0x20, R11
+	VMOVDQU Y3, (R12)
+	ADDQ    $0x20, R12
+	VMOVDQU Y4, (R13)
+	ADDQ    $0x20, R13
+	VMOVDQU Y5, (R14)
+	ADDQ    $0x20, R14
+	VMOVDQU Y6, (R15)
+	ADDQ    $0x20, R15
+	VMOVDQU Y7, (R8)
+	ADDQ    $0x20, R8
+
+	// Prepare for next loop
+	DECQ BP
+	JNZ  mulAvxTwo_5x8Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x8Xor_end:
+	RET
+
+// func mulAvxTwo_5x9(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x9(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 104 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x9_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X9
+	VPBROADCASTB X9, Y9
+
+mulAvxTwo_5x9_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y0
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y1
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y2
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y3
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y4
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y5
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y6
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y7
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y8
+
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
 	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
 	VPXOR   Y10, Y11, Y10
@@ -11158,17 +21159,17 @@ mulAvxTwo_5x9_loop:
 mulAvxTwo_5x9_end:
 	RET
 
-// func mulAvxTwo_5x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x9Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_5x10(SB), NOSPLIT, $0-88
+TEXT ·mulAvxTwo_5x9Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 115 YMM used
+	// Full registers estimated 104 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
 	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_5x10_end
+	JZ    mulAvxTwo_5x9Xor_end
 	MOVQ  in_base+24(FP), DX
 	MOVQ  (DX), BX
 	MOVQ  24(DX), SI
@@ -11185,132 +21186,493 @@ TEXT ·mulAvxTwo_5x10(SB), NOSPLIT, $0-88
 	ADDQ         R10, R8
 	ADDQ         R10, DX
 	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X10
-	VPBROADCASTB X10, Y10
-
-mulAvxTwo_5x10_loop:
-	// Clear 10 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
-	VPXOR Y4, Y4, Y4
-	VPXOR Y5, Y5, Y5
-	VPXOR Y6, Y6, Y6
-	VPXOR Y7, Y7, Y7
-	VPXOR Y8, Y8, Y8
-	VPXOR Y9, Y9, Y9
+	MOVQ         R11, X9
+	VPBROADCASTB X9, Y9
 
-	// Load and process 32 bytes from input 0 to 10 outputs
-	VMOVDQU (BX), Y13
+mulAvxTwo_5x9Xor_loop:
+	// Load and process 32 bytes from input 0 to 9 outputs
+	VMOVDQU (BX), Y12
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU (CX), Y11
-	VMOVDQU 32(CX), Y12
-	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 64(CX), Y11
-	VMOVDQU 96(CX), Y12
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	MOVQ    (R9), R11
+	VMOVDQU (R11)(R10*1), Y0
+	VMOVDQU (CX), Y10
+	VMOVDQU 32(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 128(CX), Y11
-	VMOVDQU 160(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	MOVQ    24(R9), R11
+	VMOVDQU (R11)(R10*1), Y1
+	VMOVDQU 64(CX), Y10
+	VMOVDQU 96(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 192(CX), Y11
-	VMOVDQU 224(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	MOVQ    48(R9), R11
+	VMOVDQU (R11)(R10*1), Y2
+	VMOVDQU 128(CX), Y10
+	VMOVDQU 160(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 256(CX), Y11
-	VMOVDQU 288(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	MOVQ    72(R9), R11
+	VMOVDQU (R11)(R10*1), Y3
+	VMOVDQU 192(CX), Y10
+	VMOVDQU 224(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 320(CX), Y11
-	VMOVDQU 352(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	MOVQ    96(R9), R11
+	VMOVDQU (R11)(R10*1), Y4
+	VMOVDQU 256(CX), Y10
+	VMOVDQU 288(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 384(CX), Y11
-	VMOVDQU 416(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	MOVQ    120(R9), R11
+	VMOVDQU (R11)(R10*1), Y5
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y6, Y6
-	VMOVDQU 448(CX), Y11
-	VMOVDQU 480(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	MOVQ    144(R9), R11
+	VMOVDQU (R11)(R10*1), Y6
+	VMOVDQU 384(CX), Y10
+	VMOVDQU 416(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y7, Y7
-	VMOVDQU 512(CX), Y11
-	VMOVDQU 544(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	MOVQ    168(R9), R11
+	VMOVDQU (R11)(R10*1), Y7
+	VMOVDQU 448(CX), Y10
+	VMOVDQU 480(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y8, Y8
-	VMOVDQU 576(CX), Y11
-	VMOVDQU 608(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	MOVQ    192(R9), R11
+	VMOVDQU (R11)(R10*1), Y8
+	VMOVDQU 512(CX), Y10
+	VMOVDQU 544(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y9, Y9
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
 
-	// Load and process 32 bytes from input 1 to 10 outputs
-	VMOVDQU (SI), Y13
+	// Load and process 32 bytes from input 1 to 9 outputs
+	VMOVDQU (SI), Y12
 	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y13, Y14
-	VPAND   Y10, Y13, Y13
-	VPAND   Y10, Y14, Y14
-	VMOVDQU 640(CX), Y11
-	VMOVDQU 672(CX), Y12
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 576(CX), Y10
+	VMOVDQU 608(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y0, Y0
-	VMOVDQU 704(CX), Y11
-	VMOVDQU 736(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 640(CX), Y10
+	VMOVDQU 672(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y1, Y1
-	VMOVDQU 768(CX), Y11
-	VMOVDQU 800(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 704(CX), Y10
+	VMOVDQU 736(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y2, Y2
-	VMOVDQU 832(CX), Y11
-	VMOVDQU 864(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 768(CX), Y10
+	VMOVDQU 800(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y3, Y3
-	VMOVDQU 896(CX), Y11
-	VMOVDQU 928(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 832(CX), Y10
+	VMOVDQU 864(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y4, Y4
-	VMOVDQU 960(CX), Y11
-	VMOVDQU 992(CX), Y12
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 896(CX), Y10
+	VMOVDQU 928(CX), Y11
+	VPSHUFB Y12, Y10, Y10
 	VPSHUFB Y13, Y11, Y11
-	VPSHUFB Y14, Y12, Y12
-	VPXOR   Y11, Y12, Y11
-	VPXOR   Y11, Y5, Y5
-	VMOVDQU 1024(CX), Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 960(CX), Y10
+	VMOVDQU 992(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 1024(CX), Y10
+	VMOVDQU 1056(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 1088(CX), Y10
+	VMOVDQU 1120(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 2 to 9 outputs
+	VMOVDQU (DI), Y12
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1152(CX), Y10
+	VMOVDQU 1184(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 1216(CX), Y10
+	VMOVDQU 1248(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 1280(CX), Y10
+	VMOVDQU 1312(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 1344(CX), Y10
+	VMOVDQU 1376(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 1408(CX), Y10
+	VMOVDQU 1440(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 1472(CX), Y10
+	VMOVDQU 1504(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 1536(CX), Y10
+	VMOVDQU 1568(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 1600(CX), Y10
+	VMOVDQU 1632(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 1664(CX), Y10
+	VMOVDQU 1696(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 3 to 9 outputs
+	VMOVDQU (R8), Y12
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 1728(CX), Y10
+	VMOVDQU 1760(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 1792(CX), Y10
+	VMOVDQU 1824(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 1856(CX), Y10
+	VMOVDQU 1888(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 1920(CX), Y10
+	VMOVDQU 1952(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 1984(CX), Y10
+	VMOVDQU 2016(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 2048(CX), Y10
+	VMOVDQU 2080(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 2112(CX), Y10
+	VMOVDQU 2144(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 2176(CX), Y10
+	VMOVDQU 2208(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 2240(CX), Y10
+	VMOVDQU 2272(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Load and process 32 bytes from input 4 to 9 outputs
+	VMOVDQU (DX), Y12
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y12, Y13
+	VPAND   Y9, Y12, Y12
+	VPAND   Y9, Y13, Y13
+	VMOVDQU 2304(CX), Y10
+	VMOVDQU 2336(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y0, Y0
+	VMOVDQU 2368(CX), Y10
+	VMOVDQU 2400(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y1, Y1
+	VMOVDQU 2432(CX), Y10
+	VMOVDQU 2464(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y2, Y2
+	VMOVDQU 2496(CX), Y10
+	VMOVDQU 2528(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y3, Y3
+	VMOVDQU 2560(CX), Y10
+	VMOVDQU 2592(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y4, Y4
+	VMOVDQU 2624(CX), Y10
+	VMOVDQU 2656(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y5, Y5
+	VMOVDQU 2688(CX), Y10
+	VMOVDQU 2720(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y6, Y6
+	VMOVDQU 2752(CX), Y10
+	VMOVDQU 2784(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y7, Y7
+	VMOVDQU 2816(CX), Y10
+	VMOVDQU 2848(CX), Y11
+	VPSHUFB Y12, Y10, Y10
+	VPSHUFB Y13, Y11, Y11
+	VPXOR   Y10, Y11, Y10
+	VPXOR   Y10, Y8, Y8
+
+	// Store 9 outputs
+	MOVQ    (R9), R11
+	VMOVDQU Y0, (R11)(R10*1)
+	MOVQ    24(R9), R11
+	VMOVDQU Y1, (R11)(R10*1)
+	MOVQ    48(R9), R11
+	VMOVDQU Y2, (R11)(R10*1)
+	MOVQ    72(R9), R11
+	VMOVDQU Y3, (R11)(R10*1)
+	MOVQ    96(R9), R11
+	VMOVDQU Y4, (R11)(R10*1)
+	MOVQ    120(R9), R11
+	VMOVDQU Y5, (R11)(R10*1)
+	MOVQ    144(R9), R11
+	VMOVDQU Y6, (R11)(R10*1)
+	MOVQ    168(R9), R11
+	VMOVDQU Y7, (R11)(R10*1)
+	MOVQ    192(R9), R11
+	VMOVDQU Y8, (R11)(R10*1)
+
+	// Prepare for next loop
+	ADDQ $0x20, R10
+	DECQ AX
+	JNZ  mulAvxTwo_5x9Xor_loop
+	VZEROUPPER
+
+mulAvxTwo_5x9Xor_end:
+	RET
+
+// func mulAvxTwo_5x10(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_5x10(SB), NOSPLIT, $0-88
+	// Loading no tables to registers
+	// Destination kept on stack
+	// Full registers estimated 115 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x05, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_5x10_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
+	MOVQ  out_base+48(FP), R9
+	MOVQ  start+72(FP), R10
+
+	// Add start offset to input
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X10
+	VPBROADCASTB X10, Y10
+
+mulAvxTwo_5x10_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y0
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y1
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y2
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y3
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y4
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y5
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y6
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y7
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y8
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y9
+
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1024(CX), Y11
 	VMOVDQU 1056(CX), Y12
 	VPSHUFB Y13, Y11, Y11
 	VPSHUFB Y14, Y12, Y12
@@ -11567,508 +21929,760 @@ mulAvxTwo_5x10_loop:
 mulAvxTwo_5x10_end:
 	RET
 
-// func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
-// Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_6x1(SB), NOSPLIT, $0-88
-	// Loading all tables to registers
-	// Destination kept in GP registers
-	// Full registers estimated 16 YMM used
-	MOVQ    n+80(FP), AX
-	MOVQ    matrix_base+0(FP), CX
-	SHRQ    $0x05, AX
-	TESTQ   AX, AX
-	JZ      mulAvxTwo_6x1_end
-	VMOVDQU (CX), Y0
-	VMOVDQU 32(CX), Y1
-	VMOVDQU 64(CX), Y2
-	VMOVDQU 96(CX), Y3
-	VMOVDQU 128(CX), Y4
-	VMOVDQU 160(CX), Y5
-	VMOVDQU 192(CX), Y6
-	VMOVDQU 224(CX), Y7
-	VMOVDQU 256(CX), Y8
-	VMOVDQU 288(CX), Y9
-	VMOVDQU 320(CX), Y10
-	VMOVDQU 352(CX), Y11
-	MOVQ    in_base+24(FP), CX
-	MOVQ    (CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    48(CX), SI
-	MOVQ    72(CX), DI
-	MOVQ    96(CX), R8
-	MOVQ    120(CX), CX
-	MOVQ    out_base+48(FP), R9
-	MOVQ    (R9), R9
-	MOVQ    start+72(FP), R10
-
-	// Add start offset to output
-	ADDQ R10, R9
-
-	// Add start offset to input
-	ADDQ         R10, DX
-	ADDQ         R10, BX
-	ADDQ         R10, SI
-	ADDQ         R10, DI
-	ADDQ         R10, R8
-	ADDQ         R10, CX
-	MOVQ         $0x0000000f, R10
-	MOVQ         R10, X13
-	VPBROADCASTB X13, Y13
-
-mulAvxTwo_6x1_loop:
-	// Clear 1 outputs
-	VPXOR Y12, Y12, Y12
-
-	// Load and process 32 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y14
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y0, Y14
-	VPSHUFB Y15, Y1, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
-
-	// Load and process 32 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y14
-	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y2, Y14
-	VPSHUFB Y15, Y3, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
-
-	// Load and process 32 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y14
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y4, Y14
-	VPSHUFB Y15, Y5, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
-
-	// Load and process 32 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y14
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y6, Y14
-	VPSHUFB Y15, Y7, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
-
-	// Load and process 32 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y14
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y8, Y14
-	VPSHUFB Y15, Y9, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
-
-	// Load and process 32 bytes from input 5 to 1 outputs
-	VMOVDQU (CX), Y14
-	ADDQ    $0x20, CX
-	VPSRLQ  $0x04, Y14, Y15
-	VPAND   Y13, Y14, Y14
-	VPAND   Y13, Y15, Y15
-	VPSHUFB Y14, Y10, Y14
-	VPSHUFB Y15, Y11, Y15
-	VPXOR   Y14, Y15, Y14
-	VPXOR   Y14, Y12, Y12
-
-	// Store 1 outputs
-	VMOVDQU Y12, (R9)
-	ADDQ    $0x20, R9
-
-	// Prepare for next loop
-	DECQ AX
-	JNZ  mulAvxTwo_6x1_loop
-	VZEROUPPER
-
-mulAvxTwo_6x1_end:
-	RET
-
-// func mulAvxTwo_6x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_5x10Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_6x1_64(SB), $0-88
+TEXT ·mulAvxTwo_5x10Xor(SB), NOSPLIT, $0-88
 	// Loading no tables to registers
 	// Destination kept on stack
-	// Full registers estimated 16 YMM used
+	// Full registers estimated 115 YMM used
 	MOVQ  n+80(FP), AX
 	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
+	SHRQ  $0x05, AX
 	TESTQ AX, AX
-	JZ    mulAvxTwo_6x1_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), AX
-	MOVQ  out_base+48(FP), R9
+	JZ    mulAvxTwo_5x10Xor_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), DX
 	MOVQ  out_base+48(FP), R9
 	MOVQ  start+72(FP), R10
 
 	// Add start offset to input
-	ADDQ         R10, DX
 	ADDQ         R10, BX
 	ADDQ         R10, SI
 	ADDQ         R10, DI
 	ADDQ         R10, R8
-	ADDQ         R10, AX
+	ADDQ         R10, DX
 	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X2
-	VPBROADCASTB X2, Y2
-	MOVQ         n+80(FP), R11
-	SHRQ         $0x06, R11
-
-mulAvxTwo_6x1_64_loop:
-	// Clear 1 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+	MOVQ         R11, X10
+	VPBROADCASTB X10, Y10
 
-	// Load and process 64 bytes from input 0 to 1 outputs
-	VMOVDQU (DX), Y6
-	VMOVDQU 32(DX), Y5
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU (CX), Y3
-	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
-
-	// Load and process 64 bytes from input 1 to 1 outputs
-	VMOVDQU (BX), Y6
-	VMOVDQU 32(BX), Y5
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 64(CX), Y3
-	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+mulAvxTwo_5x10Xor_loop:
+	// Load and process 32 bytes from input 0 to 10 outputs
+	VMOVDQU (BX), Y13
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	MOVQ    (R9), R11
+	VMOVDQU (R11)(R10*1), Y0
+	VMOVDQU (CX), Y11
+	VMOVDQU 32(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	MOVQ    24(R9), R11
+	VMOVDQU (R11)(R10*1), Y1
+	VMOVDQU 64(CX), Y11
+	VMOVDQU 96(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	MOVQ    48(R9), R11
+	VMOVDQU (R11)(R10*1), Y2
+	VMOVDQU 128(CX), Y11
+	VMOVDQU 160(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	MOVQ    72(R9), R11
+	VMOVDQU (R11)(R10*1), Y3
+	VMOVDQU 192(CX), Y11
+	VMOVDQU 224(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	MOVQ    96(R9), R11
+	VMOVDQU (R11)(R10*1), Y4
+	VMOVDQU 256(CX), Y11
+	VMOVDQU 288(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	MOVQ    120(R9), R11
+	VMOVDQU (R11)(R10*1), Y5
+	VMOVDQU 320(CX), Y11
+	VMOVDQU 352(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	MOVQ    144(R9), R11
+	VMOVDQU (R11)(R10*1), Y6
+	VMOVDQU 384(CX), Y11
+	VMOVDQU 416(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	MOVQ    168(R9), R11
+	VMOVDQU (R11)(R10*1), Y7
+	VMOVDQU 448(CX), Y11
+	VMOVDQU 480(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	MOVQ    192(R9), R11
+	VMOVDQU (R11)(R10*1), Y8
+	VMOVDQU 512(CX), Y11
+	VMOVDQU 544(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	MOVQ    216(R9), R11
+	VMOVDQU (R11)(R10*1), Y9
+	VMOVDQU 576(CX), Y11
+	VMOVDQU 608(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
 
-	// Load and process 64 bytes from input 2 to 1 outputs
-	VMOVDQU (SI), Y6
-	VMOVDQU 32(SI), Y5
-	ADDQ    $0x40, SI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 128(CX), Y3
-	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 32 bytes from input 1 to 10 outputs
+	VMOVDQU (SI), Y13
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 640(CX), Y11
+	VMOVDQU 672(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 704(CX), Y11
+	VMOVDQU 736(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 768(CX), Y11
+	VMOVDQU 800(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 832(CX), Y11
+	VMOVDQU 864(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 896(CX), Y11
+	VMOVDQU 928(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 960(CX), Y11
+	VMOVDQU 992(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1024(CX), Y11
+	VMOVDQU 1056(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 1088(CX), Y11
+	VMOVDQU 1120(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 1152(CX), Y11
+	VMOVDQU 1184(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 1216(CX), Y11
+	VMOVDQU 1248(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
 
-	// Load and process 64 bytes from input 3 to 1 outputs
-	VMOVDQU (DI), Y6
-	VMOVDQU 32(DI), Y5
-	ADDQ    $0x40, DI
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 192(CX), Y3
-	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 32 bytes from input 2 to 10 outputs
+	VMOVDQU (DI), Y13
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1280(CX), Y11
+	VMOVDQU 1312(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 1344(CX), Y11
+	VMOVDQU 1376(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 1408(CX), Y11
+	VMOVDQU 1440(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 1472(CX), Y11
+	VMOVDQU 1504(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 1536(CX), Y11
+	VMOVDQU 1568(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 1600(CX), Y11
+	VMOVDQU 1632(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 1664(CX), Y11
+	VMOVDQU 1696(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 1728(CX), Y11
+	VMOVDQU 1760(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 1792(CX), Y11
+	VMOVDQU 1824(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 1856(CX), Y11
+	VMOVDQU 1888(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
 
-	// Load and process 64 bytes from input 4 to 1 outputs
-	VMOVDQU (R8), Y6
-	VMOVDQU 32(R8), Y5
-	ADDQ    $0x40, R8
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 256(CX), Y3
-	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 32 bytes from input 3 to 10 outputs
+	VMOVDQU (R8), Y13
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 1920(CX), Y11
+	VMOVDQU 1952(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 1984(CX), Y11
+	VMOVDQU 2016(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 2048(CX), Y11
+	VMOVDQU 2080(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 2112(CX), Y11
+	VMOVDQU 2144(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 2176(CX), Y11
+	VMOVDQU 2208(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 2240(CX), Y11
+	VMOVDQU 2272(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 2304(CX), Y11
+	VMOVDQU 2336(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 2368(CX), Y11
+	VMOVDQU 2400(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 2432(CX), Y11
+	VMOVDQU 2464(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 2496(CX), Y11
+	VMOVDQU 2528(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
 
-	// Load and process 64 bytes from input 5 to 1 outputs
-	VMOVDQU (AX), Y6
-	VMOVDQU 32(AX), Y5
-	ADDQ    $0x40, AX
-	VPSRLQ  $0x04, Y6, Y7
-	VPSRLQ  $0x04, Y5, Y8
-	VPAND   Y2, Y6, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y7, Y7
-	VPAND   Y2, Y8, Y8
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y5
-	VPSHUFB Y6, Y3, Y3
-	VPSHUFB Y8, Y4, Y6
-	VPSHUFB Y7, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y3, Y0, Y0
-	VPXOR   Y5, Y1, Y1
+	// Load and process 32 bytes from input 4 to 10 outputs
+	VMOVDQU (DX), Y13
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y13, Y14
+	VPAND   Y10, Y13, Y13
+	VPAND   Y10, Y14, Y14
+	VMOVDQU 2560(CX), Y11
+	VMOVDQU 2592(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y0, Y0
+	VMOVDQU 2624(CX), Y11
+	VMOVDQU 2656(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y1, Y1
+	VMOVDQU 2688(CX), Y11
+	VMOVDQU 2720(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y2, Y2
+	VMOVDQU 2752(CX), Y11
+	VMOVDQU 2784(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y3, Y3
+	VMOVDQU 2816(CX), Y11
+	VMOVDQU 2848(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y4, Y4
+	VMOVDQU 2880(CX), Y11
+	VMOVDQU 2912(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y5, Y5
+	VMOVDQU 2944(CX), Y11
+	VMOVDQU 2976(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y6, Y6
+	VMOVDQU 3008(CX), Y11
+	VMOVDQU 3040(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y7, Y7
+	VMOVDQU 3072(CX), Y11
+	VMOVDQU 3104(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y8, Y8
+	VMOVDQU 3136(CX), Y11
+	VMOVDQU 3168(CX), Y12
+	VPSHUFB Y13, Y11, Y11
+	VPSHUFB Y14, Y12, Y12
+	VPXOR   Y11, Y12, Y11
+	VPXOR   Y11, Y9, Y9
 
-	// Store 1 outputs
-	MOVQ    (R9), R12
-	VMOVDQU Y0, (R12)(R10*1)
-	VMOVDQU Y1, 32(R12)(R10*1)
+	// Store 10 outputs
+	MOVQ    (R9), R11
+	VMOVDQU Y0, (R11)(R10*1)
+	MOVQ    24(R9), R11
+	VMOVDQU Y1, (R11)(R10*1)
+	MOVQ    48(R9), R11
+	VMOVDQU Y2, (R11)(R10*1)
+	MOVQ    72(R9), R11
+	VMOVDQU Y3, (R11)(R10*1)
+	MOVQ    96(R9), R11
+	VMOVDQU Y4, (R11)(R10*1)
+	MOVQ    120(R9), R11
+	VMOVDQU Y5, (R11)(R10*1)
+	MOVQ    144(R9), R11
+	VMOVDQU Y6, (R11)(R10*1)
+	MOVQ    168(R9), R11
+	VMOVDQU Y7, (R11)(R10*1)
+	MOVQ    192(R9), R11
+	VMOVDQU Y8, (R11)(R10*1)
+	MOVQ    216(R9), R11
+	VMOVDQU Y9, (R11)(R10*1)
 
 	// Prepare for next loop
-	ADDQ $0x40, R10
-	DECQ R11
-	JNZ  mulAvxTwo_6x1_64_loop
+	ADDQ $0x20, R10
+	DECQ AX
+	JNZ  mulAvxTwo_5x10Xor_loop
 	VZEROUPPER
 
-mulAvxTwo_6x1_64_end:
+mulAvxTwo_5x10Xor_end:
 	RET
 
-// func mulAvxTwo_6x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_6x2(SB), NOSPLIT, $0-88
-	// Loading no tables to registers
+TEXT ·mulAvxTwo_6x1(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
 	// Destination kept in GP registers
-	// Full registers estimated 31 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x05, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_6x2_end
-	MOVQ  in_base+24(FP), DX
-	MOVQ  (DX), BX
-	MOVQ  24(DX), SI
-	MOVQ  48(DX), DI
-	MOVQ  72(DX), R8
-	MOVQ  96(DX), R9
-	MOVQ  120(DX), DX
-	MOVQ  out_base+48(FP), R10
-	MOVQ  (R10), R11
-	MOVQ  24(R10), R10
-	MOVQ  start+72(FP), R12
-
-	// Add start offset to output
-	ADDQ R12, R11
-	ADDQ R12, R10
-
-	// Add start offset to input
-	ADDQ         R12, BX
-	ADDQ         R12, SI
-	ADDQ         R12, DI
-	ADDQ         R12, R8
-	ADDQ         R12, R9
-	ADDQ         R12, DX
-	MOVQ         $0x0000000f, R12
-	MOVQ         R12, X2
-	VPBROADCASTB X2, Y2
+	// Full registers estimated 16 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_6x1_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), R8
+	MOVQ    120(CX), CX
+	MOVQ    out_base+48(FP), R9
+	MOVQ    (R9), R9
+	MOVQ    start+72(FP), R10
 
-mulAvxTwo_6x2_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
+	// Add start offset to output
+	ADDQ R10, R9
 
-	// Load and process 32 bytes from input 0 to 2 outputs
-	VMOVDQU (BX), Y5
+	// Add start offset to input
+	ADDQ         R10, DX
+	ADDQ         R10, BX
+	ADDQ         R10, SI
+	ADDQ         R10, DI
+	ADDQ         R10, R8
+	ADDQ         R10, CX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X13
+	VPBROADCASTB X13, Y13
+
+mulAvxTwo_6x1_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y14
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y0, Y14
+	VPSHUFB Y15, Y1, Y15
+	VPXOR   Y14, Y15, Y12
+
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y14
 	ADDQ    $0x20, BX
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y2, Y14
+	VPSHUFB Y15, Y3, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
+
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y14
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y4, Y14
+	VPSHUFB Y15, Y5, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
+
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (DI), Y14
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y6, Y14
+	VPSHUFB Y15, Y7, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
+
+	// Load and process 32 bytes from input 4 to 1 outputs
+	VMOVDQU (R8), Y14
+	ADDQ    $0x20, R8
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y8, Y14
+	VPSHUFB Y15, Y9, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
+
+	// Load and process 32 bytes from input 5 to 1 outputs
+	VMOVDQU (CX), Y14
+	ADDQ    $0x20, CX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y10, Y14
+	VPSHUFB Y15, Y11, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
+
+	// Store 1 outputs
+	VMOVDQU Y12, (R9)
+	ADDQ    $0x20, R9
+
+	// Prepare for next loop
+	DECQ AX
+	JNZ  mulAvxTwo_6x1_loop
+	VZEROUPPER
+
+mulAvxTwo_6x1_end:
+	RET
+
+// func mulAvxTwo_6x1_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// Requires: AVX, AVX2, SSE2
+TEXT ·mulAvxTwo_6x1_64(SB), $0-88
+	// Loading no tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 30 YMM used
+	MOVQ  n+80(FP), AX
+	MOVQ  matrix_base+0(FP), CX
+	SHRQ  $0x06, AX
+	TESTQ AX, AX
+	JZ    mulAvxTwo_6x1_64_end
+	MOVQ  in_base+24(FP), DX
+	MOVQ  (DX), BX
+	MOVQ  24(DX), SI
+	MOVQ  48(DX), DI
+	MOVQ  72(DX), R8
+	MOVQ  96(DX), R9
+	MOVQ  120(DX), DX
+	MOVQ  out_base+48(FP), R10
+	MOVQ  out_base+48(FP), R10
+	MOVQ  (R10), R10
+	MOVQ  start+72(FP), R11
+
+	// Add start offset to output
+	ADDQ R11, R10
+
+	// Add start offset to input
+	ADDQ         R11, BX
+	ADDQ         R11, SI
+	ADDQ         R11, DI
+	ADDQ         R11, R8
+	ADDQ         R11, R9
+	ADDQ         R11, DX
+	MOVQ         $0x0000000f, R11
+	MOVQ         R11, X2
+	VPBROADCASTB X2, Y2
+
+mulAvxTwo_6x1_64_loop:
+	// Load and process 64 bytes from input 0 to 1 outputs
+	VMOVDQU (BX), Y6
+	VMOVDQU 32(BX), Y5
+	ADDQ    $0x40, BX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU (CX), Y3
 	VMOVDQU 32(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
+	VPXOR   Y3, Y4, Y0
+	VPXOR   Y5, Y6, Y1
+
+	// Load and process 64 bytes from input 1 to 1 outputs
+	VMOVDQU (SI), Y6
+	VMOVDQU 32(SI), Y5
+	ADDQ    $0x40, SI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 64(CX), Y3
 	VMOVDQU 96(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
 
-	// Load and process 32 bytes from input 1 to 2 outputs
-	VMOVDQU (SI), Y5
-	ADDQ    $0x20, SI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
+	// Load and process 64 bytes from input 2 to 1 outputs
+	VMOVDQU (DI), Y6
+	VMOVDQU 32(DI), Y5
+	ADDQ    $0x40, DI
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 128(CX), Y3
 	VMOVDQU 160(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
 	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
+
+	// Load and process 64 bytes from input 3 to 1 outputs
+	VMOVDQU (R8), Y6
+	VMOVDQU 32(R8), Y5
+	ADDQ    $0x40, R8
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
+	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 192(CX), Y3
 	VMOVDQU 224(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y5, Y6, Y5
+	VPXOR   Y3, Y0, Y0
+	VPXOR   Y5, Y1, Y1
 
-	// Load and process 32 bytes from input 2 to 2 outputs
-	VMOVDQU (DI), Y5
-	ADDQ    $0x20, DI
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
+	// Load and process 64 bytes from input 4 to 1 outputs
+	VMOVDQU (R9), Y6
+	VMOVDQU 32(R9), Y5
+	ADDQ    $0x40, R9
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
+	VPAND   Y2, Y5, Y5
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
 	VMOVDQU 256(CX), Y3
 	VMOVDQU 288(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 320(CX), Y3
-	VMOVDQU 352(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 3 to 2 outputs
-	VMOVDQU (R8), Y5
-	ADDQ    $0x20, R8
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 384(CX), Y3
-	VMOVDQU 416(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
 	VPXOR   Y3, Y0, Y0
-	VMOVDQU 448(CX), Y3
-	VMOVDQU 480(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y5, Y1, Y1
 
-	// Load and process 32 bytes from input 4 to 2 outputs
-	VMOVDQU (R9), Y5
-	ADDQ    $0x20, R9
-	VPSRLQ  $0x04, Y5, Y6
-	VPAND   Y2, Y5, Y5
+	// Load and process 64 bytes from input 5 to 1 outputs
+	VMOVDQU (DX), Y6
+	VMOVDQU 32(DX), Y5
+	ADDQ    $0x40, DX
+	VPSRLQ  $0x04, Y6, Y7
+	VPSRLQ  $0x04, Y5, Y8
 	VPAND   Y2, Y6, Y6
-	VMOVDQU 512(CX), Y3
-	VMOVDQU 544(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y0, Y0
-	VMOVDQU 576(CX), Y3
-	VMOVDQU 608(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
-
-	// Load and process 32 bytes from input 5 to 2 outputs
-	VMOVDQU (DX), Y5
-	ADDQ    $0x20, DX
-	VPSRLQ  $0x04, Y5, Y6
 	VPAND   Y2, Y5, Y5
-	VPAND   Y2, Y6, Y6
-	VMOVDQU 640(CX), Y3
-	VMOVDQU 672(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
+	VPAND   Y2, Y7, Y7
+	VPAND   Y2, Y8, Y8
+	VMOVDQU 320(CX), Y3
+	VMOVDQU 352(CX), Y4
+	VPSHUFB Y5, Y3, Y5
+	VPSHUFB Y6, Y3, Y3
+	VPSHUFB Y8, Y4, Y6
+	VPSHUFB Y7, Y4, Y4
 	VPXOR   Y3, Y4, Y3
+	VPXOR   Y5, Y6, Y5
 	VPXOR   Y3, Y0, Y0
-	VMOVDQU 704(CX), Y3
-	VMOVDQU 736(CX), Y4
-	VPSHUFB Y5, Y3, Y3
-	VPSHUFB Y6, Y4, Y4
-	VPXOR   Y3, Y4, Y3
-	VPXOR   Y3, Y1, Y1
+	VPXOR   Y5, Y1, Y1
 
-	// Store 2 outputs
-	VMOVDQU Y0, (R11)
-	ADDQ    $0x20, R11
-	VMOVDQU Y1, (R10)
-	ADDQ    $0x20, R10
+	// Store 1 outputs
+	VMOVDQU Y0, (R10)
+	VMOVDQU Y1, 32(R10)
+	ADDQ    $0x40, R10
 
 	// Prepare for next loop
 	DECQ AX
-	JNZ  mulAvxTwo_6x2_loop
+	JNZ  mulAvxTwo_6x1_64_loop
 	VZEROUPPER
 
-mulAvxTwo_6x2_end:
+mulAvxTwo_6x1_64_end:
 	RET
 
-// func mulAvxTwo_6x2_64(matrix []byte, in [][]byte, out [][]byte, start int, n int)
+// func mulAvxTwo_6x1Xor(matrix []byte, in [][]byte, out [][]byte, start int, n int)
 // Requires: AVX, AVX2, SSE2
-TEXT ·mulAvxTwo_6x2_64(SB), $0-88
-	// Loading no tables to registers
-	// Destination kept on stack
-	// Full registers estimated 31 YMM used
-	MOVQ  n+80(FP), AX
-	MOVQ  matrix_base+0(FP), CX
-	SHRQ  $0x06, AX
-	TESTQ AX, AX
-	JZ    mulAvxTwo_6x2_64_end
-	MOVQ  in_base+24(FP), AX
-	MOVQ  (AX), DX
-	MOVQ  24(AX), BX
-	MOVQ  48(AX), SI
-	MOVQ  72(AX), DI
-	MOVQ  96(AX), R8
-	MOVQ  120(AX), AX
-	MOVQ  out_base+48(FP), R9
-	MOVQ  out_base+48(FP), R9
-	MOVQ  start+72(FP), R10
+TEXT ·mulAvxTwo_6x1Xor(SB), NOSPLIT, $0-88
+	// Loading all tables to registers
+	// Destination kept in GP registers
+	// Full registers estimated 16 YMM used
+	MOVQ    n+80(FP), AX
+	MOVQ    matrix_base+0(FP), CX
+	SHRQ    $0x05, AX
+	TESTQ   AX, AX
+	JZ      mulAvxTwo_6x1Xor_end
+	VMOVDQU (CX), Y0
+	VMOVDQU 32(CX), Y1
+	VMOVDQU 64(CX), Y2
+	VMOVDQU 96(CX), Y3
+	VMOVDQU 128(CX), Y4
+	VMOVDQU 160(CX), Y5
+	VMOVDQU 192(CX), Y6
+	VMOVDQU 224(CX), Y7
+	VMOVDQU 256(CX), Y8
+	VMOVDQU 288(CX), Y9
+	VMOVDQU 320(CX), Y10
+	VMOVDQU 352(CX), Y11
+	MOVQ    in_base+24(FP), CX
+	MOVQ    (CX), DX
+	MOVQ    24(CX), BX
+	MOVQ    48(CX), SI
+	MOVQ    72(CX), DI
+	MOVQ    96(CX), R8
+	MOVQ    120(CX), CX
+	MOVQ    out_base+48(FP), R9
+	MOVQ    (R9), R9
+	MOVQ    start+72(FP), R10
+
+	// Add start offset to output
+	ADDQ R10, R9
 
 	// Add start offset to input
 	ADDQ         R10, DX
@@ -12076,86 +22690,539 @@ TEXT ·mulAvxTwo_6x2_64(SB), $0-88
 	ADDQ         R10, SI
 	ADDQ         R10, DI
 	ADDQ         R10, R8
-	ADDQ         R10, AX
-	MOVQ         $0x0000000f, R11
-	MOVQ         R11, X4
-	VPBROADCASTB X4, Y4
-	MOVQ         n+80(FP), R11
-	SHRQ         $0x06, R11
+	ADDQ         R10, CX
+	MOVQ         $0x0000000f, R10
+	MOVQ         R10, X13
+	VPBROADCASTB X13, Y13
 
-mulAvxTwo_6x2_64_loop:
-	// Clear 2 outputs
-	VPXOR Y0, Y0, Y0
-	VPXOR Y1, Y1, Y1
-	VPXOR Y2, Y2, Y2
-	VPXOR Y3, Y3, Y3
+mulAvxTwo_6x1Xor_loop:
+	// Load and process 32 bytes from input 0 to 1 outputs
+	VMOVDQU (DX), Y14
+	ADDQ    $0x20, DX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VMOVDQU (R9), Y12
+	VPSHUFB Y14, Y0, Y14
+	VPSHUFB Y15, Y1, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
 
-	// Load and process 64 bytes from input 0 to 2 outputs
-	VMOVDQU (DX), Y9
-	VMOVDQU 32(DX), Y11
-	ADDQ    $0x40, DX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU (CX), Y5
-	VMOVDQU 32(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 64(CX), Y5
-	VMOVDQU 96(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Load and process 32 bytes from input 1 to 1 outputs
+	VMOVDQU (BX), Y14
+	ADDQ    $0x20, BX
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y2, Y14
+	VPSHUFB Y15, Y3, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
 
-	// Load and process 64 bytes from input 1 to 2 outputs
-	VMOVDQU (BX), Y9
-	VMOVDQU 32(BX), Y11
-	ADDQ    $0x40, BX
-	VPSRLQ  $0x04, Y9, Y10
-	VPSRLQ  $0x04, Y11, Y12
-	VPAND   Y4, Y9, Y9
-	VPAND   Y4, Y11, Y11
-	VPAND   Y4, Y10, Y10
-	VPAND   Y4, Y12, Y12
-	VMOVDQU 128(CX), Y5
-	VMOVDQU 160(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y0, Y0
-	VPXOR   Y7, Y1, Y1
-	VMOVDQU 192(CX), Y5
-	VMOVDQU 224(CX), Y6
-	VPSHUFB Y11, Y5, Y7
-	VPSHUFB Y9, Y5, Y5
-	VPSHUFB Y12, Y6, Y8
-	VPSHUFB Y10, Y6, Y6
-	VPXOR   Y5, Y6, Y5
-	VPXOR   Y7, Y8, Y7
-	VPXOR   Y5, Y2, Y2
-	VPXOR   Y7, Y3, Y3
+	// Load and process 32 bytes from input 2 to 1 outputs
+	VMOVDQU (SI), Y14
+	ADDQ    $0x20, SI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y4, Y14
+	VPSHUFB Y15, Y5, Y15
+	VPXOR   Y14, Y15, Y14
+	VPXOR   Y14, Y12, Y12
 
-	// Load and process 64 bytes from input 2 to 2 outputs
-	VMOVDQU (SI), Y9
-	VMOVDQU 32(SI), Y11
-	ADDQ    $0x40, SI
+	// Load and process 32 bytes from input 3 to 1 outputs
+	VMOVDQU (DI), Y14
+	ADDQ    $0x20, DI
+	VPSRLQ  $0x04, Y14, Y15
+	VPAND   Y13, Y14, Y14
+	VPAND   Y13, Y15, Y15
+	VPSHUFB Y14, Y6, Y14
+	VPSHUFB Y15, Y7, Y15
+	VPXOR   Y