vendor/github.com/klauspost/reedsolomon/galois_gen_amd64.s

// Code generated by command: go run gen.go -out galois_gen_amd64.s -stubs galois_gen_amd64.go. DO NOT EDIT.

// +build !appengine
// +build !noasm
// +build !nogen
// +build gc
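
// The mulAvxTwo_RxC routines below multiply R input shards by an R-by-C
// coefficient matrix over GF(2^8), producing C output shards; start is a
// byte offset into each shard and n the number of bytes to process. Each
// coefficient is encoded as two 16-entry lookup tables (one indexed by the
// low nibble of an input byte, one by the high nibble), each duplicated
// across both 16-byte lanes of a YMM register so that VPSHUFB resolves 32
// lookups per instruction.
//
// A minimal scalar sketch of what mulAvxTwo_1x1 computes (illustrative Go,
// not part of the generated stubs; the matrix layout is inferred from the
// VMOVDQU offsets below):
//
//	lo, hi := matrix[:32], matrix[32:64] // 16-entry tables, duplicated per lane
//	for i := start; i < start+(n&^31); i++ {
//		b := in[0][i]
//		out[0][i] = lo[b&0x0f] ^ hi[b>>4]
//	}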

// func mulAvxTwo_1x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x1(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 6 YMM used
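	// Frame spec $0-88: no local stack, 88 bytes of arguments (three
	// 24-byte slice headers plus two 8-byte ints). The SHRQ below turns
	// the byte count n into a count of 32-byte blocks; a zero count
	// jumps straight to the end label.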
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	VMOVDQU      (CX), Y1
	VMOVDQU      32(CX), Y2
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), CX
	MOVQ         $0x0000000f, BX
	MOVQ         BX, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), BX

mulAvxTwo_1x1_loop:
	// Clear 1 outputs
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 outputs
	VMOVDQU (CX)(BX*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y3, Y4, Y4
	VPAND   Y3, Y5, Y5
	VPSHUFB Y4, Y1, Y4
	VPSHUFB Y5, Y2, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
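	// The eight instructions above are the entire GF(2^8) kernel: split
	// the 32 input bytes into low/high nibbles (VPSRLQ plus two VPANDs
	// with the broadcast 0x0f mask in Y3), translate both nibble vectors
	// through the product tables Y1/Y2 with VPSHUFB, and XOR the two
	// partial products into the accumulator Y0.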

	// Store 1 outputs
	VMOVDQU Y0, (DX)(BX*1)

	// Prepare for next loop
	ADDQ $0x20, BX
	DECQ AX
	JNZ  mulAvxTwo_1x1_loop
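	// VZEROUPPER avoids AVX-to-SSE transition penalties in SSE code that
	// may run after this routine returns.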
	VZEROUPPER

mulAvxTwo_1x1_end:
	RET

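// The remaining variants unroll the same kernel for R inputs and C outputs;
// the table pair for input r and output c sits at byte offset (r*C+c)*64
// within matrix.
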
// func mulAvxTwo_1x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x2(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 11 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	VMOVDQU      (CX), Y2
	VMOVDQU      32(CX), Y3
	VMOVDQU      64(CX), Y4
	VMOVDQU      96(CX), Y5
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), CX
	MOVQ         $0x0000000f, BP
	MOVQ         BP, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), BP

mulAvxTwo_1x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (CX)(BP*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VPSHUFB Y9, Y2, Y7
	VPSHUFB Y10, Y3, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VPSHUFB Y9, Y4, Y7
	VPSHUFB Y10, Y5, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(BP*1)
	VMOVDQU Y1, (DX)(BP*1)

	// Prepare for next loop
	ADDQ $0x20, BP
	DECQ AX
	JNZ  mulAvxTwo_1x2_loop
	VZEROUPPER

mulAvxTwo_1x2_end:
	RET

// func mulAvxTwo_1x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x3(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 14 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	VMOVDQU      (CX), Y3
	VMOVDQU      32(CX), Y4
	VMOVDQU      64(CX), Y5
	VMOVDQU      96(CX), Y6
	VMOVDQU      128(CX), Y7
	VMOVDQU      160(CX), Y8
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), CX
	MOVQ         $0x0000000f, SI
	MOVQ         SI, X9
	VPBROADCASTB X9, Y9
	MOVQ         start+72(FP), SI

mulAvxTwo_1x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (CX)(SI*1), Y12
	VPSRLQ  $0x04, Y12, Y13
	VPAND   Y9, Y12, Y12
	VPAND   Y9, Y13, Y13
	VPSHUFB Y12, Y3, Y10
	VPSHUFB Y13, Y4, Y11
	VPXOR   Y10, Y11, Y10
	VPXOR   Y10, Y0, Y0
	VPSHUFB Y12, Y5, Y10
	VPSHUFB Y13, Y6, Y11
	VPXOR   Y10, Y11, Y10
	VPXOR   Y10, Y1, Y1
	VPSHUFB Y12, Y7, Y10
	VPSHUFB Y13, Y8, Y11
	VPXOR   Y10, Y11, Y10
	VPXOR   Y10, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(SI*1)
	VMOVDQU Y1, (BP)(SI*1)
	VMOVDQU Y2, (DX)(SI*1)

	// Prepare for next loop
	ADDQ $0x20, SI
	DECQ AX
	JNZ  mulAvxTwo_1x3_loop
	VZEROUPPER

mulAvxTwo_1x3_end:
	RET

// func mulAvxTwo_1x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 17 YMM used
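	// With 4 outputs the estimate (17 YMM) exceeds the 16 architectural
	// YMM registers, so the tables are left in memory and re-read from
	// (CX) on every loop iteration rather than loaded once up front.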
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), DI
	MOVQ         $0x0000000f, R8
	MOVQ         R8, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R8

mulAvxTwo_1x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (DI)(R8*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R8*1)
	VMOVDQU Y1, (BP)(R8*1)
	VMOVDQU Y2, (SI)(R8*1)
	VMOVDQU Y3, (DX)(R8*1)

	// Prepare for next loop
	ADDQ $0x20, R8
	DECQ AX
	JNZ  mulAvxTwo_1x4_loop
	VZEROUPPER

mulAvxTwo_1x4_end:
	RET

// func mulAvxTwo_1x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 20 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), DX
	MOVQ         in_base+24(FP), R8
	MOVQ         (R8), R8
	MOVQ         $0x0000000f, R9
	MOVQ         R9, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R9

mulAvxTwo_1x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (R8)(R9*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	VMOVDQU Y0, (BX)(R9*1)
	VMOVDQU Y1, (BP)(R9*1)
	VMOVDQU Y2, (SI)(R9*1)
	VMOVDQU Y3, (DI)(R9*1)
	VMOVDQU Y4, (DX)(R9*1)

	// Prepare for next loop
	ADDQ $0x20, R9
	DECQ AX
	JNZ  mulAvxTwo_1x5_loop
	VZEROUPPER

mulAvxTwo_1x5_end:
	RET

// func mulAvxTwo_1x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 23 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), DX
	MOVQ         in_base+24(FP), R9
	MOVQ         (R9), R9
	MOVQ         $0x0000000f, R10
	MOVQ         R10, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R10

mulAvxTwo_1x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (R9)(R10*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	VMOVDQU Y0, (BX)(R10*1)
	VMOVDQU Y1, (BP)(R10*1)
	VMOVDQU Y2, (SI)(R10*1)
	VMOVDQU Y3, (DI)(R10*1)
	VMOVDQU Y4, (R8)(R10*1)
	VMOVDQU Y5, (DX)(R10*1)

	// Prepare for next loop
	ADDQ $0x20, R10
	DECQ AX
	JNZ  mulAvxTwo_1x6_loop
	VZEROUPPER

mulAvxTwo_1x6_end:
	RET

// func mulAvxTwo_1x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 26 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), DX
	MOVQ         in_base+24(FP), R10
	MOVQ         (R10), R10
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R11

mulAvxTwo_1x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (R10)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	VMOVDQU Y0, (BX)(R11*1)
	VMOVDQU Y1, (BP)(R11*1)
	VMOVDQU Y2, (SI)(R11*1)
	VMOVDQU Y3, (DI)(R11*1)
	VMOVDQU Y4, (R8)(R11*1)
	VMOVDQU Y5, (R9)(R11*1)
	VMOVDQU Y6, (DX)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_1x7_loop
	VZEROUPPER

mulAvxTwo_1x7_end:
	RET

// func mulAvxTwo_1x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_1x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 29 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_1x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), R10
	MOVQ         168(DX), DX
	MOVQ         in_base+24(FP), R11
	MOVQ         (R11), R11
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R12

mulAvxTwo_1x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (R11)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	VMOVDQU Y0, (BX)(R12*1)
	VMOVDQU Y1, (BP)(R12*1)
	VMOVDQU Y2, (SI)(R12*1)
	VMOVDQU Y3, (DI)(R12*1)
	VMOVDQU Y4, (R8)(R12*1)
	VMOVDQU Y5, (R9)(R12*1)
	VMOVDQU Y6, (R10)(R12*1)
	VMOVDQU Y7, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_1x8_loop
	VZEROUPPER

mulAvxTwo_1x8_end:
	RET

// func mulAvxTwo_2x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x1(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 8 YMM used
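	// Two inputs, one output: each input shard gets its own table pair
	// (Y1/Y2 and Y3/Y4), and both partial products are XORed into the
	// same accumulator Y0, i.e. a dot product of one matrix row with
	// the input shards.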
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	VMOVDQU      (CX), Y1
	VMOVDQU      32(CX), Y2
	VMOVDQU      64(CX), Y3
	VMOVDQU      96(CX), Y4
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), BX
	MOVQ         24(CX), CX
	MOVQ         $0x0000000f, BP
	MOVQ         BP, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), BP

mulAvxTwo_2x1_loop:
	// Clear 1 outputs
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 outputs
	VMOVDQU (BX)(BP*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y5, Y6, Y6
	VPAND   Y5, Y7, Y7
	VPSHUFB Y6, Y1, Y6
	VPSHUFB Y7, Y2, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 outputs
	VMOVDQU (CX)(BP*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y5, Y6, Y6
	VPAND   Y5, Y7, Y7
	VPSHUFB Y6, Y3, Y6
	VPSHUFB Y7, Y4, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0

	// Store 1 outputs
	VMOVDQU Y0, (DX)(BP*1)

	// Prepare for next loop
	ADDQ $0x20, BP
	DECQ AX
	JNZ  mulAvxTwo_2x1_loop
	VZEROUPPER

mulAvxTwo_2x1_end:
	RET

// func mulAvxTwo_2x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x2(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 15 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	VMOVDQU      (CX), Y2
	VMOVDQU      32(CX), Y3
	VMOVDQU      64(CX), Y4
	VMOVDQU      96(CX), Y5
	VMOVDQU      128(CX), Y6
	VMOVDQU      160(CX), Y7
	VMOVDQU      192(CX), Y8
	VMOVDQU      224(CX), Y9
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), BP
	MOVQ         24(CX), CX
	MOVQ         $0x0000000f, SI
	MOVQ         SI, X10
	VPBROADCASTB X10, Y10
	MOVQ         start+72(FP), SI

mulAvxTwo_2x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (BP)(SI*1), Y13
	VPSRLQ  $0x04, Y13, Y14
	VPAND   Y10, Y13, Y13
	VPAND   Y10, Y14, Y14
	VPSHUFB Y13, Y2, Y11
	VPSHUFB Y14, Y3, Y12
	VPXOR   Y11, Y12, Y11
	VPXOR   Y11, Y0, Y0
	VPSHUFB Y13, Y4, Y11
	VPSHUFB Y14, Y5, Y12
	VPXOR   Y11, Y12, Y11
	VPXOR   Y11, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (CX)(SI*1), Y13
	VPSRLQ  $0x04, Y13, Y14
	VPAND   Y10, Y13, Y13
	VPAND   Y10, Y14, Y14
	VPSHUFB Y13, Y6, Y11
	VPSHUFB Y14, Y7, Y12
	VPXOR   Y11, Y12, Y11
	VPXOR   Y11, Y0, Y0
	VPSHUFB Y13, Y8, Y11
	VPSHUFB Y14, Y9, Y12
	VPXOR   Y11, Y12, Y11
	VPXOR   Y11, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(SI*1)
	VMOVDQU Y1, (DX)(SI*1)

	// Prepare for next loop
	ADDQ $0x20, SI
	DECQ AX
	JNZ  mulAvxTwo_2x2_loop
	VZEROUPPER

mulAvxTwo_2x2_end:
	RET

// func mulAvxTwo_2x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 20 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), SI
	MOVQ         $0x0000000f, R8
	MOVQ         R8, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R8

mulAvxTwo_2x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R8*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (SI)(R8*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R8*1)
	VMOVDQU Y1, (BP)(R8*1)
	VMOVDQU Y2, (DX)(R8*1)

	// Prepare for next loop
	ADDQ $0x20, R8
	DECQ AX
	JNZ  mulAvxTwo_2x3_loop
	VZEROUPPER

mulAvxTwo_2x3_end:
	RET

// func mulAvxTwo_2x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 25 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), R8
	MOVQ         24(DI), DI
	MOVQ         $0x0000000f, R9
	MOVQ         R9, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R9

mulAvxTwo_2x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (R8)(R9*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (DI)(R9*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R9*1)
	VMOVDQU Y1, (BP)(R9*1)
	VMOVDQU Y2, (SI)(R9*1)
	VMOVDQU Y3, (DX)(R9*1)

	// Prepare for next loop
	ADDQ $0x20, R9
	DECQ AX
	JNZ  mulAvxTwo_2x4_loop
	VZEROUPPER

mulAvxTwo_2x4_end:
	RET

// func mulAvxTwo_2x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 30 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), DX
	MOVQ         in_base+24(FP), R8
	MOVQ         (R8), R9
	MOVQ         24(R8), R8
	MOVQ         $0x0000000f, R10
	MOVQ         R10, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R10

mulAvxTwo_2x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (R9)(R10*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (R8)(R10*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	VMOVDQU Y0, (BX)(R10*1)
	VMOVDQU Y1, (BP)(R10*1)
	VMOVDQU Y2, (SI)(R10*1)
	VMOVDQU Y3, (DI)(R10*1)
	VMOVDQU Y4, (DX)(R10*1)

	// Prepare for next loop
	ADDQ $0x20, R10
	DECQ AX
	JNZ  mulAvxTwo_2x5_loop
	VZEROUPPER

mulAvxTwo_2x5_end:
	RET

// func mulAvxTwo_2x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 35 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), DX
	MOVQ         in_base+24(FP), R9
	MOVQ         (R9), R10
	MOVQ         24(R9), R9
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R11

mulAvxTwo_2x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (R10)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (R9)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	VMOVDQU Y0, (BX)(R11*1)
	VMOVDQU Y1, (BP)(R11*1)
	VMOVDQU Y2, (SI)(R11*1)
	VMOVDQU Y3, (DI)(R11*1)
	VMOVDQU Y4, (R8)(R11*1)
	VMOVDQU Y5, (DX)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_2x6_loop
	VZEROUPPER

mulAvxTwo_2x6_end:
	RET

// func mulAvxTwo_2x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 40 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), DX
	MOVQ         in_base+24(FP), R10
	MOVQ         (R10), R11
	MOVQ         24(R10), R10
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R12

mulAvxTwo_2x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (R11)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (R10)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	VMOVDQU Y0, (BX)(R12*1)
	VMOVDQU Y1, (BP)(R12*1)
	VMOVDQU Y2, (SI)(R12*1)
	VMOVDQU Y3, (DI)(R12*1)
	VMOVDQU Y4, (R8)(R12*1)
	VMOVDQU Y5, (R9)(R12*1)
	VMOVDQU Y6, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_2x7_loop
	VZEROUPPER

mulAvxTwo_2x7_end:
	RET

// func mulAvxTwo_2x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_2x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 45 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_2x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), R10
	MOVQ         168(DX), DX
	MOVQ         in_base+24(FP), R11
	MOVQ         (R11), R12
	MOVQ         24(R11), R11
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R13

mulAvxTwo_2x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (R12)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (R11)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	VMOVDQU Y0, (BX)(R13*1)
	VMOVDQU Y1, (BP)(R13*1)
	VMOVDQU Y2, (SI)(R13*1)
	VMOVDQU Y3, (DI)(R13*1)
	VMOVDQU Y4, (R8)(R13*1)
	VMOVDQU Y5, (R9)(R13*1)
	VMOVDQU Y6, (R10)(R13*1)
	VMOVDQU Y7, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_2x8_loop
	VZEROUPPER

mulAvxTwo_2x8_end:
	RET

// func mulAvxTwo_3x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x1(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 10 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	VMOVDQU      (CX), Y1
	VMOVDQU      32(CX), Y2
	VMOVDQU      64(CX), Y3
	VMOVDQU      96(CX), Y4
	VMOVDQU      128(CX), Y5
	VMOVDQU      160(CX), Y6
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), BX
	MOVQ         24(CX), BP
	MOVQ         48(CX), CX
	MOVQ         $0x0000000f, SI
	MOVQ         SI, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), SI

mulAvxTwo_3x1_loop:
	// Clear 1 outputs
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 outputs
	VMOVDQU (BX)(SI*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y7, Y8, Y8
	VPAND   Y7, Y9, Y9
	VPSHUFB Y8, Y1, Y8
	VPSHUFB Y9, Y2, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 outputs
	VMOVDQU (BP)(SI*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y7, Y8, Y8
	VPAND   Y7, Y9, Y9
	VPSHUFB Y8, Y3, Y8
	VPSHUFB Y9, Y4, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 outputs
	VMOVDQU (CX)(SI*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y7, Y8, Y8
	VPAND   Y7, Y9, Y9
	VPSHUFB Y8, Y5, Y8
	VPSHUFB Y9, Y6, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0

	// Store 1 outputs
	VMOVDQU Y0, (DX)(SI*1)

	// Prepare for next loop
	ADDQ $0x20, SI
	DECQ AX
	JNZ  mulAvxTwo_3x1_loop
	VZEROUPPER

mulAvxTwo_3x1_end:
	RET

// func mulAvxTwo_3x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x2(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 19 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), BP
	MOVQ         $0x0000000f, R8
	MOVQ         R8, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R8

mulAvxTwo_3x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R8*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R8*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (BP)(R8*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R8*1)
	VMOVDQU Y1, (DX)(R8*1)

	// Prepare for next loop
	ADDQ $0x20, R8
	DECQ AX
	JNZ  mulAvxTwo_3x2_loop
	VZEROUPPER

mulAvxTwo_3x2_end:
	RET

// func mulAvxTwo_3x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 26 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), R8
	MOVQ         48(SI), SI
	MOVQ         $0x0000000f, R9
	MOVQ         R9, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R9

mulAvxTwo_3x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R9*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (R8)(R9*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (SI)(R9*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R9*1)
	VMOVDQU Y1, (BP)(R9*1)
	VMOVDQU Y2, (DX)(R9*1)

	// Prepare for next loop
	ADDQ $0x20, R9
	DECQ AX
	JNZ  mulAvxTwo_3x3_loop
	VZEROUPPER

mulAvxTwo_3x3_end:
	RET

// func mulAvxTwo_3x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 33 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), R8
	MOVQ         24(DI), R9
	MOVQ         48(DI), DI
	MOVQ         $0x0000000f, R10
	MOVQ         R10, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R10

mulAvxTwo_3x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (R8)(R10*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (R9)(R10*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (DI)(R10*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R10*1)
	VMOVDQU Y1, (BP)(R10*1)
	VMOVDQU Y2, (SI)(R10*1)
	VMOVDQU Y3, (DX)(R10*1)

	// Prepare for next loop
	ADDQ $0x20, R10
	DECQ AX
	JNZ  mulAvxTwo_3x4_loop
	VZEROUPPER

mulAvxTwo_3x4_end:
	RET

// func mulAvxTwo_3x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 40 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), DX
	MOVQ         in_base+24(FP), R8
	MOVQ         (R8), R9
	MOVQ         24(R8), R10
	MOVQ         48(R8), R8
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R11

mulAvxTwo_3x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (R9)(R11*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (R10)(R11*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (R8)(R11*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	VMOVDQU Y0, (BX)(R11*1)
	VMOVDQU Y1, (BP)(R11*1)
	VMOVDQU Y2, (SI)(R11*1)
	VMOVDQU Y3, (DI)(R11*1)
	VMOVDQU Y4, (DX)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_3x5_loop
	VZEROUPPER

mulAvxTwo_3x5_end:
	RET

// func mulAvxTwo_3x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 47 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), DX
	MOVQ         in_base+24(FP), R9
	MOVQ         (R9), R10
	MOVQ         24(R9), R11
	MOVQ         48(R9), R9
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R12

mulAvxTwo_3x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (R10)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (R11)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (R9)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	VMOVDQU Y0, (BX)(R12*1)
	VMOVDQU Y1, (BP)(R12*1)
	VMOVDQU Y2, (SI)(R12*1)
	VMOVDQU Y3, (DI)(R12*1)
	VMOVDQU Y4, (R8)(R12*1)
	VMOVDQU Y5, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_3x6_loop
	VZEROUPPER

mulAvxTwo_3x6_end:
	RET

// func mulAvxTwo_3x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 54 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), DX
	MOVQ         in_base+24(FP), R10
	MOVQ         (R10), R11
	MOVQ         24(R10), R12
	MOVQ         48(R10), R10
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R13

mulAvxTwo_3x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (R11)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (R12)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (R10)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	VMOVDQU Y0, (BX)(R13*1)
	VMOVDQU Y1, (BP)(R13*1)
	VMOVDQU Y2, (SI)(R13*1)
	VMOVDQU Y3, (DI)(R13*1)
	VMOVDQU Y4, (R8)(R13*1)
	VMOVDQU Y5, (R9)(R13*1)
	VMOVDQU Y6, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_3x7_loop
	VZEROUPPER

mulAvxTwo_3x7_end:
	RET

// func mulAvxTwo_3x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_3x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 61 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_3x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), R10
	MOVQ         168(DX), DX
	MOVQ         in_base+24(FP), R11
	MOVQ         (R11), R12
	MOVQ         24(R11), R13
	MOVQ         48(R11), R11
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R14

mulAvxTwo_3x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (R12)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (R13)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (R11)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	VMOVDQU Y0, (BX)(R14*1)
	VMOVDQU Y1, (BP)(R14*1)
	VMOVDQU Y2, (SI)(R14*1)
	VMOVDQU Y3, (DI)(R14*1)
	VMOVDQU Y4, (R8)(R14*1)
	VMOVDQU Y5, (R9)(R14*1)
	VMOVDQU Y6, (R10)(R14*1)
	VMOVDQU Y7, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_3x8_loop
	VZEROUPPER

mulAvxTwo_3x8_end:
	RET
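
// Matrix table layout, as read by the kernels in this file: for an RxC
// kernel, the table pair for input i and output j starts at byte (i*C+j)*64,
// low half first and high half at +32. In mulAvxTwo_3x8 above, for example,
// input 1/output 0 reads 512(CX) and 544(CX), i.e. (1*8+0)*64.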

// func mulAvxTwo_4x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x1(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 12 YMM used
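	// 4 inputs * 1 output * 2 tables = 8 table registers (Y1-Y8), plus
	// the 0x0f nibble mask in Y9, so the whole matrix fits in registers
	// and is loaded once in the prologue below.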
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	VMOVDQU      (CX), Y1
	VMOVDQU      32(CX), Y2
	VMOVDQU      64(CX), Y3
	VMOVDQU      96(CX), Y4
	VMOVDQU      128(CX), Y5
	VMOVDQU      160(CX), Y6
	VMOVDQU      192(CX), Y7
	VMOVDQU      224(CX), Y8
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), BX
	MOVQ         24(CX), BP
	MOVQ         48(CX), SI
	MOVQ         72(CX), CX
	MOVQ         $0x0000000f, DI
	MOVQ         DI, X9
	VPBROADCASTB X9, Y9
	MOVQ         start+72(FP), DI

mulAvxTwo_4x1_loop:
	// Clear 1 output
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 output
	VMOVDQU (BX)(DI*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y9, Y10, Y10
	VPAND   Y9, Y11, Y11
	VPSHUFB Y10, Y1, Y10
	VPSHUFB Y11, Y2, Y11
	VPXOR   Y10, Y11, Y10
	VPXOR   Y10, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 output
	VMOVDQU (BP)(DI*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y9, Y10, Y10
	VPAND   Y9, Y11, Y11
	VPSHUFB Y10, Y3, Y10
	VPSHUFB Y11, Y4, Y11
	VPXOR   Y10, Y11, Y10
	VPXOR   Y10, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 output
	VMOVDQU (SI)(DI*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y9, Y10, Y10
	VPAND   Y9, Y11, Y11
	VPSHUFB Y10, Y5, Y10
	VPSHUFB Y11, Y6, Y11
	VPXOR   Y10, Y11, Y10
	VPXOR   Y10, Y0, Y0

	// Load and process 32 bytes from input 3 to 1 output
	VMOVDQU (CX)(DI*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y9, Y10, Y10
	VPAND   Y9, Y11, Y11
	VPSHUFB Y10, Y7, Y10
	VPSHUFB Y11, Y8, Y11
	VPXOR   Y10, Y11, Y10
	VPXOR   Y10, Y0, Y0

	// Store 1 output
	VMOVDQU Y0, (DX)(DI*1)

	// Prepare for next loop
	ADDQ $0x20, DI
	DECQ AX
	JNZ  mulAvxTwo_4x1_loop
	VZEROUPPER

mulAvxTwo_4x1_end:
	RET
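
// A hypothetical caller sketch (not part of this file), based on the stub
// signature above: start and n are byte offsets into every shard, and only
// full 32-byte blocks are processed (n is floored to a multiple of 32 by the
// SHRQ $0x05 in the prologue):
//
//	mulAvxTwo_4x1(matrix, in, out, 0, len(in[0])&^31)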

// func mulAvxTwo_4x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x2(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 23 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), R8
	MOVQ         72(BP), BP
	MOVQ         $0x0000000f, R9
	MOVQ         R9, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R9

mulAvxTwo_4x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R9*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R9*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (R8)(R9*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 3 to 2 outputs
	VMOVDQU (BP)(R9*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
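	// Input 3 tables for this 4x2 kernel: (3*2+0)*64 = 384 for output 0
	// and (3*2+1)*64 = 448 for output 1, matching the layout note above.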
	VMOVDQU 384(CX), Y3
	VMOVDQU 416(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 448(CX), Y3
	VMOVDQU 480(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R9*1)
	VMOVDQU Y1, (DX)(R9*1)

	// Prepare for next loop
	ADDQ $0x20, R9
	DECQ AX
	JNZ  mulAvxTwo_4x2_loop
	VZEROUPPER

mulAvxTwo_4x2_end:
	RET

// func mulAvxTwo_4x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 32 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), R8
	MOVQ         48(SI), R9
	MOVQ         72(SI), SI
	MOVQ         $0x0000000f, R10
	MOVQ         R10, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R10

mulAvxTwo_4x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R10*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (R8)(R10*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (R9)(R10*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 3 to 3 outputs
	VMOVDQU (SI)(R10*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 576(CX), Y4
	VMOVDQU 608(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 640(CX), Y4
	VMOVDQU 672(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 704(CX), Y4
	VMOVDQU 736(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R10*1)
	VMOVDQU Y1, (BP)(R10*1)
	VMOVDQU Y2, (DX)(R10*1)

	// Prepare for next loop
	ADDQ $0x20, R10
	DECQ AX
	JNZ  mulAvxTwo_4x3_loop
	VZEROUPPER

mulAvxTwo_4x3_end:
	RET

// func mulAvxTwo_4x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 41 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
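	// The 24-byte stride (0, 24, 48, 72) steps across the []byte slice
	// headers of out (each 24 bytes on amd64), keeping only the data
	// pointers.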
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), R8
	MOVQ         24(DI), R9
	MOVQ         48(DI), R10
	MOVQ         72(DI), DI
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R11

mulAvxTwo_4x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (R8)(R11*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (R9)(R11*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (R10)(R11*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 3 to 4 outputs
	VMOVDQU (DI)(R11*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 768(CX), Y5
	VMOVDQU 800(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 832(CX), Y5
	VMOVDQU 864(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 896(CX), Y5
	VMOVDQU 928(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 960(CX), Y5
	VMOVDQU 992(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R11*1)
	VMOVDQU Y1, (BP)(R11*1)
	VMOVDQU Y2, (SI)(R11*1)
	VMOVDQU Y3, (DX)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_4x4_loop
	VZEROUPPER

mulAvxTwo_4x4_end:
	RET

// func mulAvxTwo_4x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 50 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), DX
	MOVQ         in_base+24(FP), R8
	MOVQ         (R8), R9
	MOVQ         24(R8), R10
	MOVQ         48(R8), R11
	MOVQ         72(R8), R8
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R12

mulAvxTwo_4x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (R9)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (R10)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (R11)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 3 to 5 outputs
	VMOVDQU (R8)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 960(CX), Y6
	VMOVDQU 992(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1024(CX), Y6
	VMOVDQU 1056(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1088(CX), Y6
	VMOVDQU 1120(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1152(CX), Y6
	VMOVDQU 1184(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1216(CX), Y6
	VMOVDQU 1248(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	VMOVDQU Y0, (BX)(R12*1)
	VMOVDQU Y1, (BP)(R12*1)
	VMOVDQU Y2, (SI)(R12*1)
	VMOVDQU Y3, (DI)(R12*1)
	VMOVDQU Y4, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_4x5_loop
	VZEROUPPER

mulAvxTwo_4x5_end:
	RET

// func mulAvxTwo_4x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 59 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), DX
	MOVQ         in_base+24(FP), R9
	MOVQ         (R9), R10
	MOVQ         24(R9), R11
	MOVQ         48(R9), R12
	MOVQ         72(R9), R9
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R13

mulAvxTwo_4x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (R10)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (R11)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (R12)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 3 to 6 outputs
	VMOVDQU (R9)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1152(CX), Y7
	VMOVDQU 1184(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1216(CX), Y7
	VMOVDQU 1248(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1280(CX), Y7
	VMOVDQU 1312(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1344(CX), Y7
	VMOVDQU 1376(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1408(CX), Y7
	VMOVDQU 1440(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1472(CX), Y7
	VMOVDQU 1504(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	VMOVDQU Y0, (BX)(R13*1)
	VMOVDQU Y1, (BP)(R13*1)
	VMOVDQU Y2, (SI)(R13*1)
	VMOVDQU Y3, (DI)(R13*1)
	VMOVDQU Y4, (R8)(R13*1)
	VMOVDQU Y5, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_4x6_loop
	VZEROUPPER

mulAvxTwo_4x6_end:
	RET

// func mulAvxTwo_4x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 68 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), DX
	MOVQ         in_base+24(FP), R10
	MOVQ         (R10), R11
	MOVQ         24(R10), R12
	MOVQ         48(R10), R13
	MOVQ         72(R10), R10
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R14

mulAvxTwo_4x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (R11)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (R12)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (R13)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 3 to 7 outputs
	VMOVDQU (R10)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1344(CX), Y8
	VMOVDQU 1376(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1408(CX), Y8
	VMOVDQU 1440(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1472(CX), Y8
	VMOVDQU 1504(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1536(CX), Y8
	VMOVDQU 1568(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1600(CX), Y8
	VMOVDQU 1632(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1664(CX), Y8
	VMOVDQU 1696(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1728(CX), Y8
	VMOVDQU 1760(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	VMOVDQU Y0, (BX)(R14*1)
	VMOVDQU Y1, (BP)(R14*1)
	VMOVDQU Y2, (SI)(R14*1)
	VMOVDQU Y3, (DI)(R14*1)
	VMOVDQU Y4, (R8)(R14*1)
	VMOVDQU Y5, (R9)(R14*1)
	VMOVDQU Y6, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_4x7_loop
	VZEROUPPER

mulAvxTwo_4x7_end:
	RET

// func mulAvxTwo_4x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_4x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 77 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_4x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), R10
	MOVQ         168(DX), DX
	MOVQ         in_base+24(FP), R11
	MOVQ         (R11), R12
	MOVQ         24(R11), R13
	MOVQ         48(R11), R14
	MOVQ         72(R11), R11
	MOVQ         $0x0000000f, R15
	MOVQ         R15, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R15

mulAvxTwo_4x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (R12)(R15*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (R13)(R15*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (R14)(R15*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 3 to 8 outputs
	VMOVDQU (R11)(R15*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1536(CX), Y9
	VMOVDQU 1568(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1600(CX), Y9
	VMOVDQU 1632(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1664(CX), Y9
	VMOVDQU 1696(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1728(CX), Y9
	VMOVDQU 1760(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1792(CX), Y9
	VMOVDQU 1824(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1856(CX), Y9
	VMOVDQU 1888(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1920(CX), Y9
	VMOVDQU 1952(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1984(CX), Y9
	VMOVDQU 2016(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	VMOVDQU Y0, (BX)(R15*1)
	VMOVDQU Y1, (BP)(R15*1)
	VMOVDQU Y2, (SI)(R15*1)
	VMOVDQU Y3, (DI)(R15*1)
	VMOVDQU Y4, (R8)(R15*1)
	VMOVDQU Y5, (R9)(R15*1)
	VMOVDQU Y6, (R10)(R15*1)
	VMOVDQU Y7, (DX)(R15*1)

	// Prepare for next loop
	ADDQ $0x20, R15
	DECQ AX
	JNZ  mulAvxTwo_4x8_loop
	VZEROUPPER

mulAvxTwo_4x8_end:
	RET
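
// With 8 output pointers, 4 input pointers, the matrix pointer, the loop
// counter and the offset, mulAvxTwo_4x8 above uses the general-purpose
// register file all the way through R15; it is the widest 4-input shape in
// this file before the 5-input kernels begin.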

// func mulAvxTwo_5x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x1(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 14 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	VMOVDQU      (CX), Y1
	VMOVDQU      32(CX), Y2
	VMOVDQU      64(CX), Y3
	VMOVDQU      96(CX), Y4
	VMOVDQU      128(CX), Y5
	VMOVDQU      160(CX), Y6
	VMOVDQU      192(CX), Y7
	VMOVDQU      224(CX), Y8
	VMOVDQU      256(CX), Y9
	VMOVDQU      288(CX), Y10
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), BX
	MOVQ         24(CX), BP
	MOVQ         48(CX), SI
	MOVQ         72(CX), DI
	MOVQ         96(CX), CX
	MOVQ         $0x0000000f, R8
	MOVQ         R8, X11
	VPBROADCASTB X11, Y11
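	// Y11 = 32 copies of 0x0f: the nibble mask reused for every input
	// block in the loop below.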
	MOVQ         start+72(FP), R8

mulAvxTwo_5x1_loop:
	// Clear 1 output
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 output
	VMOVDQU (BX)(R8*1), Y12
	VPSRLQ  $0x04, Y12, Y13
	VPAND   Y11, Y12, Y12
	VPAND   Y11, Y13, Y13
	VPSHUFB Y12, Y1, Y12
	VPSHUFB Y13, Y2, Y13
	VPXOR   Y12, Y13, Y12
	VPXOR   Y12, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 output
	VMOVDQU (BP)(R8*1), Y12
	VPSRLQ  $0x04, Y12, Y13
	VPAND   Y11, Y12, Y12
	VPAND   Y11, Y13, Y13
	VPSHUFB Y12, Y3, Y12
	VPSHUFB Y13, Y4, Y13
	VPXOR   Y12, Y13, Y12
	VPXOR   Y12, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 output
	VMOVDQU (SI)(R8*1), Y12
	VPSRLQ  $0x04, Y12, Y13
	VPAND   Y11, Y12, Y12
	VPAND   Y11, Y13, Y13
	VPSHUFB Y12, Y5, Y12
	VPSHUFB Y13, Y6, Y13
	VPXOR   Y12, Y13, Y12
	VPXOR   Y12, Y0, Y0

	// Load and process 32 bytes from input 3 to 1 output
	VMOVDQU (DI)(R8*1), Y12
	VPSRLQ  $0x04, Y12, Y13
	VPAND   Y11, Y12, Y12
	VPAND   Y11, Y13, Y13
	VPSHUFB Y12, Y7, Y12
	VPSHUFB Y13, Y8, Y13
	VPXOR   Y12, Y13, Y12
	VPXOR   Y12, Y0, Y0

	// Load and process 32 bytes from input 4 to 1 output
	VMOVDQU (CX)(R8*1), Y12
	VPSRLQ  $0x04, Y12, Y13
	VPAND   Y11, Y12, Y12
	VPAND   Y11, Y13, Y13
	VPSHUFB Y12, Y9, Y12
	VPSHUFB Y13, Y10, Y13
	VPXOR   Y12, Y13, Y12
	VPXOR   Y12, Y0, Y0

	// Store 1 output
	VMOVDQU Y0, (DX)(R8*1)

	// Prepare for next loop
	ADDQ $0x20, R8
	DECQ AX
	JNZ  mulAvxTwo_5x1_loop
	VZEROUPPER

mulAvxTwo_5x1_end:
	RET

// func mulAvxTwo_5x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x2(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 27 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), R8
	MOVQ         72(BP), R9
	MOVQ         96(BP), BP
	MOVQ         $0x0000000f, R10
	MOVQ         R10, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R10

mulAvxTwo_5x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R10*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R10*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (R8)(R10*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 3 to 2 outputs
	VMOVDQU (R9)(R10*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 384(CX), Y3
	VMOVDQU 416(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 448(CX), Y3
	VMOVDQU 480(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 4 to 2 outputs
	VMOVDQU (BP)(R10*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 512(CX), Y3
	VMOVDQU 544(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 576(CX), Y3
	VMOVDQU 608(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R10*1)
	VMOVDQU Y1, (DX)(R10*1)

	// Prepare for next loop
	ADDQ $0x20, R10
	DECQ AX
	JNZ  mulAvxTwo_5x2_loop
	VZEROUPPER

mulAvxTwo_5x2_end:
	RET

// func mulAvxTwo_5x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 38 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), R8
	MOVQ         48(SI), R9
	MOVQ         72(SI), R10
	MOVQ         96(SI), SI
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R11

mulAvxTwo_5x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R11*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (R8)(R11*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (R9)(R11*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 3 to 3 outputs
	VMOVDQU (R10)(R11*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 576(CX), Y4
	VMOVDQU 608(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 640(CX), Y4
	VMOVDQU 672(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 704(CX), Y4
	VMOVDQU 736(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 4 to 3 outputs
	VMOVDQU (SI)(R11*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 768(CX), Y4
	VMOVDQU 800(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 832(CX), Y4
	VMOVDQU 864(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 896(CX), Y4
	VMOVDQU 928(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R11*1)
	VMOVDQU Y1, (BP)(R11*1)
	VMOVDQU Y2, (DX)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_5x3_loop
	VZEROUPPER

mulAvxTwo_5x3_end:
	RET

// func mulAvxTwo_5x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 49 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), R8
	MOVQ         24(DI), R9
	MOVQ         48(DI), R10
	MOVQ         72(DI), R11
	MOVQ         96(DI), DI
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R12

mulAvxTwo_5x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (R8)(R12*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (R9)(R12*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (R10)(R12*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 3 to 4 outputs
	VMOVDQU (R11)(R12*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 768(CX), Y5
	VMOVDQU 800(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 832(CX), Y5
	VMOVDQU 864(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 896(CX), Y5
	VMOVDQU 928(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 960(CX), Y5
	VMOVDQU 992(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 4 to 4 outputs
	VMOVDQU (DI)(R12*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1024(CX), Y5
	VMOVDQU 1056(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1088(CX), Y5
	VMOVDQU 1120(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1152(CX), Y5
	VMOVDQU 1184(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1216(CX), Y5
	VMOVDQU 1248(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R12*1)
	VMOVDQU Y1, (BP)(R12*1)
	VMOVDQU Y2, (SI)(R12*1)
	VMOVDQU Y3, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_5x4_loop
	VZEROUPPER

mulAvxTwo_5x4_end:
	RET

// func mulAvxTwo_5x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 60 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), DX
	MOVQ         in_base+24(FP), R8
	MOVQ         (R8), R9
	MOVQ         24(R8), R10
	MOVQ         48(R8), R11
	MOVQ         72(R8), R12
	MOVQ         96(R8), R8
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R13

mulAvxTwo_5x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (R9)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (R10)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (R11)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 3 to 5 outputs
	VMOVDQU (R12)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 960(CX), Y6
	VMOVDQU 992(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1024(CX), Y6
	VMOVDQU 1056(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1088(CX), Y6
	VMOVDQU 1120(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1152(CX), Y6
	VMOVDQU 1184(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1216(CX), Y6
	VMOVDQU 1248(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 4 to 5 outputs
	VMOVDQU (R8)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1280(CX), Y6
	VMOVDQU 1312(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1344(CX), Y6
	VMOVDQU 1376(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1408(CX), Y6
	VMOVDQU 1440(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1472(CX), Y6
	VMOVDQU 1504(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1536(CX), Y6
	VMOVDQU 1568(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	VMOVDQU Y0, (BX)(R13*1)
	VMOVDQU Y1, (BP)(R13*1)
	VMOVDQU Y2, (SI)(R13*1)
	VMOVDQU Y3, (DI)(R13*1)
	VMOVDQU Y4, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_5x5_loop
	VZEROUPPER

mulAvxTwo_5x5_end:
	RET

// func mulAvxTwo_5x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 71 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), DX
	MOVQ         in_base+24(FP), R9
	MOVQ         (R9), R10
	MOVQ         24(R9), R11
	MOVQ         48(R9), R12
	MOVQ         72(R9), R13
	MOVQ         96(R9), R9
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R14

mulAvxTwo_5x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (R10)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (R11)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (R12)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 3 to 6 outputs
	VMOVDQU (R13)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1152(CX), Y7
	VMOVDQU 1184(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1216(CX), Y7
	VMOVDQU 1248(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1280(CX), Y7
	VMOVDQU 1312(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1344(CX), Y7
	VMOVDQU 1376(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1408(CX), Y7
	VMOVDQU 1440(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1472(CX), Y7
	VMOVDQU 1504(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 4 to 6 outputs
	VMOVDQU (R9)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1536(CX), Y7
	VMOVDQU 1568(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1600(CX), Y7
	VMOVDQU 1632(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1664(CX), Y7
	VMOVDQU 1696(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1728(CX), Y7
	VMOVDQU 1760(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1792(CX), Y7
	VMOVDQU 1824(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1856(CX), Y7
	VMOVDQU 1888(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	VMOVDQU Y0, (BX)(R14*1)
	VMOVDQU Y1, (BP)(R14*1)
	VMOVDQU Y2, (SI)(R14*1)
	VMOVDQU Y3, (DI)(R14*1)
	VMOVDQU Y4, (R8)(R14*1)
	VMOVDQU Y5, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_5x6_loop
	VZEROUPPER

mulAvxTwo_5x6_end:
	RET
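
// The mulAvxTwo_RxC kernels in this file all follow the same pattern: R
// input slices are multiplied by an RxC matrix of GF(2^8) coefficients
// and the products XORed into C output slices, 32 bytes per loop
// iteration. The matrix argument does not hold raw coefficients; for
// every (input, output) pair it holds two 32-byte VPSHUFB tables (the
// products of the 16 possible low nibbles, then of the 16 high nibbles,
// each 16-entry table stored twice so VPSHUFB sees it in both 16-byte
// lanes), 64 bytes per coefficient, ordered input-major. A scalar Go
// sketch of the same computation, with the table layout inferred from
// the offsets used below (an illustration, not part of the generated
// API):
//
//	func mulGenericSketch(matrix []byte, in, out [][]byte, start, n int) {
//		n &^= 31 // the assembly only processes whole 32-byte blocks
//		for k := start; k < start+n; k++ {
//			for j := range out {
//				var acc byte // "Clear C outputs"
//				for i := range in {
//					t := matrix[(i*len(out)+j)*64:]
//					b := in[i][k]
//					// Low- and high-nibble lookups, XORed together.
//					// GF(2^8) addition is XOR, so partial products
//					// from every input accumulate with XOR as well.
//					acc ^= t[b&0x0f] ^ t[32+b>>4]
//				}
//				out[j][k] = acc // "Store C outputs"
//			}
//		}
//	}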

// func mulAvxTwo_5x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 82 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), R9
	MOVQ         144(DX), DX
	MOVQ         in_base+24(FP), R10
	MOVQ         (R10), R11
	MOVQ         24(R10), R12
	MOVQ         48(R10), R13
	MOVQ         72(R10), R14
	MOVQ         96(R10), R10
	MOVQ         $0x0000000f, R15
	MOVQ         R15, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R15

mulAvxTwo_5x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (R11)(R15*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (R12)(R15*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (R13)(R15*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 3 to 7 outputs
	VMOVDQU (R14)(R15*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1344(CX), Y8
	VMOVDQU 1376(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1408(CX), Y8
	VMOVDQU 1440(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1472(CX), Y8
	VMOVDQU 1504(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1536(CX), Y8
	VMOVDQU 1568(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1600(CX), Y8
	VMOVDQU 1632(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1664(CX), Y8
	VMOVDQU 1696(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1728(CX), Y8
	VMOVDQU 1760(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 4 to 7 outputs
	VMOVDQU (R10)(R15*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1792(CX), Y8
	VMOVDQU 1824(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1856(CX), Y8
	VMOVDQU 1888(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1920(CX), Y8
	VMOVDQU 1952(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1984(CX), Y8
	VMOVDQU 2016(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2048(CX), Y8
	VMOVDQU 2080(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2112(CX), Y8
	VMOVDQU 2144(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2176(CX), Y8
	VMOVDQU 2208(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	VMOVDQU Y0, (BX)(R15*1)
	VMOVDQU Y1, (BP)(R15*1)
	VMOVDQU Y2, (SI)(R15*1)
	VMOVDQU Y3, (DI)(R15*1)
	VMOVDQU Y4, (R8)(R15*1)
	VMOVDQU Y5, (R9)(R15*1)
	VMOVDQU Y6, (DX)(R15*1)

	// Prepare for next loop
	ADDQ $0x20, R15
	DECQ AX
	JNZ  mulAvxTwo_5x7_loop
	VZEROUPPER

mulAvxTwo_5x7_end:
	RET
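
// Nibble extraction above: AVX2 has no per-byte shift, so the high
// nibbles are produced with a quadword shift (VPSRLQ $0x04) followed by
// a byte mask (VPAND against the broadcast 0x0f); the bits that leak
// across byte boundaries during the quadword shift are discarded by the
// mask. Per byte this is simply:
//
//	lo := b & 0x0f        // VPAND Y7, Y10, Y10
//	hi := (b >> 4) & 0x0f // VPSRLQ then VPAND, byte-safe thanks to the mask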

// func mulAvxTwo_5x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_5x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 93 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_5x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), BX
	MOVQ         $0x0000000f, R9
	MOVQ         R9, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R9

mulAvxTwo_5x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (BP)(R9*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (SI)(R9*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (DI)(R9*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 3 to 8 outputs
	VMOVDQU (R8)(R9*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1536(CX), Y9
	VMOVDQU 1568(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1600(CX), Y9
	VMOVDQU 1632(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1664(CX), Y9
	VMOVDQU 1696(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1728(CX), Y9
	VMOVDQU 1760(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1792(CX), Y9
	VMOVDQU 1824(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1856(CX), Y9
	VMOVDQU 1888(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1920(CX), Y9
	VMOVDQU 1952(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1984(CX), Y9
	VMOVDQU 2016(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 4 to 8 outputs
	VMOVDQU (BX)(R9*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2048(CX), Y9
	VMOVDQU 2080(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2112(CX), Y9
	VMOVDQU 2144(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2176(CX), Y9
	VMOVDQU 2208(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2240(CX), Y9
	VMOVDQU 2272(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2304(CX), Y9
	VMOVDQU 2336(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2368(CX), Y9
	VMOVDQU 2400(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2432(CX), Y9
	VMOVDQU 2464(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 2496(CX), Y9
	VMOVDQU 2528(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	MOVQ    (DX), R10
	VMOVDQU Y0, (R10)(R9*1)
	MOVQ    24(DX), R10
	VMOVDQU Y1, (R10)(R9*1)
	MOVQ    48(DX), R10
	VMOVDQU Y2, (R10)(R9*1)
	MOVQ    72(DX), R10
	VMOVDQU Y3, (R10)(R9*1)
	MOVQ    96(DX), R10
	VMOVDQU Y4, (R10)(R9*1)
	MOVQ    120(DX), R10
	VMOVDQU Y5, (R10)(R9*1)
	MOVQ    144(DX), R10
	VMOVDQU Y6, (R10)(R9*1)
	MOVQ    168(DX), R10
	VMOVDQU Y7, (R10)(R9*1)

	// Prepare for next loop
	ADDQ $0x20, R9
	DECQ AX
	JNZ  mulAvxTwo_5x8_loop
	VZEROUPPER

mulAvxTwo_5x8_end:
	RET
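
// Unlike the narrower kernels, mulAvxTwo_5x8 does not keep its eight
// output pointers in registers: eight outputs on top of the five input
// pointers, table base, out_base, byte offset and block counter would
// overflow the usable general-purpose registers, so the store phase
// reloads each output pointer from out_base (DX) on every iteration
// instead.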

// func mulAvxTwo_6x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x1(SB), $0-88
	// Loading all tables to registers
	// Full registers estimated 16 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	VMOVDQU      (CX), Y1
	VMOVDQU      32(CX), Y2
	VMOVDQU      64(CX), Y3
	VMOVDQU      96(CX), Y4
	VMOVDQU      128(CX), Y5
	VMOVDQU      160(CX), Y6
	VMOVDQU      192(CX), Y7
	VMOVDQU      224(CX), Y8
	VMOVDQU      256(CX), Y9
	VMOVDQU      288(CX), Y10
	VMOVDQU      320(CX), Y11
	VMOVDQU      352(CX), Y12
	MOVQ         in_base+24(FP), CX
	MOVQ         (CX), BX
	MOVQ         24(CX), BP
	MOVQ         48(CX), SI
	MOVQ         72(CX), DI
	MOVQ         96(CX), R8
	MOVQ         120(CX), CX
	MOVQ         $0x0000000f, R9
	MOVQ         R9, X13
	VPBROADCASTB X13, Y13
	MOVQ         start+72(FP), R9

mulAvxTwo_6x1_loop:
	// Clear 1 outputs
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 outputs
	VMOVDQU (BX)(R9*1), Y14
	VPSRLQ  $0x04, Y14, Y15
	VPAND   Y13, Y14, Y14
	VPAND   Y13, Y15, Y15
	VPSHUFB Y14, Y1, Y14
	VPSHUFB Y15, Y2, Y15
	VPXOR   Y14, Y15, Y14
	VPXOR   Y14, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 outputs
	VMOVDQU (BP)(R9*1), Y14
	VPSRLQ  $0x04, Y14, Y15
	VPAND   Y13, Y14, Y14
	VPAND   Y13, Y15, Y15
	VPSHUFB Y14, Y3, Y14
	VPSHUFB Y15, Y4, Y15
	VPXOR   Y14, Y15, Y14
	VPXOR   Y14, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 outputs
	VMOVDQU (SI)(R9*1), Y14
	VPSRLQ  $0x04, Y14, Y15
	VPAND   Y13, Y14, Y14
	VPAND   Y13, Y15, Y15
	VPSHUFB Y14, Y5, Y14
	VPSHUFB Y15, Y6, Y15
	VPXOR   Y14, Y15, Y14
	VPXOR   Y14, Y0, Y0

	// Load and process 32 bytes from input 3 to 1 outputs
	VMOVDQU (DI)(R9*1), Y14
	VPSRLQ  $0x04, Y14, Y15
	VPAND   Y13, Y14, Y14
	VPAND   Y13, Y15, Y15
	VPSHUFB Y14, Y7, Y14
	VPSHUFB Y15, Y8, Y15
	VPXOR   Y14, Y15, Y14
	VPXOR   Y14, Y0, Y0

	// Load and process 32 bytes from input 4 to 1 outputs
	VMOVDQU (R8)(R9*1), Y14
	VPSRLQ  $0x04, Y14, Y15
	VPAND   Y13, Y14, Y14
	VPAND   Y13, Y15, Y15
	VPSHUFB Y14, Y9, Y14
	VPSHUFB Y15, Y10, Y15
	VPXOR   Y14, Y15, Y14
	VPXOR   Y14, Y0, Y0

	// Load and process 32 bytes from input 5 to 1 outputs
	VMOVDQU (CX)(R9*1), Y14
	VPSRLQ  $0x04, Y14, Y15
	VPAND   Y13, Y14, Y14
	VPAND   Y13, Y15, Y15
	VPSHUFB Y14, Y11, Y14
	VPSHUFB Y15, Y12, Y15
	VPXOR   Y14, Y15, Y14
	VPXOR   Y14, Y0, Y0

	// Store 1 outputs
	VMOVDQU Y0, (DX)(R9*1)

	// Prepare for next loop
	ADDQ $0x20, R9
	DECQ AX
	JNZ  mulAvxTwo_6x1_loop
	VZEROUPPER

mulAvxTwo_6x1_end:
	RET
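
// mulAvxTwo_6x1 is a "Loading all tables to registers" variant: a 6x1
// product needs only 6*1*2 = 12 table vectors, which fit in Y1-Y12
// (leaving Y0 for the accumulator, Y13 for the mask and Y14/Y15 as
// scratch), so the tables are loaded once before the loop. Kernels
// whose tables do not fit ("Loading no tables to registers") re-read
// them from the matrix on every 32-byte block instead.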

// func mulAvxTwo_6x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x2(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 31 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), R8
	MOVQ         72(BP), R9
	MOVQ         96(BP), R10
	MOVQ         120(BP), BP
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R11

mulAvxTwo_6x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R11*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R11*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (R8)(R11*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 3 to 2 outputs
	VMOVDQU (R9)(R11*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 384(CX), Y3
	VMOVDQU 416(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 448(CX), Y3
	VMOVDQU 480(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 4 to 2 outputs
	VMOVDQU (R10)(R11*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 512(CX), Y3
	VMOVDQU 544(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 576(CX), Y3
	VMOVDQU 608(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 5 to 2 outputs
	VMOVDQU (BP)(R11*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 640(CX), Y3
	VMOVDQU 672(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 704(CX), Y3
	VMOVDQU 736(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R11*1)
	VMOVDQU Y1, (DX)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_6x2_loop
	VZEROUPPER

mulAvxTwo_6x2_end:
	RET
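
// The Go stubs for these kernels are declared in galois_gen_amd64.go
// (see the generation command at the top of this file). A minimal,
// hypothetical driver for one of them might look as follows; only the
// stub signature is taken from this file, and the table expansion is
// assumed to have produced the 64-byte-per-coefficient layout described
// earlier:
//
//	// tables: coefficients pre-expanded to nibble lookup tables
//	n := (len(in[0]) - start) &^ 31 // whole 32-byte blocks only
//	if n > 0 {
//		mulAvxTwo_6x2(tables, in, out, start, n)
//	}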

// func mulAvxTwo_6x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 44 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), R8
	MOVQ         48(SI), R9
	MOVQ         72(SI), R10
	MOVQ         96(SI), R11
	MOVQ         120(SI), SI
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R12

mulAvxTwo_6x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R12*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (R8)(R12*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (R9)(R12*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 3 to 3 outputs
	VMOVDQU (R10)(R12*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 576(CX), Y4
	VMOVDQU 608(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 640(CX), Y4
	VMOVDQU 672(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 704(CX), Y4
	VMOVDQU 736(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 4 to 3 outputs
	VMOVDQU (R11)(R12*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 768(CX), Y4
	VMOVDQU 800(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 832(CX), Y4
	VMOVDQU 864(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 896(CX), Y4
	VMOVDQU 928(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 5 to 3 outputs
	VMOVDQU (SI)(R12*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 960(CX), Y4
	VMOVDQU 992(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1024(CX), Y4
	VMOVDQU 1056(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1088(CX), Y4
	VMOVDQU 1120(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R12*1)
	VMOVDQU Y1, (BP)(R12*1)
	VMOVDQU Y2, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_6x3_loop
	VZEROUPPER

mulAvxTwo_6x3_end:
	RET
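
// A single index register (R12 in mulAvxTwo_6x3) serves as the common
// byte offset into every input and every output slice, so all nine
// slices here must be valid over [start, start+n); the routine performs
// no bounds checks of its own.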

// func mulAvxTwo_6x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 57 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), R8
	MOVQ         24(DI), R9
	MOVQ         48(DI), R10
	MOVQ         72(DI), R11
	MOVQ         96(DI), R12
	MOVQ         120(DI), DI
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R13

mulAvxTwo_6x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (R8)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (R9)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (R10)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 3 to 4 outputs
	VMOVDQU (R11)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 768(CX), Y5
	VMOVDQU 800(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 832(CX), Y5
	VMOVDQU 864(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 896(CX), Y5
	VMOVDQU 928(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 960(CX), Y5
	VMOVDQU 992(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 4 to 4 outputs
	VMOVDQU (R12)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1024(CX), Y5
	VMOVDQU 1056(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1088(CX), Y5
	VMOVDQU 1120(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1152(CX), Y5
	VMOVDQU 1184(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1216(CX), Y5
	VMOVDQU 1248(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 5 to 4 outputs
	VMOVDQU (DI)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1280(CX), Y5
	VMOVDQU 1312(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1344(CX), Y5
	VMOVDQU 1376(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1408(CX), Y5
	VMOVDQU 1440(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1472(CX), Y5
	VMOVDQU 1504(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R13*1)
	VMOVDQU Y1, (BP)(R13*1)
	VMOVDQU Y2, (SI)(R13*1)
	VMOVDQU Y3, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_6x4_loop
	VZEROUPPER

mulAvxTwo_6x4_end:
	RET
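
// Loop control is identical in every kernel: AX is preloaded with n>>5
// (the number of whole 32-byte blocks), the JZ in the prologue skips
// the body entirely when n < 32, and DECQ/JNZ close the loop. Any tail
// of n modulo 32 bytes is never touched here, so it presumably falls to
// a scalar path in the caller.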

// func mulAvxTwo_6x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 70 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), DX
	MOVQ         in_base+24(FP), R8
	MOVQ         (R8), R9
	MOVQ         24(R8), R10
	MOVQ         48(R8), R11
	MOVQ         72(R8), R12
	MOVQ         96(R8), R13
	MOVQ         120(R8), R8
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R14

mulAvxTwo_6x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (R9)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (R10)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (R11)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 3 to 5 outputs
	VMOVDQU (R12)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 960(CX), Y6
	VMOVDQU 992(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1024(CX), Y6
	VMOVDQU 1056(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1088(CX), Y6
	VMOVDQU 1120(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1152(CX), Y6
	VMOVDQU 1184(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1216(CX), Y6
	VMOVDQU 1248(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 4 to 5 outputs
	VMOVDQU (R13)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1280(CX), Y6
	VMOVDQU 1312(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1344(CX), Y6
	VMOVDQU 1376(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1408(CX), Y6
	VMOVDQU 1440(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1472(CX), Y6
	VMOVDQU 1504(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1536(CX), Y6
	VMOVDQU 1568(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 5 to 5 outputs
	VMOVDQU (R8)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1600(CX), Y6
	VMOVDQU 1632(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1664(CX), Y6
	VMOVDQU 1696(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1728(CX), Y6
	VMOVDQU 1760(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1792(CX), Y6
	VMOVDQU 1824(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1856(CX), Y6
	VMOVDQU 1888(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	VMOVDQU Y0, (BX)(R14*1)
	VMOVDQU Y1, (BP)(R14*1)
	VMOVDQU Y2, (SI)(R14*1)
	VMOVDQU Y3, (DI)(R14*1)
	VMOVDQU Y4, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_6x5_loop
	VZEROUPPER

mulAvxTwo_6x5_end:
	RET
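
// Note the size implied by the offsets: at 64 bytes per coefficient,
// mulAvxTwo_6x5 reads 6*5*64 = 1920 bytes of matrix (matching the
// highest offset above, 1888(CX) plus 32). The matrix argument is thus
// the coefficient-by-coefficient table expansion, which the Go side has
// to prepare before the call, not the coding matrix itself.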

// func mulAvxTwo_6x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 83 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), R8
	MOVQ         120(DX), DX
	MOVQ         in_base+24(FP), R9
	MOVQ         (R9), R10
	MOVQ         24(R9), R11
	MOVQ         48(R9), R12
	MOVQ         72(R9), R13
	MOVQ         96(R9), R14
	MOVQ         120(R9), R9
	MOVQ         $0x0000000f, R15
	MOVQ         R15, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R15

mulAvxTwo_6x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (R10)(R15*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (R11)(R15*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (R12)(R15*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 3 to 6 outputs
	VMOVDQU (R13)(R15*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1152(CX), Y7
	VMOVDQU 1184(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1216(CX), Y7
	VMOVDQU 1248(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1280(CX), Y7
	VMOVDQU 1312(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1344(CX), Y7
	VMOVDQU 1376(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1408(CX), Y7
	VMOVDQU 1440(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1472(CX), Y7
	VMOVDQU 1504(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 4 to 6 outputs
	VMOVDQU (R14)(R15*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1536(CX), Y7
	VMOVDQU 1568(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1600(CX), Y7
	VMOVDQU 1632(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1664(CX), Y7
	VMOVDQU 1696(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1728(CX), Y7
	VMOVDQU 1760(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1792(CX), Y7
	VMOVDQU 1824(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1856(CX), Y7
	VMOVDQU 1888(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 5 to 6 outputs
	VMOVDQU (R9)(R15*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1920(CX), Y7
	VMOVDQU 1952(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1984(CX), Y7
	VMOVDQU 2016(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2048(CX), Y7
	VMOVDQU 2080(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2112(CX), Y7
	VMOVDQU 2144(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2176(CX), Y7
	VMOVDQU 2208(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2240(CX), Y7
	VMOVDQU 2272(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	VMOVDQU Y0, (BX)(R15*1)
	VMOVDQU Y1, (BP)(R15*1)
	VMOVDQU Y2, (SI)(R15*1)
	VMOVDQU Y3, (DI)(R15*1)
	VMOVDQU Y4, (R8)(R15*1)
	VMOVDQU Y5, (DX)(R15*1)

	// Prepare for next loop
	ADDQ $0x20, R15
	DECQ AX
	JNZ  mulAvxTwo_6x6_loop
	VZEROUPPER

mulAvxTwo_6x6_end:
	RET
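
// The 0x0f mask used by every kernel is materialized the same way:
// MOVQ $0x0f into a general-purpose register, MOVQ into an XMM
// register, then VPBROADCASTB to all 32 bytes of a YMM register. The
// GP-to-XMM MOVQ is why SSE2 appears in the "Requires" line alongside
// AVX and AVX2.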

// func mulAvxTwo_6x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 96 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), BX
	MOVQ         $0x0000000f, R10
	MOVQ         R10, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R10

mulAvxTwo_6x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (BP)(R10*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (SI)(R10*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (DI)(R10*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 3 to 7 outputs
	VMOVDQU (R8)(R10*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1344(CX), Y8
	VMOVDQU 1376(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1408(CX), Y8
	VMOVDQU 1440(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1472(CX), Y8
	VMOVDQU 1504(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1536(CX), Y8
	VMOVDQU 1568(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1600(CX), Y8
	VMOVDQU 1632(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1664(CX), Y8
	VMOVDQU 1696(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1728(CX), Y8
	VMOVDQU 1760(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 4 to 7 outputs
	VMOVDQU (R9)(R10*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1792(CX), Y8
	VMOVDQU 1824(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1856(CX), Y8
	VMOVDQU 1888(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1920(CX), Y8
	VMOVDQU 1952(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1984(CX), Y8
	VMOVDQU 2016(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2048(CX), Y8
	VMOVDQU 2080(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2112(CX), Y8
	VMOVDQU 2144(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2176(CX), Y8
	VMOVDQU 2208(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 5 to 7 outputs
	VMOVDQU (BX)(R10*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2240(CX), Y8
	VMOVDQU 2272(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2304(CX), Y8
	VMOVDQU 2336(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2368(CX), Y8
	VMOVDQU 2400(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2432(CX), Y8
	VMOVDQU 2464(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2496(CX), Y8
	VMOVDQU 2528(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2560(CX), Y8
	VMOVDQU 2592(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2624(CX), Y8
	VMOVDQU 2656(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	MOVQ    (DX), R11
	VMOVDQU Y0, (R11)(R10*1)
	MOVQ    24(DX), R11
	VMOVDQU Y1, (R11)(R10*1)
	MOVQ    48(DX), R11
	VMOVDQU Y2, (R11)(R10*1)
	MOVQ    72(DX), R11
	VMOVDQU Y3, (R11)(R10*1)
	MOVQ    96(DX), R11
	VMOVDQU Y4, (R11)(R10*1)
	MOVQ    120(DX), R11
	VMOVDQU Y5, (R11)(R10*1)
	MOVQ    144(DX), R11
	VMOVDQU Y6, (R11)(R10*1)

	// Prepare for next loop
	ADDQ $0x20, R10
	DECQ AX
	JNZ  mulAvxTwo_6x7_loop
	VZEROUPPER

mulAvxTwo_6x7_end:
	RET
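
// Every kernel executes VZEROUPPER before returning: the Go callers
// (and any subsequent SSE code) would otherwise pay the AVX-SSE
// transition penalty for the dirty upper halves of the YMM registers.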

// func mulAvxTwo_6x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_6x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 109 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_6x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), BX
	MOVQ         $0x0000000f, R10
	MOVQ         R10, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R10

mulAvxTwo_6x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (BP)(R10*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (SI)(R10*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (DI)(R10*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 3 to 8 outputs
	VMOVDQU (R8)(R10*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1536(CX), Y9
	VMOVDQU 1568(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1600(CX), Y9
	VMOVDQU 1632(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1664(CX), Y9
	VMOVDQU 1696(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1728(CX), Y9
	VMOVDQU 1760(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1792(CX), Y9
	VMOVDQU 1824(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1856(CX), Y9
	VMOVDQU 1888(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1920(CX), Y9
	VMOVDQU 1952(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1984(CX), Y9
	VMOVDQU 2016(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 4 to 8 outputs
	VMOVDQU (R9)(R10*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2048(CX), Y9
	VMOVDQU 2080(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2112(CX), Y9
	VMOVDQU 2144(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2176(CX), Y9
	VMOVDQU 2208(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2240(CX), Y9
	VMOVDQU 2272(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2304(CX), Y9
	VMOVDQU 2336(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2368(CX), Y9
	VMOVDQU 2400(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2432(CX), Y9
	VMOVDQU 2464(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 2496(CX), Y9
	VMOVDQU 2528(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 5 to 8 outputs
	VMOVDQU (BX)(R10*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2560(CX), Y9
	VMOVDQU 2592(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2624(CX), Y9
	VMOVDQU 2656(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2688(CX), Y9
	VMOVDQU 2720(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2752(CX), Y9
	VMOVDQU 2784(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2816(CX), Y9
	VMOVDQU 2848(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2880(CX), Y9
	VMOVDQU 2912(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2944(CX), Y9
	VMOVDQU 2976(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3008(CX), Y9
	VMOVDQU 3040(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7
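
	// With eight output streams plus the input pointers and loop state,
	// the sixteen general-purpose registers are oversubscribed, so unlike
	// the narrower kernels the output pointers are not pinned across the
	// loop: each store below first reloads the data pointer from its
	// slice header (headers are 24 bytes apart, hence the 0, 24, 48, ...
	// offsets from the out_base pointer held in DX).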

	// Store 8 outputs
	MOVQ    (DX), R11
	VMOVDQU Y0, (R11)(R10*1)
	MOVQ    24(DX), R11
	VMOVDQU Y1, (R11)(R10*1)
	MOVQ    48(DX), R11
	VMOVDQU Y2, (R11)(R10*1)
	MOVQ    72(DX), R11
	VMOVDQU Y3, (R11)(R10*1)
	MOVQ    96(DX), R11
	VMOVDQU Y4, (R11)(R10*1)
	MOVQ    120(DX), R11
	VMOVDQU Y5, (R11)(R10*1)
	MOVQ    144(DX), R11
	VMOVDQU Y6, (R11)(R10*1)
	MOVQ    168(DX), R11
	VMOVDQU Y7, (R11)(R10*1)

	// Prepare for next loop
	ADDQ $0x20, R10
	DECQ AX
	JNZ  mulAvxTwo_6x8_loop
	VZEROUPPER

mulAvxTwo_6x8_end:
	RET
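
// Every kernel in this family computes the same GF(2^8) multiply-
// accumulate. For each (input, output) pair the matrix holds a 64-byte
// table pair: a 16-entry lookup table for the low nibble of each input
// byte and one for the high nibble, each repeated in both 16-byte halves
// of its 32-byte load because VPSHUFB shuffles within 128-bit lanes.
// VPSRLQ $4 plus the broadcast 0x0f mask split every byte into its two
// nibbles (the mask also keeps the VPSHUFB indices in 0..15), VPSHUFB
// looks up 32 partial products at once, and the two halves XOR into the
// full product, which is XORed into the accumulator. A scalar sketch of
// one (input, output) pair, with illustrative names:
//
//	for i := range in0 {
//		b := in0[i]
//		out0[i] ^= lo[b&0x0f] ^ hi[b>>4]
//	}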

// func mulAvxTwo_7x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x1(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 18 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), BX
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X1
	VPBROADCASTB X1, Y1
	MOVQ         start+72(FP), R11

mulAvxTwo_7x1_loop:
	// Clear 1 outputs
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 outputs
	VMOVDQU (BP)(R11*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU (CX), Y2
	VMOVDQU 32(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 outputs
	VMOVDQU (SI)(R11*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 64(CX), Y2
	VMOVDQU 96(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 outputs
	VMOVDQU (DI)(R11*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 128(CX), Y2
	VMOVDQU 160(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 3 to 1 outputs
	VMOVDQU (R8)(R11*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 192(CX), Y2
	VMOVDQU 224(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 4 to 1 outputs
	VMOVDQU (R9)(R11*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 256(CX), Y2
	VMOVDQU 288(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 5 to 1 outputs
	VMOVDQU (R10)(R11*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 320(CX), Y2
	VMOVDQU 352(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 6 to 1 outputs
	VMOVDQU (BX)(R11*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 384(CX), Y2
	VMOVDQU 416(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Store 1 outputs
	VMOVDQU Y0, (DX)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_7x1_loop
	VZEROUPPER

mulAvxTwo_7x1_end:
	RET
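
// The matrix argument is the coding matrix pre-expanded into these
// lookup tables, one 64-byte pair per (input, output) combination and
// laid out input-major. In the 7x2 kernel below, input i and output j
// read their low/high tables at offsets 64*(i*2+j) and 64*(i*2+j)+32,
// which is why the VMOVDQU table offsets advance in 64-byte steps.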

// func mulAvxTwo_7x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x2(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 35 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), R8
	MOVQ         72(BP), R9
	MOVQ         96(BP), R10
	MOVQ         120(BP), R11
	MOVQ         144(BP), BP
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R12

mulAvxTwo_7x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R12*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R12*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (R8)(R12*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 3 to 2 outputs
	VMOVDQU (R9)(R12*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 384(CX), Y3
	VMOVDQU 416(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 448(CX), Y3
	VMOVDQU 480(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 4 to 2 outputs
	VMOVDQU (R10)(R12*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 512(CX), Y3
	VMOVDQU 544(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 576(CX), Y3
	VMOVDQU 608(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 5 to 2 outputs
	VMOVDQU (R11)(R12*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 640(CX), Y3
	VMOVDQU 672(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 704(CX), Y3
	VMOVDQU 736(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 6 to 2 outputs
	VMOVDQU (BP)(R12*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 768(CX), Y3
	VMOVDQU 800(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 832(CX), Y3
	VMOVDQU 864(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R12*1)
	VMOVDQU Y1, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_7x2_loop
	VZEROUPPER

mulAvxTwo_7x2_end:
	RET
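
// Each TEXT directive declares $0-88: a zero-byte local frame and 88
// bytes of arguments, i.e. three 24-byte slice headers plus the two
// 8-byte ints start and n. The prologue's FP offsets (matrix_base+0,
// in_base+24, out_base+48, start+72, n+80) index into that block.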

// func mulAvxTwo_7x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 50 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), R8
	MOVQ         48(SI), R9
	MOVQ         72(SI), R10
	MOVQ         96(SI), R11
	MOVQ         120(SI), R12
	MOVQ         144(SI), SI
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R13

mulAvxTwo_7x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R13*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (R8)(R13*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (R9)(R13*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 3 to 3 outputs
	VMOVDQU (R10)(R13*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 576(CX), Y4
	VMOVDQU 608(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 640(CX), Y4
	VMOVDQU 672(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 704(CX), Y4
	VMOVDQU 736(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 4 to 3 outputs
	VMOVDQU (R11)(R13*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 768(CX), Y4
	VMOVDQU 800(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 832(CX), Y4
	VMOVDQU 864(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 896(CX), Y4
	VMOVDQU 928(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 5 to 3 outputs
	VMOVDQU (R12)(R13*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 960(CX), Y4
	VMOVDQU 992(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1024(CX), Y4
	VMOVDQU 1056(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1088(CX), Y4
	VMOVDQU 1120(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 6 to 3 outputs
	VMOVDQU (SI)(R13*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1152(CX), Y4
	VMOVDQU 1184(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1216(CX), Y4
	VMOVDQU 1248(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1280(CX), Y4
	VMOVDQU 1312(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R13*1)
	VMOVDQU Y1, (BP)(R13*1)
	VMOVDQU Y2, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_7x3_loop
	VZEROUPPER

mulAvxTwo_7x3_end:
	RET
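
// Loop bookkeeping is identical everywhere: SHRQ $0x05 converts the byte
// count n into a count of 32-byte blocks (zero blocks exits before the
// loop), start is a byte offset applied uniformly to every input and
// output slice, and ADDQ $0x20 advances it one YMM width per iteration.
// Any tail of n beyond a multiple of 32 bytes is not touched by these
// kernels.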

// func mulAvxTwo_7x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 65 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), R8
	MOVQ         24(DI), R9
	MOVQ         48(DI), R10
	MOVQ         72(DI), R11
	MOVQ         96(DI), R12
	MOVQ         120(DI), R13
	MOVQ         144(DI), DI
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R14

mulAvxTwo_7x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (R8)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (R9)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (R10)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 3 to 4 outputs
	VMOVDQU (R11)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 768(CX), Y5
	VMOVDQU 800(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 832(CX), Y5
	VMOVDQU 864(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 896(CX), Y5
	VMOVDQU 928(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 960(CX), Y5
	VMOVDQU 992(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 4 to 4 outputs
	VMOVDQU (R12)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1024(CX), Y5
	VMOVDQU 1056(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1088(CX), Y5
	VMOVDQU 1120(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1152(CX), Y5
	VMOVDQU 1184(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1216(CX), Y5
	VMOVDQU 1248(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 5 to 4 outputs
	VMOVDQU (R13)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1280(CX), Y5
	VMOVDQU 1312(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1344(CX), Y5
	VMOVDQU 1376(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1408(CX), Y5
	VMOVDQU 1440(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1472(CX), Y5
	VMOVDQU 1504(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 6 to 4 outputs
	VMOVDQU (DI)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1536(CX), Y5
	VMOVDQU 1568(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1600(CX), Y5
	VMOVDQU 1632(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1664(CX), Y5
	VMOVDQU 1696(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1728(CX), Y5
	VMOVDQU 1760(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R14*1)
	VMOVDQU Y1, (BP)(R14*1)
	VMOVDQU Y2, (SI)(R14*1)
	VMOVDQU Y3, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_7x4_loop
	VZEROUPPER

mulAvxTwo_7x4_end:
	RET
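
// 7x5 is the last of the 7-input shapes that still pins every pointer in
// a register: five output pointers (BX, BP, SI, DI, DX), seven input
// pointers (R9-R14 and R8) and the R15 offset account for every
// general-purpose register except SP.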

// func mulAvxTwo_7x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 80 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DI
	MOVQ         96(DX), DX
	MOVQ         in_base+24(FP), R8
	MOVQ         (R8), R9
	MOVQ         24(R8), R10
	MOVQ         48(R8), R11
	MOVQ         72(R8), R12
	MOVQ         96(R8), R13
	MOVQ         120(R8), R14
	MOVQ         144(R8), R8
	MOVQ         $0x0000000f, R15
	MOVQ         R15, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R15

mulAvxTwo_7x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (R9)(R15*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (R10)(R15*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (R11)(R15*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 3 to 5 outputs
	VMOVDQU (R12)(R15*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 960(CX), Y6
	VMOVDQU 992(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1024(CX), Y6
	VMOVDQU 1056(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1088(CX), Y6
	VMOVDQU 1120(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1152(CX), Y6
	VMOVDQU 1184(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1216(CX), Y6
	VMOVDQU 1248(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 4 to 5 outputs
	VMOVDQU (R13)(R15*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1280(CX), Y6
	VMOVDQU 1312(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1344(CX), Y6
	VMOVDQU 1376(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1408(CX), Y6
	VMOVDQU 1440(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1472(CX), Y6
	VMOVDQU 1504(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1536(CX), Y6
	VMOVDQU 1568(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 5 to 5 outputs
	VMOVDQU (R14)(R15*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1600(CX), Y6
	VMOVDQU 1632(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1664(CX), Y6
	VMOVDQU 1696(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1728(CX), Y6
	VMOVDQU 1760(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1792(CX), Y6
	VMOVDQU 1824(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1856(CX), Y6
	VMOVDQU 1888(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 6 to 5 outputs
	VMOVDQU (R8)(R15*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1920(CX), Y6
	VMOVDQU 1952(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1984(CX), Y6
	VMOVDQU 2016(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2048(CX), Y6
	VMOVDQU 2080(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2112(CX), Y6
	VMOVDQU 2144(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2176(CX), Y6
	VMOVDQU 2208(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	VMOVDQU Y0, (BX)(R15*1)
	VMOVDQU Y1, (BP)(R15*1)
	VMOVDQU Y2, (SI)(R15*1)
	VMOVDQU Y3, (DI)(R15*1)
	VMOVDQU Y4, (DX)(R15*1)

	// Prepare for next loop
	ADDQ $0x20, R15
	DECQ AX
	JNZ  mulAvxTwo_7x5_loop
	VZEROUPPER

mulAvxTwo_7x5_end:
	RET
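
// From 7x6 upward the output pointers no longer fit, so only out_base
// stays in DX and the store section walks the slice headers on every
// pass. The YMM split is fixed per kernel; here Y0-Y5 are the six
// accumulators, Y6 the broadcast nibble mask, Y7/Y8 scratch for the two
// table halves, and Y9/Y10 the masked low and high nibble indices.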

// func mulAvxTwo_7x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 95 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), BX
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R11

mulAvxTwo_7x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (BP)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (SI)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (DI)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 3 to 6 outputs
	VMOVDQU (R8)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1152(CX), Y7
	VMOVDQU 1184(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1216(CX), Y7
	VMOVDQU 1248(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1280(CX), Y7
	VMOVDQU 1312(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1344(CX), Y7
	VMOVDQU 1376(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1408(CX), Y7
	VMOVDQU 1440(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1472(CX), Y7
	VMOVDQU 1504(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 4 to 6 outputs
	VMOVDQU (R9)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1536(CX), Y7
	VMOVDQU 1568(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1600(CX), Y7
	VMOVDQU 1632(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1664(CX), Y7
	VMOVDQU 1696(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1728(CX), Y7
	VMOVDQU 1760(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1792(CX), Y7
	VMOVDQU 1824(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1856(CX), Y7
	VMOVDQU 1888(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 5 to 6 outputs
	VMOVDQU (R10)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1920(CX), Y7
	VMOVDQU 1952(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1984(CX), Y7
	VMOVDQU 2016(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2048(CX), Y7
	VMOVDQU 2080(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2112(CX), Y7
	VMOVDQU 2144(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2176(CX), Y7
	VMOVDQU 2208(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2240(CX), Y7
	VMOVDQU 2272(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 6 to 6 outputs
	VMOVDQU (BX)(R11*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 2304(CX), Y7
	VMOVDQU 2336(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 2368(CX), Y7
	VMOVDQU 2400(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2432(CX), Y7
	VMOVDQU 2464(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2496(CX), Y7
	VMOVDQU 2528(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2560(CX), Y7
	VMOVDQU 2592(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2624(CX), Y7
	VMOVDQU 2656(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	MOVQ    (DX), R12
	VMOVDQU Y0, (R12)(R11*1)
	MOVQ    24(DX), R12
	VMOVDQU Y1, (R12)(R11*1)
	MOVQ    48(DX), R12
	VMOVDQU Y2, (R12)(R11*1)
	MOVQ    72(DX), R12
	VMOVDQU Y3, (R12)(R11*1)
	MOVQ    96(DX), R12
	VMOVDQU Y4, (R12)(R11*1)
	MOVQ    120(DX), R12
	VMOVDQU Y5, (R12)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_7x6_loop
	VZEROUPPER

mulAvxTwo_7x6_end:
	RET
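
// Table data grows with the product of the shape: this 7x7 kernel walks
// 7*7 = 49 table pairs, 64 bytes each, 3136 bytes in all, matching the
// final offsets of 3072 and 3104 below. That is far more than the
// sixteen YMM registers can hold, so every pair is streamed from memory
// on each use.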

// func mulAvxTwo_7x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 110 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), BX
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R11

mulAvxTwo_7x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (BP)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (SI)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (DI)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 3 to 7 outputs
	VMOVDQU (R8)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1344(CX), Y8
	VMOVDQU 1376(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1408(CX), Y8
	VMOVDQU 1440(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1472(CX), Y8
	VMOVDQU 1504(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1536(CX), Y8
	VMOVDQU 1568(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1600(CX), Y8
	VMOVDQU 1632(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1664(CX), Y8
	VMOVDQU 1696(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1728(CX), Y8
	VMOVDQU 1760(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 4 to 7 outputs
	VMOVDQU (R9)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1792(CX), Y8
	VMOVDQU 1824(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1856(CX), Y8
	VMOVDQU 1888(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1920(CX), Y8
	VMOVDQU 1952(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1984(CX), Y8
	VMOVDQU 2016(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2048(CX), Y8
	VMOVDQU 2080(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2112(CX), Y8
	VMOVDQU 2144(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2176(CX), Y8
	VMOVDQU 2208(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 5 to 7 outputs
	VMOVDQU (R10)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2240(CX), Y8
	VMOVDQU 2272(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2304(CX), Y8
	VMOVDQU 2336(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2368(CX), Y8
	VMOVDQU 2400(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2432(CX), Y8
	VMOVDQU 2464(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2496(CX), Y8
	VMOVDQU 2528(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2560(CX), Y8
	VMOVDQU 2592(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2624(CX), Y8
	VMOVDQU 2656(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 6 to 7 outputs
	VMOVDQU (BX)(R11*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2688(CX), Y8
	VMOVDQU 2720(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2752(CX), Y8
	VMOVDQU 2784(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2816(CX), Y8
	VMOVDQU 2848(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2880(CX), Y8
	VMOVDQU 2912(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2944(CX), Y8
	VMOVDQU 2976(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3008(CX), Y8
	VMOVDQU 3040(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3072(CX), Y8
	VMOVDQU 3104(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	MOVQ    (DX), R12
	VMOVDQU Y0, (R12)(R11*1)
	MOVQ    24(DX), R12
	VMOVDQU Y1, (R12)(R11*1)
	MOVQ    48(DX), R12
	VMOVDQU Y2, (R12)(R11*1)
	MOVQ    72(DX), R12
	VMOVDQU Y3, (R12)(R11*1)
	MOVQ    96(DX), R12
	VMOVDQU Y4, (R12)(R11*1)
	MOVQ    120(DX), R12
	VMOVDQU Y5, (R12)(R11*1)
	MOVQ    144(DX), R12
	VMOVDQU Y6, (R12)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_7x7_loop
	VZEROUPPER

mulAvxTwo_7x7_end:
	RET
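
// The "Full registers estimated ... YMM used" figures appear to count
// the registers a kernel would need to keep all tables resident (table
// pairs plus accumulators, nibble mask, and scratch; 125 here). When
// that estimate cannot fit in the 16 architectural YMM registers the
// generator emits "Loading no tables to registers" and reads the tables
// from memory instead, as in every kernel in this stretch of the file.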

// func mulAvxTwo_7x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_7x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 125 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_7x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), BX
	MOVQ         $0x0000000f, R11
	MOVQ         R11, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R11

mulAvxTwo_7x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (BP)(R11*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (SI)(R11*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (DI)(R11*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 3 to 8 outputs
	VMOVDQU (R8)(R11*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1536(CX), Y9
	VMOVDQU 1568(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1600(CX), Y9
	VMOVDQU 1632(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1664(CX), Y9
	VMOVDQU 1696(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1728(CX), Y9
	VMOVDQU 1760(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1792(CX), Y9
	VMOVDQU 1824(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1856(CX), Y9
	VMOVDQU 1888(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1920(CX), Y9
	VMOVDQU 1952(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1984(CX), Y9
	VMOVDQU 2016(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 4 to 8 outputs
	VMOVDQU (R9)(R11*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2048(CX), Y9
	VMOVDQU 2080(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2112(CX), Y9
	VMOVDQU 2144(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2176(CX), Y9
	VMOVDQU 2208(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2240(CX), Y9
	VMOVDQU 2272(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2304(CX), Y9
	VMOVDQU 2336(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2368(CX), Y9
	VMOVDQU 2400(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2432(CX), Y9
	VMOVDQU 2464(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 2496(CX), Y9
	VMOVDQU 2528(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 5 to 8 outputs
	VMOVDQU (R10)(R11*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2560(CX), Y9
	VMOVDQU 2592(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2624(CX), Y9
	VMOVDQU 2656(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2688(CX), Y9
	VMOVDQU 2720(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2752(CX), Y9
	VMOVDQU 2784(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2816(CX), Y9
	VMOVDQU 2848(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2880(CX), Y9
	VMOVDQU 2912(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2944(CX), Y9
	VMOVDQU 2976(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3008(CX), Y9
	VMOVDQU 3040(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 6 to 8 outputs
	VMOVDQU (BX)(R11*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 3072(CX), Y9
	VMOVDQU 3104(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 3136(CX), Y9
	VMOVDQU 3168(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 3200(CX), Y9
	VMOVDQU 3232(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 3264(CX), Y9
	VMOVDQU 3296(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 3328(CX), Y9
	VMOVDQU 3360(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 3392(CX), Y9
	VMOVDQU 3424(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 3456(CX), Y9
	VMOVDQU 3488(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3520(CX), Y9
	VMOVDQU 3552(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	MOVQ    (DX), R12
	VMOVDQU Y0, (R12)(R11*1)
	MOVQ    24(DX), R12
	VMOVDQU Y1, (R12)(R11*1)
	MOVQ    48(DX), R12
	VMOVDQU Y2, (R12)(R11*1)
	MOVQ    72(DX), R12
	VMOVDQU Y3, (R12)(R11*1)
	MOVQ    96(DX), R12
	VMOVDQU Y4, (R12)(R11*1)
	MOVQ    120(DX), R12
	VMOVDQU Y5, (R12)(R11*1)
	MOVQ    144(DX), R12
	VMOVDQU Y6, (R12)(R11*1)
	MOVQ    168(DX), R12
	VMOVDQU Y7, (R12)(R11*1)

	// Prepare for next loop
	ADDQ $0x20, R11
	DECQ AX
	JNZ  mulAvxTwo_7x8_loop
	VZEROUPPER

mulAvxTwo_7x8_end:
	RET
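
// The 8xN kernels extend the same pattern with an eighth input pointer
// (168(BX) in the prologue) and one more unrolled load-and-process block
// per output; the loop structure is otherwise unchanged.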

// func mulAvxTwo_8x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x1(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 20 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), BX
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X1
	VPBROADCASTB X1, Y1
	MOVQ         start+72(FP), R12

mulAvxTwo_8x1_loop:
	// Clear 1 outputs
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 outputs
	VMOVDQU (BP)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU (CX), Y2
	VMOVDQU 32(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 outputs
	VMOVDQU (SI)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 64(CX), Y2
	VMOVDQU 96(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 outputs
	VMOVDQU (DI)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 128(CX), Y2
	VMOVDQU 160(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 3 to 1 outputs
	VMOVDQU (R8)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 192(CX), Y2
	VMOVDQU 224(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 4 to 1 outputs
	VMOVDQU (R9)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 256(CX), Y2
	VMOVDQU 288(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 5 to 1 outputs
	VMOVDQU (R10)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 320(CX), Y2
	VMOVDQU 352(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 6 to 1 outputs
	VMOVDQU (R11)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 384(CX), Y2
	VMOVDQU 416(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 7 to 1 outputs
	VMOVDQU (BX)(R12*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 448(CX), Y2
	VMOVDQU 480(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Store 1 outputs
	VMOVDQU Y0, (DX)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_8x1_loop
	VZEROUPPER

mulAvxTwo_8x1_end:
	RET

// func mulAvxTwo_8x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x2(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 39 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), R8
	MOVQ         72(BP), R9
	MOVQ         96(BP), R10
	MOVQ         120(BP), R11
	MOVQ         144(BP), R12
	MOVQ         168(BP), BP
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R13

mulAvxTwo_8x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (R8)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 3 to 2 outputs
	VMOVDQU (R9)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 384(CX), Y3
	VMOVDQU 416(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 448(CX), Y3
	VMOVDQU 480(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 4 to 2 outputs
	VMOVDQU (R10)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 512(CX), Y3
	VMOVDQU 544(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 576(CX), Y3
	VMOVDQU 608(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 5 to 2 outputs
	VMOVDQU (R11)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 640(CX), Y3
	VMOVDQU 672(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 704(CX), Y3
	VMOVDQU 736(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 6 to 2 outputs
	VMOVDQU (R12)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 768(CX), Y3
	VMOVDQU 800(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 832(CX), Y3
	VMOVDQU 864(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 7 to 2 outputs
	VMOVDQU (BP)(R13*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 896(CX), Y3
	VMOVDQU 928(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 960(CX), Y3
	VMOVDQU 992(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R13*1)
	VMOVDQU Y1, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_8x2_loop
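	// VZEROUPPER clears the upper YMM halves before returning, avoiding
	// AVX/SSE transition penalties in the caller.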
	VZEROUPPER

mulAvxTwo_8x2_end:
	RET

// func mulAvxTwo_8x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 56 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x3_end
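	// AX now holds n/32, the number of whole 32-byte blocks to process;
	// the JZ above skips the kernel entirely when fewer than 32 bytes
	// were requested.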
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), R8
	MOVQ         48(SI), R9
	MOVQ         72(SI), R10
	MOVQ         96(SI), R11
	MOVQ         120(SI), R12
	MOVQ         144(SI), R13
	MOVQ         168(SI), SI
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R14

mulAvxTwo_8x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (R8)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (R9)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 3 to 3 outputs
	VMOVDQU (R10)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 576(CX), Y4
	VMOVDQU 608(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 640(CX), Y4
	VMOVDQU 672(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 704(CX), Y4
	VMOVDQU 736(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 4 to 3 outputs
	VMOVDQU (R11)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 768(CX), Y4
	VMOVDQU 800(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 832(CX), Y4
	VMOVDQU 864(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 896(CX), Y4
	VMOVDQU 928(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 5 to 3 outputs
	VMOVDQU (R12)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 960(CX), Y4
	VMOVDQU 992(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1024(CX), Y4
	VMOVDQU 1056(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1088(CX), Y4
	VMOVDQU 1120(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 6 to 3 outputs
	VMOVDQU (R13)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1152(CX), Y4
	VMOVDQU 1184(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1216(CX), Y4
	VMOVDQU 1248(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1280(CX), Y4
	VMOVDQU 1312(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 7 to 3 outputs
	VMOVDQU (SI)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1344(CX), Y4
	VMOVDQU 1376(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1408(CX), Y4
	VMOVDQU 1440(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1472(CX), Y4
	VMOVDQU 1504(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R14*1)
	VMOVDQU Y1, (BP)(R14*1)
	VMOVDQU Y2, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
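	// A single 32-byte step of R14 advances every input and output
	// stream at once, since all slices are indexed with the same offset.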
	DECQ AX
	JNZ  mulAvxTwo_8x3_loop
	VZEROUPPER

mulAvxTwo_8x3_end:
	RET

// func mulAvxTwo_8x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 73 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), SI
	MOVQ         72(DX), DX
	MOVQ         in_base+24(FP), DI
	MOVQ         (DI), R8
	MOVQ         24(DI), R9
	MOVQ         48(DI), R10
	MOVQ         72(DI), R11
	MOVQ         96(DI), R12
	MOVQ         120(DI), R13
	MOVQ         144(DI), R14
	MOVQ         168(DI), DI
	MOVQ         $0x0000000f, R15
	MOVQ         R15, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R15
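	// With eight inputs and four outputs every general-purpose register
	// is claimed (AX count, CX matrix, DX/BX/BP/SI outputs, DI and
	// R8-R14 inputs, R15 offset); wider kernels below must spill the
	// output pointers instead.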

mulAvxTwo_8x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (R8)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (R9)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (R10)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 3 to 4 outputs
	VMOVDQU (R11)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 768(CX), Y5
	VMOVDQU 800(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 832(CX), Y5
	VMOVDQU 864(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 896(CX), Y5
	VMOVDQU 928(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 960(CX), Y5
	VMOVDQU 992(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 4 to 4 outputs
	VMOVDQU (R12)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1024(CX), Y5
	VMOVDQU 1056(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1088(CX), Y5
	VMOVDQU 1120(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1152(CX), Y5
	VMOVDQU 1184(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1216(CX), Y5
	VMOVDQU 1248(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 5 to 4 outputs
	VMOVDQU (R13)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1280(CX), Y5
	VMOVDQU 1312(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1344(CX), Y5
	VMOVDQU 1376(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1408(CX), Y5
	VMOVDQU 1440(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1472(CX), Y5
	VMOVDQU 1504(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 6 to 4 outputs
	VMOVDQU (R14)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1536(CX), Y5
	VMOVDQU 1568(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1600(CX), Y5
	VMOVDQU 1632(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1664(CX), Y5
	VMOVDQU 1696(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1728(CX), Y5
	VMOVDQU 1760(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 7 to 4 outputs
	VMOVDQU (DI)(R15*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1792(CX), Y5
	VMOVDQU 1824(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1856(CX), Y5
	VMOVDQU 1888(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1920(CX), Y5
	VMOVDQU 1952(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1984(CX), Y5
	VMOVDQU 2016(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	VMOVDQU Y0, (BX)(R15*1)
	VMOVDQU Y1, (BP)(R15*1)
	VMOVDQU Y2, (SI)(R15*1)
	VMOVDQU Y3, (DX)(R15*1)

	// Prepare for next loop
	ADDQ $0x20, R15
	DECQ AX
	JNZ  mulAvxTwo_8x4_loop
	VZEROUPPER

mulAvxTwo_8x4_end:
	RET

// func mulAvxTwo_8x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 90 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), BX
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R12

mulAvxTwo_8x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (BP)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (SI)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (DI)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 3 to 5 outputs
	VMOVDQU (R8)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 960(CX), Y6
	VMOVDQU 992(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1024(CX), Y6
	VMOVDQU 1056(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1088(CX), Y6
	VMOVDQU 1120(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1152(CX), Y6
	VMOVDQU 1184(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1216(CX), Y6
	VMOVDQU 1248(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 4 to 5 outputs
	VMOVDQU (R9)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1280(CX), Y6
	VMOVDQU 1312(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1344(CX), Y6
	VMOVDQU 1376(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1408(CX), Y6
	VMOVDQU 1440(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1472(CX), Y6
	VMOVDQU 1504(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1536(CX), Y6
	VMOVDQU 1568(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 5 to 5 outputs
	VMOVDQU (R10)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1600(CX), Y6
	VMOVDQU 1632(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1664(CX), Y6
	VMOVDQU 1696(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1728(CX), Y6
	VMOVDQU 1760(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1792(CX), Y6
	VMOVDQU 1824(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1856(CX), Y6
	VMOVDQU 1888(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 6 to 5 outputs
	VMOVDQU (R11)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1920(CX), Y6
	VMOVDQU 1952(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1984(CX), Y6
	VMOVDQU 2016(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2048(CX), Y6
	VMOVDQU 2080(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2112(CX), Y6
	VMOVDQU 2144(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2176(CX), Y6
	VMOVDQU 2208(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 7 to 5 outputs
	VMOVDQU (BX)(R12*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 2240(CX), Y6
	VMOVDQU 2272(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 2304(CX), Y6
	VMOVDQU 2336(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2368(CX), Y6
	VMOVDQU 2400(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2432(CX), Y6
	VMOVDQU 2464(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2496(CX), Y6
	VMOVDQU 2528(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	MOVQ    (DX), R13
	VMOVDQU Y0, (R13)(R12*1)
	MOVQ    24(DX), R13
	VMOVDQU Y1, (R13)(R12*1)
	MOVQ    48(DX), R13
	VMOVDQU Y2, (R13)(R12*1)
	MOVQ    72(DX), R13
	VMOVDQU Y3, (R13)(R12*1)
	MOVQ    96(DX), R13
	VMOVDQU Y4, (R13)(R12*1)
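	// Five outputs plus eight inputs exceed the general-purpose register
	// budget, so the output pointers are not pinned: each store re-loads
	// its pointer from the out slice header through R13.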

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_8x5_loop
	VZEROUPPER

mulAvxTwo_8x5_end:
	RET

// func mulAvxTwo_8x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 107 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), BX
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R12

mulAvxTwo_8x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (BP)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (SI)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (DI)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 3 to 6 outputs
	VMOVDQU (R8)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1152(CX), Y7
	VMOVDQU 1184(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1216(CX), Y7
	VMOVDQU 1248(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1280(CX), Y7
	VMOVDQU 1312(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1344(CX), Y7
	VMOVDQU 1376(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1408(CX), Y7
	VMOVDQU 1440(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1472(CX), Y7
	VMOVDQU 1504(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 4 to 6 outputs
	VMOVDQU (R9)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1536(CX), Y7
	VMOVDQU 1568(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1600(CX), Y7
	VMOVDQU 1632(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1664(CX), Y7
	VMOVDQU 1696(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1728(CX), Y7
	VMOVDQU 1760(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1792(CX), Y7
	VMOVDQU 1824(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1856(CX), Y7
	VMOVDQU 1888(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 5 to 6 outputs
	VMOVDQU (R10)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1920(CX), Y7
	VMOVDQU 1952(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1984(CX), Y7
	VMOVDQU 2016(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2048(CX), Y7
	VMOVDQU 2080(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2112(CX), Y7
	VMOVDQU 2144(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2176(CX), Y7
	VMOVDQU 2208(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2240(CX), Y7
	VMOVDQU 2272(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 6 to 6 outputs
	VMOVDQU (R11)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 2304(CX), Y7
	VMOVDQU 2336(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 2368(CX), Y7
	VMOVDQU 2400(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2432(CX), Y7
	VMOVDQU 2464(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2496(CX), Y7
	VMOVDQU 2528(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2560(CX), Y7
	VMOVDQU 2592(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2624(CX), Y7
	VMOVDQU 2656(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 7 to 6 outputs
	VMOVDQU (BX)(R12*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 2688(CX), Y7
	VMOVDQU 2720(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 2752(CX), Y7
	VMOVDQU 2784(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2816(CX), Y7
	VMOVDQU 2848(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2880(CX), Y7
	VMOVDQU 2912(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2944(CX), Y7
	VMOVDQU 2976(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 3008(CX), Y7
	VMOVDQU 3040(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	MOVQ    (DX), R13
	VMOVDQU Y0, (R13)(R12*1)
	MOVQ    24(DX), R13
	VMOVDQU Y1, (R13)(R12*1)
	MOVQ    48(DX), R13
	VMOVDQU Y2, (R13)(R12*1)
	MOVQ    72(DX), R13
	VMOVDQU Y3, (R13)(R12*1)
	MOVQ    96(DX), R13
	VMOVDQU Y4, (R13)(R12*1)
	MOVQ    120(DX), R13
	VMOVDQU Y5, (R13)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_8x6_loop
	VZEROUPPER

mulAvxTwo_8x6_end:
	RET

// func mulAvxTwo_8x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 124 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), BX
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R12

mulAvxTwo_8x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (BP)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (SI)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (DI)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 3 to 7 outputs
	VMOVDQU (R8)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1344(CX), Y8
	VMOVDQU 1376(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1408(CX), Y8
	VMOVDQU 1440(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1472(CX), Y8
	VMOVDQU 1504(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1536(CX), Y8
	VMOVDQU 1568(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1600(CX), Y8
	VMOVDQU 1632(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1664(CX), Y8
	VMOVDQU 1696(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1728(CX), Y8
	VMOVDQU 1760(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 4 to 7 outputs
	VMOVDQU (R9)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1792(CX), Y8
	VMOVDQU 1824(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1856(CX), Y8
	VMOVDQU 1888(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1920(CX), Y8
	VMOVDQU 1952(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1984(CX), Y8
	VMOVDQU 2016(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2048(CX), Y8
	VMOVDQU 2080(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2112(CX), Y8
	VMOVDQU 2144(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2176(CX), Y8
	VMOVDQU 2208(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 5 to 7 outputs
	VMOVDQU (R10)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2240(CX), Y8
	VMOVDQU 2272(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2304(CX), Y8
	VMOVDQU 2336(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2368(CX), Y8
	VMOVDQU 2400(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2432(CX), Y8
	VMOVDQU 2464(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2496(CX), Y8
	VMOVDQU 2528(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2560(CX), Y8
	VMOVDQU 2592(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2624(CX), Y8
	VMOVDQU 2656(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 6 to 7 outputs
	VMOVDQU (R11)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2688(CX), Y8
	VMOVDQU 2720(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2752(CX), Y8
	VMOVDQU 2784(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2816(CX), Y8
	VMOVDQU 2848(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2880(CX), Y8
	VMOVDQU 2912(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2944(CX), Y8
	VMOVDQU 2976(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3008(CX), Y8
	VMOVDQU 3040(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3072(CX), Y8
	VMOVDQU 3104(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 7 to 7 outputs
	VMOVDQU (BX)(R12*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 3136(CX), Y8
	VMOVDQU 3168(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 3200(CX), Y8
	VMOVDQU 3232(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 3264(CX), Y8
	VMOVDQU 3296(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 3328(CX), Y8
	VMOVDQU 3360(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 3392(CX), Y8
	VMOVDQU 3424(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3456(CX), Y8
	VMOVDQU 3488(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3520(CX), Y8
	VMOVDQU 3552(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	MOVQ    (DX), R13
	VMOVDQU Y0, (R13)(R12*1)
	MOVQ    24(DX), R13
	VMOVDQU Y1, (R13)(R12*1)
	MOVQ    48(DX), R13
	VMOVDQU Y2, (R13)(R12*1)
	MOVQ    72(DX), R13
	VMOVDQU Y3, (R13)(R12*1)
	MOVQ    96(DX), R13
	VMOVDQU Y4, (R13)(R12*1)
	MOVQ    120(DX), R13
	VMOVDQU Y5, (R13)(R12*1)
	MOVQ    144(DX), R13
	VMOVDQU Y6, (R13)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_8x7_loop
	VZEROUPPER

mulAvxTwo_8x7_end:
	RET

// func mulAvxTwo_8x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_8x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 141 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_8x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), BX
	MOVQ         $0x0000000f, R12
	MOVQ         R12, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R12

mulAvxTwo_8x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7
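	// Eight accumulators (Y0-Y7) plus the nibble mask (Y8), two table
	// registers (Y9-Y10) and two nibble registers (Y11-Y12) keep 13 of
	// the 16 YMM registers live through the loop body.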

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (BP)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (SI)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (DI)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 3 to 8 outputs
	VMOVDQU (R8)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1536(CX), Y9
	VMOVDQU 1568(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1600(CX), Y9
	VMOVDQU 1632(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1664(CX), Y9
	VMOVDQU 1696(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1728(CX), Y9
	VMOVDQU 1760(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1792(CX), Y9
	VMOVDQU 1824(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1856(CX), Y9
	VMOVDQU 1888(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1920(CX), Y9
	VMOVDQU 1952(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1984(CX), Y9
	VMOVDQU 2016(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 4 to 8 outputs
	VMOVDQU (R9)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2048(CX), Y9
	VMOVDQU 2080(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2112(CX), Y9
	VMOVDQU 2144(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2176(CX), Y9
	VMOVDQU 2208(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2240(CX), Y9
	VMOVDQU 2272(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2304(CX), Y9
	VMOVDQU 2336(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2368(CX), Y9
	VMOVDQU 2400(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2432(CX), Y9
	VMOVDQU 2464(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 2496(CX), Y9
	VMOVDQU 2528(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 5 to 8 outputs
	VMOVDQU (R10)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2560(CX), Y9
	VMOVDQU 2592(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2624(CX), Y9
	VMOVDQU 2656(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2688(CX), Y9
	VMOVDQU 2720(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2752(CX), Y9
	VMOVDQU 2784(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2816(CX), Y9
	VMOVDQU 2848(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2880(CX), Y9
	VMOVDQU 2912(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2944(CX), Y9
	VMOVDQU 2976(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3008(CX), Y9
	VMOVDQU 3040(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 6 to 8 outputs
	VMOVDQU (R11)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 3072(CX), Y9
	VMOVDQU 3104(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 3136(CX), Y9
	VMOVDQU 3168(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 3200(CX), Y9
	VMOVDQU 3232(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 3264(CX), Y9
	VMOVDQU 3296(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 3328(CX), Y9
	VMOVDQU 3360(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 3392(CX), Y9
	VMOVDQU 3424(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 3456(CX), Y9
	VMOVDQU 3488(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3520(CX), Y9
	VMOVDQU 3552(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 7 to 8 outputs
	VMOVDQU (BX)(R12*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 3584(CX), Y9
	VMOVDQU 3616(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 3648(CX), Y9
	VMOVDQU 3680(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 3712(CX), Y9
	VMOVDQU 3744(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 3776(CX), Y9
	VMOVDQU 3808(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 3840(CX), Y9
	VMOVDQU 3872(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 3904(CX), Y9
	VMOVDQU 3936(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 3968(CX), Y9
	VMOVDQU 4000(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 4032(CX), Y9
	VMOVDQU 4064(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	MOVQ    (DX), R13
	VMOVDQU Y0, (R13)(R12*1)
	MOVQ    24(DX), R13
	VMOVDQU Y1, (R13)(R12*1)
	MOVQ    48(DX), R13
	VMOVDQU Y2, (R13)(R12*1)
	MOVQ    72(DX), R13
	VMOVDQU Y3, (R13)(R12*1)
	MOVQ    96(DX), R13
	VMOVDQU Y4, (R13)(R12*1)
	MOVQ    120(DX), R13
	VMOVDQU Y5, (R13)(R12*1)
	MOVQ    144(DX), R13
	VMOVDQU Y6, (R13)(R12*1)
	MOVQ    168(DX), R13
	VMOVDQU Y7, (R13)(R12*1)

	// Prepare for next loop
	ADDQ $0x20, R12
	DECQ AX
	JNZ  mulAvxTwo_8x8_loop
	VZEROUPPER

mulAvxTwo_8x8_end:
	RET

// func mulAvxTwo_9x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x1(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 22 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), BX
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X1
	VPBROADCASTB X1, Y1
	MOVQ         start+72(FP), R13

mulAvxTwo_9x1_loop:
	// Clear 1 outputs
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 outputs
	VMOVDQU (BP)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU (CX), Y2
	VMOVDQU 32(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 outputs
	VMOVDQU (SI)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 64(CX), Y2
	VMOVDQU 96(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 outputs
	VMOVDQU (DI)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 128(CX), Y2
	VMOVDQU 160(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 3 to 1 outputs
	VMOVDQU (R8)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 192(CX), Y2
	VMOVDQU 224(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 4 to 1 outputs
	VMOVDQU (R9)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 256(CX), Y2
	VMOVDQU 288(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 5 to 1 outputs
	VMOVDQU (R10)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 320(CX), Y2
	VMOVDQU 352(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 6 to 1 outputs
	VMOVDQU (R11)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 384(CX), Y2
	VMOVDQU 416(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 7 to 1 outputs
	VMOVDQU (R12)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 448(CX), Y2
	VMOVDQU 480(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 8 to 1 outputs
	VMOVDQU (BX)(R13*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 512(CX), Y2
	VMOVDQU 544(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0
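	// Nine inputs feed the single output: each input costs one 32-byte
	// load, a shift, two masks, two table loads, two shuffles and two
	// XORs per block, all accumulating into Y0.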

	// Store 1 outputs
	VMOVDQU Y0, (DX)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_9x1_loop
	VZEROUPPER

mulAvxTwo_9x1_end:
	RET

// func mulAvxTwo_9x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x2(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 43 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), R8
	MOVQ         72(BP), R9
	MOVQ         96(BP), R10
	MOVQ         120(BP), R11
	MOVQ         144(BP), R12
	MOVQ         168(BP), R13
	MOVQ         192(BP), BP
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R14

mulAvxTwo_9x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (R8)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 3 to 2 outputs
	VMOVDQU (R9)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 384(CX), Y3
	VMOVDQU 416(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 448(CX), Y3
	VMOVDQU 480(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 4 to 2 outputs
	VMOVDQU (R10)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 512(CX), Y3
	VMOVDQU 544(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 576(CX), Y3
	VMOVDQU 608(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 5 to 2 outputs
	VMOVDQU (R11)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 640(CX), Y3
	VMOVDQU 672(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 704(CX), Y3
	VMOVDQU 736(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 6 to 2 outputs
	VMOVDQU (R12)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 768(CX), Y3
	VMOVDQU 800(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 832(CX), Y3
	VMOVDQU 864(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 7 to 2 outputs
	VMOVDQU (R13)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 896(CX), Y3
	VMOVDQU 928(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 960(CX), Y3
	VMOVDQU 992(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 8 to 2 outputs
	VMOVDQU (BP)(R14*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 1024(CX), Y3
	VMOVDQU 1056(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 1088(CX), Y3
	VMOVDQU 1120(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R14*1)
	VMOVDQU Y1, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_9x2_loop
	VZEROUPPER

mulAvxTwo_9x2_end:
	RET
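
// Technique note: every kernel in this file multiplies bytes in
// GF(2^8) with two 16-entry nibble tables per coefficient, so that
// mul(c, b) == lo[b&15] ^ hi[b>>4]. Each 32-byte VMOVDQU from the
// matrix holds one such table repeated in both 128-bit lanes, because
// VPSHUFB shuffles within lanes. A rough scalar Go model of one
// 32-byte block of a 9-input, 2-output kernel (lo/hi are hypothetical
// [9][2][16]byte views of the matrix, not names from this package):
//
//	for j := 0; j < 32; j++ {
//		var acc0, acc1 byte
//		for i, src := range in { // 9 inputs
//			b := src[start+j]
//			acc0 ^= lo[i][0][b&15] ^ hi[i][0][b>>4]
//			acc1 ^= lo[i][1][b&15] ^ hi[i][1][b>>4]
//		}
//		out[0][start+j] = acc0 // accumulators overwrite, not XOR into,
//		out[1][start+j] = acc1 // the output slices
//	}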

// func mulAvxTwo_9x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x3(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 62 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), BP
	MOVQ         48(DX), DX
	MOVQ         in_base+24(FP), SI
	MOVQ         (SI), DI
	MOVQ         24(SI), R8
	MOVQ         48(SI), R9
	MOVQ         72(SI), R10
	MOVQ         96(SI), R11
	MOVQ         120(SI), R12
	MOVQ         144(SI), R13
	MOVQ         168(SI), R14
	MOVQ         192(SI), SI
	MOVQ         $0x0000000f, R15
	MOVQ         R15, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R15

mulAvxTwo_9x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
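	// (VPXOR of a register with itself is the standard zeroing idiom.)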

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (DI)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2
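	// Input block shape: split the 32 source bytes into low and high
	// nibbles, then for each of the 3 outputs do two VPSHUFB lookups,
	// XOR the halves, and fold the product into that output's
	// accumulator. The matrix offset advances 64 bytes per lookup pair,
	// 192 bytes (3 outputs x 64) per input.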

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (R8)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (R9)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 3 to 3 outputs
	VMOVDQU (R10)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 576(CX), Y4
	VMOVDQU 608(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 640(CX), Y4
	VMOVDQU 672(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 704(CX), Y4
	VMOVDQU 736(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 4 to 3 outputs
	VMOVDQU (R11)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 768(CX), Y4
	VMOVDQU 800(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 832(CX), Y4
	VMOVDQU 864(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 896(CX), Y4
	VMOVDQU 928(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 5 to 3 outputs
	VMOVDQU (R12)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 960(CX), Y4
	VMOVDQU 992(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1024(CX), Y4
	VMOVDQU 1056(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1088(CX), Y4
	VMOVDQU 1120(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 6 to 3 outputs
	VMOVDQU (R13)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1152(CX), Y4
	VMOVDQU 1184(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1216(CX), Y4
	VMOVDQU 1248(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1280(CX), Y4
	VMOVDQU 1312(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 7 to 3 outputs
	VMOVDQU (R14)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1344(CX), Y4
	VMOVDQU 1376(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1408(CX), Y4
	VMOVDQU 1440(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1472(CX), Y4
	VMOVDQU 1504(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 8 to 3 outputs
	VMOVDQU (SI)(R15*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1536(CX), Y4
	VMOVDQU 1568(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1600(CX), Y4
	VMOVDQU 1632(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1664(CX), Y4
	VMOVDQU 1696(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	VMOVDQU Y0, (BX)(R15*1)
	VMOVDQU Y1, (BP)(R15*1)
	VMOVDQU Y2, (DX)(R15*1)

	// Prepare for next loop
	ADDQ $0x20, R15
	DECQ AX
	JNZ  mulAvxTwo_9x3_loop
	VZEROUPPER

mulAvxTwo_9x3_end:
	RET
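
// A 9x3 kernel walks 27 table pairs (1728 bytes) per iteration, far
// more than the 512 bytes that sixteen YMM registers can hold, which
// is what the header's "Loading no tables to registers" means: every
// pair is re-read from matrix (CX) at its point of use, and the "62
// YMM used" figure estimates what full caching would have required.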

// func mulAvxTwo_9x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x4(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 81 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), BX
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R13

mulAvxTwo_9x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (BP)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (SI)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (DI)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 3 to 4 outputs
	VMOVDQU (R8)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 768(CX), Y5
	VMOVDQU 800(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 832(CX), Y5
	VMOVDQU 864(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 896(CX), Y5
	VMOVDQU 928(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 960(CX), Y5
	VMOVDQU 992(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 4 to 4 outputs
	VMOVDQU (R9)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1024(CX), Y5
	VMOVDQU 1056(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1088(CX), Y5
	VMOVDQU 1120(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1152(CX), Y5
	VMOVDQU 1184(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1216(CX), Y5
	VMOVDQU 1248(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 5 to 4 outputs
	VMOVDQU (R10)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1280(CX), Y5
	VMOVDQU 1312(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1344(CX), Y5
	VMOVDQU 1376(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1408(CX), Y5
	VMOVDQU 1440(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1472(CX), Y5
	VMOVDQU 1504(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 6 to 4 outputs
	VMOVDQU (R11)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1536(CX), Y5
	VMOVDQU 1568(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1600(CX), Y5
	VMOVDQU 1632(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1664(CX), Y5
	VMOVDQU 1696(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1728(CX), Y5
	VMOVDQU 1760(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 7 to 4 outputs
	VMOVDQU (R12)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1792(CX), Y5
	VMOVDQU 1824(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1856(CX), Y5
	VMOVDQU 1888(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1920(CX), Y5
	VMOVDQU 1952(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1984(CX), Y5
	VMOVDQU 2016(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 8 to 4 outputs
	VMOVDQU (BX)(R13*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 2048(CX), Y5
	VMOVDQU 2080(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 2112(CX), Y5
	VMOVDQU 2144(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 2176(CX), Y5
	VMOVDQU 2208(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 2240(CX), Y5
	VMOVDQU 2272(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	MOVQ    (DX), R14
	VMOVDQU Y0, (R14)(R13*1)
	MOVQ    24(DX), R14
	VMOVDQU Y1, (R14)(R13*1)
	MOVQ    48(DX), R14
	VMOVDQU Y2, (R14)(R13*1)
	MOVQ    72(DX), R14
	VMOVDQU Y3, (R14)(R13*1)
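	// Nine pinned input pointers leave too few general registers to
	// also pin four output bases, so each base is re-read from the out
	// slice header (DX) on every store, unlike the 2- and 3-output
	// kernels above.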

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_9x4_loop
	VZEROUPPER

mulAvxTwo_9x4_end:
	RET
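
// Prologue pattern shared by these kernels: AX = n >> 5 is the number
// of whole 32-byte blocks to process (the low five bits of n are
// ignored), a zero count returns without touching memory, and start
// is the byte offset applied uniformly to every input and output
// slice.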

// func mulAvxTwo_9x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x5(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 100 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), BX
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R13

mulAvxTwo_9x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (BP)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (SI)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (DI)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 3 to 5 outputs
	VMOVDQU (R8)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 960(CX), Y6
	VMOVDQU 992(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1024(CX), Y6
	VMOVDQU 1056(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1088(CX), Y6
	VMOVDQU 1120(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1152(CX), Y6
	VMOVDQU 1184(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1216(CX), Y6
	VMOVDQU 1248(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 4 to 5 outputs
	VMOVDQU (R9)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1280(CX), Y6
	VMOVDQU 1312(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1344(CX), Y6
	VMOVDQU 1376(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1408(CX), Y6
	VMOVDQU 1440(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1472(CX), Y6
	VMOVDQU 1504(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1536(CX), Y6
	VMOVDQU 1568(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 5 to 5 outputs
	VMOVDQU (R10)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1600(CX), Y6
	VMOVDQU 1632(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1664(CX), Y6
	VMOVDQU 1696(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1728(CX), Y6
	VMOVDQU 1760(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1792(CX), Y6
	VMOVDQU 1824(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1856(CX), Y6
	VMOVDQU 1888(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 6 to 5 outputs
	VMOVDQU (R11)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1920(CX), Y6
	VMOVDQU 1952(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1984(CX), Y6
	VMOVDQU 2016(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2048(CX), Y6
	VMOVDQU 2080(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2112(CX), Y6
	VMOVDQU 2144(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2176(CX), Y6
	VMOVDQU 2208(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 7 to 5 outputs
	VMOVDQU (R12)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 2240(CX), Y6
	VMOVDQU 2272(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 2304(CX), Y6
	VMOVDQU 2336(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2368(CX), Y6
	VMOVDQU 2400(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2432(CX), Y6
	VMOVDQU 2464(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2496(CX), Y6
	VMOVDQU 2528(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 8 to 5 outputs
	VMOVDQU (BX)(R13*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 2560(CX), Y6
	VMOVDQU 2592(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 2624(CX), Y6
	VMOVDQU 2656(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2688(CX), Y6
	VMOVDQU 2720(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2752(CX), Y6
	VMOVDQU 2784(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2816(CX), Y6
	VMOVDQU 2848(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	MOVQ    (DX), R14
	VMOVDQU Y0, (R14)(R13*1)
	MOVQ    24(DX), R14
	VMOVDQU Y1, (R14)(R13*1)
	MOVQ    48(DX), R14
	VMOVDQU Y2, (R14)(R13*1)
	MOVQ    72(DX), R14
	VMOVDQU Y3, (R14)(R13*1)
	MOVQ    96(DX), R14
	VMOVDQU Y4, (R14)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_9x5_loop
	VZEROUPPER

mulAvxTwo_9x5_end:
	RET
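
// Matrix addressing is input-major: the 64-byte pair for input i and
// output o lives at offset (i*outputs+o)*64, which is why the 9x5
// kernel above begins input 1 at 320(CX) (1*5*64) and input 8 at
// 2560(CX) (8*5*64).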

// func mulAvxTwo_9x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x6(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 119 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), BX
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R13

mulAvxTwo_9x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (BP)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (SI)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (DI)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 3 to 6 outputs
	VMOVDQU (R8)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1152(CX), Y7
	VMOVDQU 1184(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1216(CX), Y7
	VMOVDQU 1248(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1280(CX), Y7
	VMOVDQU 1312(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1344(CX), Y7
	VMOVDQU 1376(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1408(CX), Y7
	VMOVDQU 1440(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1472(CX), Y7
	VMOVDQU 1504(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 4 to 6 outputs
	VMOVDQU (R9)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1536(CX), Y7
	VMOVDQU 1568(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1600(CX), Y7
	VMOVDQU 1632(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1664(CX), Y7
	VMOVDQU 1696(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1728(CX), Y7
	VMOVDQU 1760(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1792(CX), Y7
	VMOVDQU 1824(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1856(CX), Y7
	VMOVDQU 1888(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 5 to 6 outputs
	VMOVDQU (R10)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1920(CX), Y7
	VMOVDQU 1952(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1984(CX), Y7
	VMOVDQU 2016(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2048(CX), Y7
	VMOVDQU 2080(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2112(CX), Y7
	VMOVDQU 2144(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2176(CX), Y7
	VMOVDQU 2208(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2240(CX), Y7
	VMOVDQU 2272(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 6 to 6 outputs
	VMOVDQU (R11)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 2304(CX), Y7
	VMOVDQU 2336(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 2368(CX), Y7
	VMOVDQU 2400(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2432(CX), Y7
	VMOVDQU 2464(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2496(CX), Y7
	VMOVDQU 2528(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2560(CX), Y7
	VMOVDQU 2592(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2624(CX), Y7
	VMOVDQU 2656(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 7 to 6 outputs
	VMOVDQU (R12)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 2688(CX), Y7
	VMOVDQU 2720(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 2752(CX), Y7
	VMOVDQU 2784(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2816(CX), Y7
	VMOVDQU 2848(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2880(CX), Y7
	VMOVDQU 2912(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2944(CX), Y7
	VMOVDQU 2976(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 3008(CX), Y7
	VMOVDQU 3040(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 8 to 6 outputs
	VMOVDQU (BX)(R13*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 3072(CX), Y7
	VMOVDQU 3104(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 3136(CX), Y7
	VMOVDQU 3168(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 3200(CX), Y7
	VMOVDQU 3232(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 3264(CX), Y7
	VMOVDQU 3296(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 3328(CX), Y7
	VMOVDQU 3360(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 3392(CX), Y7
	VMOVDQU 3424(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	MOVQ    (DX), R14
	VMOVDQU Y0, (R14)(R13*1)
	MOVQ    24(DX), R14
	VMOVDQU Y1, (R14)(R13*1)
	MOVQ    48(DX), R14
	VMOVDQU Y2, (R14)(R13*1)
	MOVQ    72(DX), R14
	VMOVDQU Y3, (R14)(R13*1)
	MOVQ    96(DX), R14
	VMOVDQU Y4, (R14)(R13*1)
	MOVQ    120(DX), R14
	VMOVDQU Y5, (R14)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_9x6_loop
	VZEROUPPER

mulAvxTwo_9x6_end:
	RET
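
// The table pairs consumed above are ordinary GF(2^8) nibble tables.
// A minimal sketch, assuming a field multiply gfMul (hypothetical
// name, not defined in this file), of how one 64-byte pair could be
// built from a matrix coefficient c:
//
//	var lo, hi [16]byte
//	for i := 0; i < 16; i++ {
//		lo[i] = gfMul(c, byte(i))    // products for the low nibble
//		hi[i] = gfMul(c, byte(i)<<4) // products for the high nibble
//	}
//	// Since b == (b>>4)<<4 ^ (b&15) and gfMul distributes over XOR,
//	// gfMul(c, b) == lo[b&15] ^ hi[b>>4] for every byte b.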

// func mulAvxTwo_9x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x7(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 138 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), BX
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R13

mulAvxTwo_9x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (BP)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (SI)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (DI)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 3 to 7 outputs
	VMOVDQU (R8)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1344(CX), Y8
	VMOVDQU 1376(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1408(CX), Y8
	VMOVDQU 1440(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1472(CX), Y8
	VMOVDQU 1504(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1536(CX), Y8
	VMOVDQU 1568(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1600(CX), Y8
	VMOVDQU 1632(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1664(CX), Y8
	VMOVDQU 1696(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1728(CX), Y8
	VMOVDQU 1760(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 4 to 7 outputs
	VMOVDQU (R9)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1792(CX), Y8
	VMOVDQU 1824(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1856(CX), Y8
	VMOVDQU 1888(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1920(CX), Y8
	VMOVDQU 1952(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1984(CX), Y8
	VMOVDQU 2016(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2048(CX), Y8
	VMOVDQU 2080(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2112(CX), Y8
	VMOVDQU 2144(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2176(CX), Y8
	VMOVDQU 2208(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 5 to 7 outputs
	VMOVDQU (R10)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2240(CX), Y8
	VMOVDQU 2272(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2304(CX), Y8
	VMOVDQU 2336(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2368(CX), Y8
	VMOVDQU 2400(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2432(CX), Y8
	VMOVDQU 2464(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2496(CX), Y8
	VMOVDQU 2528(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2560(CX), Y8
	VMOVDQU 2592(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2624(CX), Y8
	VMOVDQU 2656(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 6 to 7 outputs
	VMOVDQU (R11)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2688(CX), Y8
	VMOVDQU 2720(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2752(CX), Y8
	VMOVDQU 2784(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2816(CX), Y8
	VMOVDQU 2848(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2880(CX), Y8
	VMOVDQU 2912(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2944(CX), Y8
	VMOVDQU 2976(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3008(CX), Y8
	VMOVDQU 3040(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3072(CX), Y8
	VMOVDQU 3104(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 7 to 7 outputs
	VMOVDQU (R12)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 3136(CX), Y8
	VMOVDQU 3168(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 3200(CX), Y8
	VMOVDQU 3232(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 3264(CX), Y8
	VMOVDQU 3296(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 3328(CX), Y8
	VMOVDQU 3360(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 3392(CX), Y8
	VMOVDQU 3424(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3456(CX), Y8
	VMOVDQU 3488(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3520(CX), Y8
	VMOVDQU 3552(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 8 to 7 outputs
	VMOVDQU (BX)(R13*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 3584(CX), Y8
	VMOVDQU 3616(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 3648(CX), Y8
	VMOVDQU 3680(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 3712(CX), Y8
	VMOVDQU 3744(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 3776(CX), Y8
	VMOVDQU 3808(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 3840(CX), Y8
	VMOVDQU 3872(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3904(CX), Y8
	VMOVDQU 3936(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3968(CX), Y8
	VMOVDQU 4000(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	MOVQ    (DX), R14
	VMOVDQU Y0, (R14)(R13*1)
	MOVQ    24(DX), R14
	VMOVDQU Y1, (R14)(R13*1)
	MOVQ    48(DX), R14
	VMOVDQU Y2, (R14)(R13*1)
	MOVQ    72(DX), R14
	VMOVDQU Y3, (R14)(R13*1)
	MOVQ    96(DX), R14
	VMOVDQU Y4, (R14)(R13*1)
	MOVQ    120(DX), R14
	VMOVDQU Y5, (R14)(R13*1)
	MOVQ    144(DX), R14
	VMOVDQU Y6, (R14)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_9x7_loop
	VZEROUPPER

mulAvxTwo_9x7_end:
	RET
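
// Per iteration the 9x7 kernel reads 9*32 input bytes, writes 7*32
// output bytes, and touches 63 table pairs (9 inputs x 7 outputs),
// costing two table loads, two VPSHUFB and two VPXOR per pair.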

// func mulAvxTwo_9x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_9x8(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 157 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_9x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), BX
	MOVQ         $0x0000000f, R13
	MOVQ         R13, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R13

mulAvxTwo_9x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (BP)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (SI)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (DI)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 3 to 8 outputs
	VMOVDQU (R8)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1536(CX), Y9
	VMOVDQU 1568(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1600(CX), Y9
	VMOVDQU 1632(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1664(CX), Y9
	VMOVDQU 1696(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1728(CX), Y9
	VMOVDQU 1760(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1792(CX), Y9
	VMOVDQU 1824(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1856(CX), Y9
	VMOVDQU 1888(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1920(CX), Y9
	VMOVDQU 1952(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1984(CX), Y9
	VMOVDQU 2016(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 4 to 8 outputs
	VMOVDQU (R9)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2048(CX), Y9
	VMOVDQU 2080(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2112(CX), Y9
	VMOVDQU 2144(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2176(CX), Y9
	VMOVDQU 2208(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2240(CX), Y9
	VMOVDQU 2272(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2304(CX), Y9
	VMOVDQU 2336(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2368(CX), Y9
	VMOVDQU 2400(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2432(CX), Y9
	VMOVDQU 2464(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 2496(CX), Y9
	VMOVDQU 2528(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 5 to 8 outputs
	VMOVDQU (R10)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2560(CX), Y9
	VMOVDQU 2592(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2624(CX), Y9
	VMOVDQU 2656(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2688(CX), Y9
	VMOVDQU 2720(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2752(CX), Y9
	VMOVDQU 2784(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2816(CX), Y9
	VMOVDQU 2848(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2880(CX), Y9
	VMOVDQU 2912(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2944(CX), Y9
	VMOVDQU 2976(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3008(CX), Y9
	VMOVDQU 3040(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 6 to 8 outputs
	VMOVDQU (R11)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 3072(CX), Y9
	VMOVDQU 3104(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 3136(CX), Y9
	VMOVDQU 3168(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 3200(CX), Y9
	VMOVDQU 3232(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 3264(CX), Y9
	VMOVDQU 3296(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 3328(CX), Y9
	VMOVDQU 3360(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 3392(CX), Y9
	VMOVDQU 3424(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 3456(CX), Y9
	VMOVDQU 3488(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3520(CX), Y9
	VMOVDQU 3552(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 7 to 8 outputs
	VMOVDQU (R12)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 3584(CX), Y9
	VMOVDQU 3616(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 3648(CX), Y9
	VMOVDQU 3680(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 3712(CX), Y9
	VMOVDQU 3744(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 3776(CX), Y9
	VMOVDQU 3808(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 3840(CX), Y9
	VMOVDQU 3872(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 3904(CX), Y9
	VMOVDQU 3936(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 3968(CX), Y9
	VMOVDQU 4000(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 4032(CX), Y9
	VMOVDQU 4064(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 8 to 8 outputs
	VMOVDQU (BX)(R13*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 4096(CX), Y9
	VMOVDQU 4128(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 4160(CX), Y9
	VMOVDQU 4192(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 4224(CX), Y9
	VMOVDQU 4256(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 4288(CX), Y9
	VMOVDQU 4320(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 4352(CX), Y9
	VMOVDQU 4384(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 4416(CX), Y9
	VMOVDQU 4448(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 4480(CX), Y9
	VMOVDQU 4512(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 4544(CX), Y9
	VMOVDQU 4576(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
	MOVQ    (DX), R14
	VMOVDQU Y0, (R14)(R13*1)
	MOVQ    24(DX), R14
	VMOVDQU Y1, (R14)(R13*1)
	MOVQ    48(DX), R14
	VMOVDQU Y2, (R14)(R13*1)
	MOVQ    72(DX), R14
	VMOVDQU Y3, (R14)(R13*1)
	MOVQ    96(DX), R14
	VMOVDQU Y4, (R14)(R13*1)
	MOVQ    120(DX), R14
	VMOVDQU Y5, (R14)(R13*1)
	MOVQ    144(DX), R14
	VMOVDQU Y6, (R14)(R13*1)
	MOVQ    168(DX), R14
	VMOVDQU Y7, (R14)(R13*1)

	// Prepare for next loop
	ADDQ $0x20, R13
	DECQ AX
	JNZ  mulAvxTwo_9x8_loop
	VZEROUPPER

mulAvxTwo_9x8_end:
	RET
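
// The final pair above ends at 4608(CX): a 9x8 kernel consumes
// 9*8*64 = 4608 bytes of lookup tables, the largest matrix read by
// any of the 9-input kernels in this file.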

// func mulAvxTwo_10x1(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x1(SB), $0-88
	// Loading no tables to registers
	// Full registers estimated 24 YMM used
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x1_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), R13
	MOVQ         216(BX), BX
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X1
	VPBROADCASTB X1, Y1
	MOVQ         start+72(FP), R14

mulAvxTwo_10x1_loop:
	// Clear 1 output
	VPXOR Y0, Y0, Y0

	// Load and process 32 bytes from input 0 to 1 output
	VMOVDQU (BP)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU (CX), Y2
	VMOVDQU 32(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 1 to 1 output
	VMOVDQU (SI)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 64(CX), Y2
	VMOVDQU 96(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 2 to 1 output
	VMOVDQU (DI)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 128(CX), Y2
	VMOVDQU 160(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 3 to 1 output
	VMOVDQU (R8)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 192(CX), Y2
	VMOVDQU 224(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 4 to 1 output
	VMOVDQU (R9)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 256(CX), Y2
	VMOVDQU 288(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 5 to 1 output
	VMOVDQU (R10)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 320(CX), Y2
	VMOVDQU 352(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 6 to 1 output
	VMOVDQU (R11)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 384(CX), Y2
	VMOVDQU 416(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 7 to 1 output
	VMOVDQU (R12)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 448(CX), Y2
	VMOVDQU 480(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 8 to 1 output
	VMOVDQU (R13)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 512(CX), Y2
	VMOVDQU 544(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Load and process 32 bytes from input 9 to 1 output
	VMOVDQU (BX)(R14*1), Y4
	VPSRLQ  $0x04, Y4, Y5
	VPAND   Y1, Y4, Y4
	VPAND   Y1, Y5, Y5
	VMOVDQU 576(CX), Y2
	VMOVDQU 608(CX), Y3
	VPSHUFB Y4, Y2, Y2
	VPSHUFB Y5, Y3, Y3
	VPXOR   Y2, Y3, Y2
	VPXOR   Y2, Y0, Y0

	// Store 1 output
	VMOVDQU Y0, (DX)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_10x1_loop
	VZEROUPPER

mulAvxTwo_10x1_end:
	RET
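
// With ten inputs the kernel pins BP, SI, DI, R8-R13 and BX on input
// pointers and keeps the running offset in R14; the single output
// base was resolved once into DX in the prologue, so no per-store
// pointer reloads are needed here.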

// func mulAvxTwo_10x2(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x2(SB), $0-88
	// Not loading tables to registers
	// Full load would use an estimated 47 YMM registers
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x2_end
	MOVQ         out_base+48(FP), DX
	MOVQ         (DX), BX
	MOVQ         24(DX), DX
	MOVQ         in_base+24(FP), BP
	MOVQ         (BP), SI
	MOVQ         24(BP), DI
	MOVQ         48(BP), R8
	MOVQ         72(BP), R9
	MOVQ         96(BP), R10
	MOVQ         120(BP), R11
	MOVQ         144(BP), R12
	MOVQ         168(BP), R13
	MOVQ         192(BP), R14
	MOVQ         216(BP), BP
	MOVQ         $0x0000000f, R15
	MOVQ         R15, X2
	VPBROADCASTB X2, Y2
	MOVQ         start+72(FP), R15
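	// At ten inputs and two outputs the prologue above has used every
	// general-purpose register except SP: BX and DX hold the output
	// pointers; SI, DI, R8-R14 and BP the ten input pointers; R15 the
	// running byte offset.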

mulAvxTwo_10x2_loop:
	// Clear 2 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1

	// Load and process 32 bytes from input 0 to 2 outputs
	VMOVDQU (SI)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU (CX), Y3
	VMOVDQU 32(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 64(CX), Y3
	VMOVDQU 96(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 1 to 2 outputs
	VMOVDQU (DI)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 128(CX), Y3
	VMOVDQU 160(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 192(CX), Y3
	VMOVDQU 224(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 2 to 2 outputs
	VMOVDQU (R8)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 256(CX), Y3
	VMOVDQU 288(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 320(CX), Y3
	VMOVDQU 352(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 3 to 2 outputs
	VMOVDQU (R9)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 384(CX), Y3
	VMOVDQU 416(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 448(CX), Y3
	VMOVDQU 480(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 4 to 2 outputs
	VMOVDQU (R10)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 512(CX), Y3
	VMOVDQU 544(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 576(CX), Y3
	VMOVDQU 608(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 5 to 2 outputs
	VMOVDQU (R11)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 640(CX), Y3
	VMOVDQU 672(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 704(CX), Y3
	VMOVDQU 736(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 6 to 2 outputs
	VMOVDQU (R12)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 768(CX), Y3
	VMOVDQU 800(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 832(CX), Y3
	VMOVDQU 864(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 7 to 2 outputs
	VMOVDQU (R13)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 896(CX), Y3
	VMOVDQU 928(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 960(CX), Y3
	VMOVDQU 992(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 8 to 2 outputs
	VMOVDQU (R14)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 1024(CX), Y3
	VMOVDQU 1056(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 1088(CX), Y3
	VMOVDQU 1120(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Load and process 32 bytes from input 9 to 2 outputs
	VMOVDQU (BP)(R15*1), Y5
	VPSRLQ  $0x04, Y5, Y6
	VPAND   Y2, Y5, Y5
	VPAND   Y2, Y6, Y6
	VMOVDQU 1152(CX), Y3
	VMOVDQU 1184(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y0, Y0
	VMOVDQU 1216(CX), Y3
	VMOVDQU 1248(CX), Y4
	VPSHUFB Y5, Y3, Y3
	VPSHUFB Y6, Y4, Y4
	VPXOR   Y3, Y4, Y3
	VPXOR   Y3, Y1, Y1

	// Store 2 outputs
	VMOVDQU Y0, (BX)(R15*1)
	VMOVDQU Y1, (DX)(R15*1)

	// Prepare for next loop
	ADDQ $0x20, R15
	DECQ AX
	JNZ  mulAvxTwo_10x2_loop
	VZEROUPPER

mulAvxTwo_10x2_end:
	RET

// func mulAvxTwo_10x3(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x3(SB), $0-88
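	// With three outputs there is no spare general-purpose register left
	// for each output pointer, so the store sequence at the bottom of the
	// loop reloads every slice base from out_base (DX) into R15 on each
	// iteration instead of caching it in the prologue.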
	// Not loading tables to registers
	// Full load would use an estimated 68 YMM registers
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x3_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), R13
	MOVQ         216(BX), BX
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X3
	VPBROADCASTB X3, Y3
	MOVQ         start+72(FP), R14

mulAvxTwo_10x3_loop:
	// Clear 3 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2

	// Load and process 32 bytes from input 0 to 3 outputs
	VMOVDQU (BP)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU (CX), Y4
	VMOVDQU 32(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 64(CX), Y4
	VMOVDQU 96(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 128(CX), Y4
	VMOVDQU 160(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 1 to 3 outputs
	VMOVDQU (SI)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 192(CX), Y4
	VMOVDQU 224(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 256(CX), Y4
	VMOVDQU 288(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 320(CX), Y4
	VMOVDQU 352(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 2 to 3 outputs
	VMOVDQU (DI)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 384(CX), Y4
	VMOVDQU 416(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 448(CX), Y4
	VMOVDQU 480(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 512(CX), Y4
	VMOVDQU 544(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 3 to 3 outputs
	VMOVDQU (R8)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 576(CX), Y4
	VMOVDQU 608(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 640(CX), Y4
	VMOVDQU 672(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 704(CX), Y4
	VMOVDQU 736(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 4 to 3 outputs
	VMOVDQU (R9)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 768(CX), Y4
	VMOVDQU 800(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 832(CX), Y4
	VMOVDQU 864(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 896(CX), Y4
	VMOVDQU 928(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 5 to 3 outputs
	VMOVDQU (R10)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 960(CX), Y4
	VMOVDQU 992(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1024(CX), Y4
	VMOVDQU 1056(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1088(CX), Y4
	VMOVDQU 1120(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 6 to 3 outputs
	VMOVDQU (R11)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1152(CX), Y4
	VMOVDQU 1184(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1216(CX), Y4
	VMOVDQU 1248(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1280(CX), Y4
	VMOVDQU 1312(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 7 to 3 outputs
	VMOVDQU (R12)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1344(CX), Y4
	VMOVDQU 1376(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1408(CX), Y4
	VMOVDQU 1440(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1472(CX), Y4
	VMOVDQU 1504(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 8 to 3 outputs
	VMOVDQU (R13)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1536(CX), Y4
	VMOVDQU 1568(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1600(CX), Y4
	VMOVDQU 1632(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1664(CX), Y4
	VMOVDQU 1696(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Load and process 32 bytes from input 9 to 3 outputs
	VMOVDQU (BX)(R14*1), Y6
	VPSRLQ  $0x04, Y6, Y7
	VPAND   Y3, Y6, Y6
	VPAND   Y3, Y7, Y7
	VMOVDQU 1728(CX), Y4
	VMOVDQU 1760(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y0, Y0
	VMOVDQU 1792(CX), Y4
	VMOVDQU 1824(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y1, Y1
	VMOVDQU 1856(CX), Y4
	VMOVDQU 1888(CX), Y5
	VPSHUFB Y6, Y4, Y4
	VPSHUFB Y7, Y5, Y5
	VPXOR   Y4, Y5, Y4
	VPXOR   Y4, Y2, Y2

	// Store 3 outputs
	MOVQ    (DX), R15
	VMOVDQU Y0, (R15)(R14*1)
	MOVQ    24(DX), R15
	VMOVDQU Y1, (R15)(R14*1)
	MOVQ    48(DX), R15
	VMOVDQU Y2, (R15)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_10x3_loop
	VZEROUPPER

mulAvxTwo_10x3_end:
	RET

// func mulAvxTwo_10x4(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x4(SB), $0-88
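	// Four accumulators (Y0-Y3), nibble mask in Y4, scratch in Y5-Y8;
	// 256 bytes of tables per input, 2560 in total.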
	// Not loading tables to registers
	// Full load would use an estimated 89 YMM registers
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x4_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), R13
	MOVQ         216(BX), BX
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X4
	VPBROADCASTB X4, Y4
	MOVQ         start+72(FP), R14

mulAvxTwo_10x4_loop:
	// Clear 4 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3

	// Load and process 32 bytes from input 0 to 4 outputs
	VMOVDQU (BP)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU (CX), Y5
	VMOVDQU 32(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 64(CX), Y5
	VMOVDQU 96(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 128(CX), Y5
	VMOVDQU 160(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 192(CX), Y5
	VMOVDQU 224(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 1 to 4 outputs
	VMOVDQU (SI)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 256(CX), Y5
	VMOVDQU 288(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 320(CX), Y5
	VMOVDQU 352(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 384(CX), Y5
	VMOVDQU 416(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 448(CX), Y5
	VMOVDQU 480(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 2 to 4 outputs
	VMOVDQU (DI)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 512(CX), Y5
	VMOVDQU 544(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 576(CX), Y5
	VMOVDQU 608(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 640(CX), Y5
	VMOVDQU 672(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 704(CX), Y5
	VMOVDQU 736(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 3 to 4 outputs
	VMOVDQU (R8)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 768(CX), Y5
	VMOVDQU 800(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 832(CX), Y5
	VMOVDQU 864(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 896(CX), Y5
	VMOVDQU 928(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 960(CX), Y5
	VMOVDQU 992(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 4 to 4 outputs
	VMOVDQU (R9)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1024(CX), Y5
	VMOVDQU 1056(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1088(CX), Y5
	VMOVDQU 1120(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1152(CX), Y5
	VMOVDQU 1184(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1216(CX), Y5
	VMOVDQU 1248(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 5 to 4 outputs
	VMOVDQU (R10)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1280(CX), Y5
	VMOVDQU 1312(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1344(CX), Y5
	VMOVDQU 1376(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1408(CX), Y5
	VMOVDQU 1440(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1472(CX), Y5
	VMOVDQU 1504(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 6 to 4 outputs
	VMOVDQU (R11)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1536(CX), Y5
	VMOVDQU 1568(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1600(CX), Y5
	VMOVDQU 1632(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1664(CX), Y5
	VMOVDQU 1696(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1728(CX), Y5
	VMOVDQU 1760(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 7 to 4 outputs
	VMOVDQU (R12)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 1792(CX), Y5
	VMOVDQU 1824(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 1856(CX), Y5
	VMOVDQU 1888(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 1920(CX), Y5
	VMOVDQU 1952(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 1984(CX), Y5
	VMOVDQU 2016(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 8 to 4 outputs
	VMOVDQU (R13)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 2048(CX), Y5
	VMOVDQU 2080(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 2112(CX), Y5
	VMOVDQU 2144(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 2176(CX), Y5
	VMOVDQU 2208(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 2240(CX), Y5
	VMOVDQU 2272(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Load and process 32 bytes from input 9 to 4 outputs
	VMOVDQU (BX)(R14*1), Y7
	VPSRLQ  $0x04, Y7, Y8
	VPAND   Y4, Y7, Y7
	VPAND   Y4, Y8, Y8
	VMOVDQU 2304(CX), Y5
	VMOVDQU 2336(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y0, Y0
	VMOVDQU 2368(CX), Y5
	VMOVDQU 2400(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y1, Y1
	VMOVDQU 2432(CX), Y5
	VMOVDQU 2464(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y2, Y2
	VMOVDQU 2496(CX), Y5
	VMOVDQU 2528(CX), Y6
	VPSHUFB Y7, Y5, Y5
	VPSHUFB Y8, Y6, Y6
	VPXOR   Y5, Y6, Y5
	VPXOR   Y5, Y3, Y3

	// Store 4 outputs
	MOVQ    (DX), R15
	VMOVDQU Y0, (R15)(R14*1)
	MOVQ    24(DX), R15
	VMOVDQU Y1, (R15)(R14*1)
	MOVQ    48(DX), R15
	VMOVDQU Y2, (R15)(R14*1)
	MOVQ    72(DX), R15
	VMOVDQU Y3, (R15)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_10x4_loop
	VZEROUPPER

mulAvxTwo_10x4_end:
	RET

// func mulAvxTwo_10x5(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x5(SB), $0-88
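	// Five accumulators (Y0-Y4), nibble mask in Y5, scratch in Y6-Y9;
	// 320 bytes of tables per input, 3200 in total.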
	// Not loading tables to registers
	// Full load would use an estimated 110 YMM registers
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x5_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), R13
	MOVQ         216(BX), BX
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X5
	VPBROADCASTB X5, Y5
	MOVQ         start+72(FP), R14

mulAvxTwo_10x5_loop:
	// Clear 5 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4

	// Load and process 32 bytes from input 0 to 5 outputs
	VMOVDQU (BP)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU (CX), Y6
	VMOVDQU 32(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 64(CX), Y6
	VMOVDQU 96(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 128(CX), Y6
	VMOVDQU 160(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 192(CX), Y6
	VMOVDQU 224(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 256(CX), Y6
	VMOVDQU 288(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 1 to 5 outputs
	VMOVDQU (SI)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 320(CX), Y6
	VMOVDQU 352(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 384(CX), Y6
	VMOVDQU 416(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 448(CX), Y6
	VMOVDQU 480(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 512(CX), Y6
	VMOVDQU 544(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 576(CX), Y6
	VMOVDQU 608(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 2 to 5 outputs
	VMOVDQU (DI)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 640(CX), Y6
	VMOVDQU 672(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 704(CX), Y6
	VMOVDQU 736(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 768(CX), Y6
	VMOVDQU 800(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 832(CX), Y6
	VMOVDQU 864(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 896(CX), Y6
	VMOVDQU 928(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 3 to 5 outputs
	VMOVDQU (R8)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 960(CX), Y6
	VMOVDQU 992(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1024(CX), Y6
	VMOVDQU 1056(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1088(CX), Y6
	VMOVDQU 1120(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1152(CX), Y6
	VMOVDQU 1184(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1216(CX), Y6
	VMOVDQU 1248(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 4 to 5 outputs
	VMOVDQU (R9)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1280(CX), Y6
	VMOVDQU 1312(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1344(CX), Y6
	VMOVDQU 1376(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1408(CX), Y6
	VMOVDQU 1440(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1472(CX), Y6
	VMOVDQU 1504(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1536(CX), Y6
	VMOVDQU 1568(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 5 to 5 outputs
	VMOVDQU (R10)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1600(CX), Y6
	VMOVDQU 1632(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1664(CX), Y6
	VMOVDQU 1696(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 1728(CX), Y6
	VMOVDQU 1760(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 1792(CX), Y6
	VMOVDQU 1824(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 1856(CX), Y6
	VMOVDQU 1888(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 6 to 5 outputs
	VMOVDQU (R11)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 1920(CX), Y6
	VMOVDQU 1952(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 1984(CX), Y6
	VMOVDQU 2016(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2048(CX), Y6
	VMOVDQU 2080(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2112(CX), Y6
	VMOVDQU 2144(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2176(CX), Y6
	VMOVDQU 2208(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 7 to 5 outputs
	VMOVDQU (R12)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 2240(CX), Y6
	VMOVDQU 2272(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 2304(CX), Y6
	VMOVDQU 2336(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2368(CX), Y6
	VMOVDQU 2400(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2432(CX), Y6
	VMOVDQU 2464(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2496(CX), Y6
	VMOVDQU 2528(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 8 to 5 outputs
	VMOVDQU (R13)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 2560(CX), Y6
	VMOVDQU 2592(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 2624(CX), Y6
	VMOVDQU 2656(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 2688(CX), Y6
	VMOVDQU 2720(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 2752(CX), Y6
	VMOVDQU 2784(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 2816(CX), Y6
	VMOVDQU 2848(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Load and process 32 bytes from input 9 to 5 outputs
	VMOVDQU (BX)(R14*1), Y8
	VPSRLQ  $0x04, Y8, Y9
	VPAND   Y5, Y8, Y8
	VPAND   Y5, Y9, Y9
	VMOVDQU 2880(CX), Y6
	VMOVDQU 2912(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y0, Y0
	VMOVDQU 2944(CX), Y6
	VMOVDQU 2976(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y1, Y1
	VMOVDQU 3008(CX), Y6
	VMOVDQU 3040(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y2, Y2
	VMOVDQU 3072(CX), Y6
	VMOVDQU 3104(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y3, Y3
	VMOVDQU 3136(CX), Y6
	VMOVDQU 3168(CX), Y7
	VPSHUFB Y8, Y6, Y6
	VPSHUFB Y9, Y7, Y7
	VPXOR   Y6, Y7, Y6
	VPXOR   Y6, Y4, Y4

	// Store 5 outputs
	MOVQ    (DX), R15
	VMOVDQU Y0, (R15)(R14*1)
	MOVQ    24(DX), R15
	VMOVDQU Y1, (R15)(R14*1)
	MOVQ    48(DX), R15
	VMOVDQU Y2, (R15)(R14*1)
	MOVQ    72(DX), R15
	VMOVDQU Y3, (R15)(R14*1)
	MOVQ    96(DX), R15
	VMOVDQU Y4, (R15)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_10x5_loop
	VZEROUPPER

mulAvxTwo_10x5_end:
	RET

// func mulAvxTwo_10x6(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x6(SB), $0-88
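	// Six accumulators (Y0-Y5), nibble mask in Y6, scratch in Y7-Y10;
	// 384 bytes of tables per input, 3840 in total.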
	// Not loading tables to registers
	// Full load would use an estimated 131 YMM registers
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x6_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), R13
	MOVQ         216(BX), BX
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X6
	VPBROADCASTB X6, Y6
	MOVQ         start+72(FP), R14

mulAvxTwo_10x6_loop:
	// Clear 6 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5

	// Load and process 32 bytes from input 0 to 6 outputs
	VMOVDQU (BP)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU (CX), Y7
	VMOVDQU 32(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 64(CX), Y7
	VMOVDQU 96(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 128(CX), Y7
	VMOVDQU 160(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 192(CX), Y7
	VMOVDQU 224(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 256(CX), Y7
	VMOVDQU 288(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 320(CX), Y7
	VMOVDQU 352(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 1 to 6 outputs
	VMOVDQU (SI)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 384(CX), Y7
	VMOVDQU 416(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 448(CX), Y7
	VMOVDQU 480(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 512(CX), Y7
	VMOVDQU 544(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 576(CX), Y7
	VMOVDQU 608(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 640(CX), Y7
	VMOVDQU 672(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 704(CX), Y7
	VMOVDQU 736(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 2 to 6 outputs
	VMOVDQU (DI)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 768(CX), Y7
	VMOVDQU 800(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 832(CX), Y7
	VMOVDQU 864(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 896(CX), Y7
	VMOVDQU 928(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 960(CX), Y7
	VMOVDQU 992(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1024(CX), Y7
	VMOVDQU 1056(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1088(CX), Y7
	VMOVDQU 1120(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 3 to 6 outputs
	VMOVDQU (R8)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1152(CX), Y7
	VMOVDQU 1184(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1216(CX), Y7
	VMOVDQU 1248(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1280(CX), Y7
	VMOVDQU 1312(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1344(CX), Y7
	VMOVDQU 1376(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1408(CX), Y7
	VMOVDQU 1440(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1472(CX), Y7
	VMOVDQU 1504(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 4 to 6 outputs
	VMOVDQU (R9)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1536(CX), Y7
	VMOVDQU 1568(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1600(CX), Y7
	VMOVDQU 1632(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 1664(CX), Y7
	VMOVDQU 1696(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 1728(CX), Y7
	VMOVDQU 1760(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 1792(CX), Y7
	VMOVDQU 1824(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 1856(CX), Y7
	VMOVDQU 1888(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 5 to 6 outputs
	VMOVDQU (R10)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 1920(CX), Y7
	VMOVDQU 1952(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 1984(CX), Y7
	VMOVDQU 2016(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2048(CX), Y7
	VMOVDQU 2080(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2112(CX), Y7
	VMOVDQU 2144(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2176(CX), Y7
	VMOVDQU 2208(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2240(CX), Y7
	VMOVDQU 2272(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 6 to 6 outputs
	VMOVDQU (R11)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 2304(CX), Y7
	VMOVDQU 2336(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 2368(CX), Y7
	VMOVDQU 2400(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2432(CX), Y7
	VMOVDQU 2464(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2496(CX), Y7
	VMOVDQU 2528(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2560(CX), Y7
	VMOVDQU 2592(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 2624(CX), Y7
	VMOVDQU 2656(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 7 to 6 outputs
	VMOVDQU (R12)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 2688(CX), Y7
	VMOVDQU 2720(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 2752(CX), Y7
	VMOVDQU 2784(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 2816(CX), Y7
	VMOVDQU 2848(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 2880(CX), Y7
	VMOVDQU 2912(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 2944(CX), Y7
	VMOVDQU 2976(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 3008(CX), Y7
	VMOVDQU 3040(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 8 to 6 outputs
	VMOVDQU (R13)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 3072(CX), Y7
	VMOVDQU 3104(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 3136(CX), Y7
	VMOVDQU 3168(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 3200(CX), Y7
	VMOVDQU 3232(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 3264(CX), Y7
	VMOVDQU 3296(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 3328(CX), Y7
	VMOVDQU 3360(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 3392(CX), Y7
	VMOVDQU 3424(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Load and process 32 bytes from input 9 to 6 outputs
	VMOVDQU (BX)(R14*1), Y9
	VPSRLQ  $0x04, Y9, Y10
	VPAND   Y6, Y9, Y9
	VPAND   Y6, Y10, Y10
	VMOVDQU 3456(CX), Y7
	VMOVDQU 3488(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y0, Y0
	VMOVDQU 3520(CX), Y7
	VMOVDQU 3552(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y1, Y1
	VMOVDQU 3584(CX), Y7
	VMOVDQU 3616(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y2, Y2
	VMOVDQU 3648(CX), Y7
	VMOVDQU 3680(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y3, Y3
	VMOVDQU 3712(CX), Y7
	VMOVDQU 3744(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y4, Y4
	VMOVDQU 3776(CX), Y7
	VMOVDQU 3808(CX), Y8
	VPSHUFB Y9, Y7, Y7
	VPSHUFB Y10, Y8, Y8
	VPXOR   Y7, Y8, Y7
	VPXOR   Y7, Y5, Y5

	// Store 6 outputs
	MOVQ    (DX), R15
	VMOVDQU Y0, (R15)(R14*1)
	MOVQ    24(DX), R15
	VMOVDQU Y1, (R15)(R14*1)
	MOVQ    48(DX), R15
	VMOVDQU Y2, (R15)(R14*1)
	MOVQ    72(DX), R15
	VMOVDQU Y3, (R15)(R14*1)
	MOVQ    96(DX), R15
	VMOVDQU Y4, (R15)(R14*1)
	MOVQ    120(DX), R15
	VMOVDQU Y5, (R15)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_10x6_loop
	VZEROUPPER

mulAvxTwo_10x6_end:
	RET

// func mulAvxTwo_10x7(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x7(SB), $0-88
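	// Seven accumulators (Y0-Y6), nibble mask in Y7, scratch in Y8-Y11;
	// 448 bytes of tables per input, 4480 in total.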
	// Not loading tables to registers
	// Full load would use an estimated 152 YMM registers
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x7_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), R13
	MOVQ         216(BX), BX
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X7
	VPBROADCASTB X7, Y7
	MOVQ         start+72(FP), R14

mulAvxTwo_10x7_loop:
	// Clear 7 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6

	// Load and process 32 bytes from input 0 to 7 outputs
	VMOVDQU (BP)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU (CX), Y8
	VMOVDQU 32(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 64(CX), Y8
	VMOVDQU 96(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 128(CX), Y8
	VMOVDQU 160(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 192(CX), Y8
	VMOVDQU 224(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 256(CX), Y8
	VMOVDQU 288(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 320(CX), Y8
	VMOVDQU 352(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 384(CX), Y8
	VMOVDQU 416(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 1 to 7 outputs
	VMOVDQU (SI)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 448(CX), Y8
	VMOVDQU 480(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 512(CX), Y8
	VMOVDQU 544(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 576(CX), Y8
	VMOVDQU 608(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 640(CX), Y8
	VMOVDQU 672(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 704(CX), Y8
	VMOVDQU 736(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 768(CX), Y8
	VMOVDQU 800(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 832(CX), Y8
	VMOVDQU 864(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 2 to 7 outputs
	VMOVDQU (DI)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 896(CX), Y8
	VMOVDQU 928(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 960(CX), Y8
	VMOVDQU 992(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1024(CX), Y8
	VMOVDQU 1056(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1088(CX), Y8
	VMOVDQU 1120(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1152(CX), Y8
	VMOVDQU 1184(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1216(CX), Y8
	VMOVDQU 1248(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1280(CX), Y8
	VMOVDQU 1312(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 3 to 7 outputs
	VMOVDQU (R8)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1344(CX), Y8
	VMOVDQU 1376(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1408(CX), Y8
	VMOVDQU 1440(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1472(CX), Y8
	VMOVDQU 1504(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1536(CX), Y8
	VMOVDQU 1568(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 1600(CX), Y8
	VMOVDQU 1632(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 1664(CX), Y8
	VMOVDQU 1696(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 1728(CX), Y8
	VMOVDQU 1760(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 4 to 7 outputs
	VMOVDQU (R9)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 1792(CX), Y8
	VMOVDQU 1824(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 1856(CX), Y8
	VMOVDQU 1888(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 1920(CX), Y8
	VMOVDQU 1952(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 1984(CX), Y8
	VMOVDQU 2016(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2048(CX), Y8
	VMOVDQU 2080(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2112(CX), Y8
	VMOVDQU 2144(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2176(CX), Y8
	VMOVDQU 2208(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 5 to 7 outputs
	VMOVDQU (R10)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2240(CX), Y8
	VMOVDQU 2272(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2304(CX), Y8
	VMOVDQU 2336(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2368(CX), Y8
	VMOVDQU 2400(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2432(CX), Y8
	VMOVDQU 2464(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2496(CX), Y8
	VMOVDQU 2528(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 2560(CX), Y8
	VMOVDQU 2592(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 2624(CX), Y8
	VMOVDQU 2656(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 6 to 7 outputs
	VMOVDQU (R11)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 2688(CX), Y8
	VMOVDQU 2720(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 2752(CX), Y8
	VMOVDQU 2784(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 2816(CX), Y8
	VMOVDQU 2848(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 2880(CX), Y8
	VMOVDQU 2912(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 2944(CX), Y8
	VMOVDQU 2976(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3008(CX), Y8
	VMOVDQU 3040(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3072(CX), Y8
	VMOVDQU 3104(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 7 to 7 outputs
	VMOVDQU (R12)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 3136(CX), Y8
	VMOVDQU 3168(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 3200(CX), Y8
	VMOVDQU 3232(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 3264(CX), Y8
	VMOVDQU 3296(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 3328(CX), Y8
	VMOVDQU 3360(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 3392(CX), Y8
	VMOVDQU 3424(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3456(CX), Y8
	VMOVDQU 3488(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3520(CX), Y8
	VMOVDQU 3552(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 8 to 7 outputs
	VMOVDQU (R13)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 3584(CX), Y8
	VMOVDQU 3616(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 3648(CX), Y8
	VMOVDQU 3680(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 3712(CX), Y8
	VMOVDQU 3744(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 3776(CX), Y8
	VMOVDQU 3808(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 3840(CX), Y8
	VMOVDQU 3872(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 3904(CX), Y8
	VMOVDQU 3936(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 3968(CX), Y8
	VMOVDQU 4000(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Load and process 32 bytes from input 9 to 7 outputs
	VMOVDQU (BX)(R14*1), Y10
	VPSRLQ  $0x04, Y10, Y11
	VPAND   Y7, Y10, Y10
	VPAND   Y7, Y11, Y11
	VMOVDQU 4032(CX), Y8
	VMOVDQU 4064(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y0, Y0
	VMOVDQU 4096(CX), Y8
	VMOVDQU 4128(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y1, Y1
	VMOVDQU 4160(CX), Y8
	VMOVDQU 4192(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y2, Y2
	VMOVDQU 4224(CX), Y8
	VMOVDQU 4256(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y3, Y3
	VMOVDQU 4288(CX), Y8
	VMOVDQU 4320(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y4, Y4
	VMOVDQU 4352(CX), Y8
	VMOVDQU 4384(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y5, Y5
	VMOVDQU 4416(CX), Y8
	VMOVDQU 4448(CX), Y9
	VPSHUFB Y10, Y8, Y8
	VPSHUFB Y11, Y9, Y9
	VPXOR   Y8, Y9, Y8
	VPXOR   Y8, Y6, Y6

	// Store 7 outputs
	MOVQ    (DX), R15
	VMOVDQU Y0, (R15)(R14*1)
	MOVQ    24(DX), R15
	VMOVDQU Y1, (R15)(R14*1)
	MOVQ    48(DX), R15
	VMOVDQU Y2, (R15)(R14*1)
	MOVQ    72(DX), R15
	VMOVDQU Y3, (R15)(R14*1)
	MOVQ    96(DX), R15
	VMOVDQU Y4, (R15)(R14*1)
	MOVQ    120(DX), R15
	VMOVDQU Y5, (R15)(R14*1)
	MOVQ    144(DX), R15
	VMOVDQU Y6, (R15)(R14*1)

	// Prepare for next loop
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_10x7_loop
	VZEROUPPER

mulAvxTwo_10x7_end:
	RET

// func mulAvxTwo_10x8(matrix []byte, in [][]byte, out [][]byte, start int, n int)
// Requires: AVX, AVX2, SSE2
TEXT ·mulAvxTwo_10x8(SB), $0-88
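	// The widest kernel in this family: eight accumulators (Y0-Y7), the
	// nibble mask in Y8, and scratch in Y9-Y12 account for 13 of the 16
	// YMM registers; 512 bytes of tables per input.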
	// Not loading tables to registers
	// Full load would use an estimated 173 YMM registers
	MOVQ         n+80(FP), AX
	MOVQ         matrix_base+0(FP), CX
	SHRQ         $0x05, AX
	TESTQ        AX, AX
	JZ           mulAvxTwo_10x8_end
	MOVQ         out_base+48(FP), DX
	MOVQ         in_base+24(FP), BX
	MOVQ         (BX), BP
	MOVQ         24(BX), SI
	MOVQ         48(BX), DI
	MOVQ         72(BX), R8
	MOVQ         96(BX), R9
	MOVQ         120(BX), R10
	MOVQ         144(BX), R11
	MOVQ         168(BX), R12
	MOVQ         192(BX), R13
	MOVQ         216(BX), BX
	MOVQ         $0x0000000f, R14
	MOVQ         R14, X8
	VPBROADCASTB X8, Y8
	MOVQ         start+72(FP), R14

mulAvxTwo_10x8_loop:
	// Clear 8 outputs
	VPXOR Y0, Y0, Y0
	VPXOR Y1, Y1, Y1
	VPXOR Y2, Y2, Y2
	VPXOR Y3, Y3, Y3
	VPXOR Y4, Y4, Y4
	VPXOR Y5, Y5, Y5
	VPXOR Y6, Y6, Y6
	VPXOR Y7, Y7, Y7

	// Load and process 32 bytes from input 0 to 8 outputs
	VMOVDQU (BP)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU (CX), Y9
	VMOVDQU 32(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 64(CX), Y9
	VMOVDQU 96(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 128(CX), Y9
	VMOVDQU 160(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 192(CX), Y9
	VMOVDQU 224(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 256(CX), Y9
	VMOVDQU 288(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 320(CX), Y9
	VMOVDQU 352(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 384(CX), Y9
	VMOVDQU 416(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 448(CX), Y9
	VMOVDQU 480(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 1 to 8 outputs
	VMOVDQU (SI)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 512(CX), Y9
	VMOVDQU 544(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 576(CX), Y9
	VMOVDQU 608(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 640(CX), Y9
	VMOVDQU 672(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 704(CX), Y9
	VMOVDQU 736(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 768(CX), Y9
	VMOVDQU 800(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 832(CX), Y9
	VMOVDQU 864(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 896(CX), Y9
	VMOVDQU 928(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 960(CX), Y9
	VMOVDQU 992(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 2 to 8 outputs
	VMOVDQU (DI)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1024(CX), Y9
	VMOVDQU 1056(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1088(CX), Y9
	VMOVDQU 1120(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1152(CX), Y9
	VMOVDQU 1184(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1216(CX), Y9
	VMOVDQU 1248(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1280(CX), Y9
	VMOVDQU 1312(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1344(CX), Y9
	VMOVDQU 1376(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1408(CX), Y9
	VMOVDQU 1440(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1472(CX), Y9
	VMOVDQU 1504(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 3 to 8 outputs
	VMOVDQU (R8)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 1536(CX), Y9
	VMOVDQU 1568(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 1600(CX), Y9
	VMOVDQU 1632(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 1664(CX), Y9
	VMOVDQU 1696(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 1728(CX), Y9
	VMOVDQU 1760(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 1792(CX), Y9
	VMOVDQU 1824(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 1856(CX), Y9
	VMOVDQU 1888(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 1920(CX), Y9
	VMOVDQU 1952(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 1984(CX), Y9
	VMOVDQU 2016(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 4 to 8 outputs
	VMOVDQU (R9)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2048(CX), Y9
	VMOVDQU 2080(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2112(CX), Y9
	VMOVDQU 2144(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2176(CX), Y9
	VMOVDQU 2208(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2240(CX), Y9
	VMOVDQU 2272(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2304(CX), Y9
	VMOVDQU 2336(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2368(CX), Y9
	VMOVDQU 2400(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2432(CX), Y9
	VMOVDQU 2464(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 2496(CX), Y9
	VMOVDQU 2528(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 5 to 8 outputs
	VMOVDQU (R10)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 2560(CX), Y9
	VMOVDQU 2592(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 2624(CX), Y9
	VMOVDQU 2656(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 2688(CX), Y9
	VMOVDQU 2720(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 2752(CX), Y9
	VMOVDQU 2784(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 2816(CX), Y9
	VMOVDQU 2848(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 2880(CX), Y9
	VMOVDQU 2912(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 2944(CX), Y9
	VMOVDQU 2976(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3008(CX), Y9
	VMOVDQU 3040(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 6 to 8 outputs
	VMOVDQU (R11)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 3072(CX), Y9
	VMOVDQU 3104(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 3136(CX), Y9
	VMOVDQU 3168(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 3200(CX), Y9
	VMOVDQU 3232(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 3264(CX), Y9
	VMOVDQU 3296(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 3328(CX), Y9
	VMOVDQU 3360(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 3392(CX), Y9
	VMOVDQU 3424(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 3456(CX), Y9
	VMOVDQU 3488(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 3520(CX), Y9
	VMOVDQU 3552(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 7 to 8 outputs
	VMOVDQU (R12)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 3584(CX), Y9
	VMOVDQU 3616(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 3648(CX), Y9
	VMOVDQU 3680(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 3712(CX), Y9
	VMOVDQU 3744(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 3776(CX), Y9
	VMOVDQU 3808(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 3840(CX), Y9
	VMOVDQU 3872(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 3904(CX), Y9
	VMOVDQU 3936(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 3968(CX), Y9
	VMOVDQU 4000(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 4032(CX), Y9
	VMOVDQU 4064(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 8 to 8 outputs
	VMOVDQU (R13)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 4096(CX), Y9
	VMOVDQU 4128(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 4160(CX), Y9
	VMOVDQU 4192(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 4224(CX), Y9
	VMOVDQU 4256(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 4288(CX), Y9
	VMOVDQU 4320(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 4352(CX), Y9
	VMOVDQU 4384(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 4416(CX), Y9
	VMOVDQU 4448(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 4480(CX), Y9
	VMOVDQU 4512(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 4544(CX), Y9
	VMOVDQU 4576(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Load and process 32 bytes from input 9 to 8 outputs
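	// (Unlike inputs 3-8, which index through R8-R13, this last input's
	// base pointer is held in BX; presumably no further scratch GP
	// registers were free for a tenth input pointer.)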
	VMOVDQU (BX)(R14*1), Y11
	VPSRLQ  $0x04, Y11, Y12
	VPAND   Y8, Y11, Y11
	VPAND   Y8, Y12, Y12
	VMOVDQU 4608(CX), Y9
	VMOVDQU 4640(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y0, Y0
	VMOVDQU 4672(CX), Y9
	VMOVDQU 4704(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y1, Y1
	VMOVDQU 4736(CX), Y9
	VMOVDQU 4768(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y2, Y2
	VMOVDQU 4800(CX), Y9
	VMOVDQU 4832(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y3, Y3
	VMOVDQU 4864(CX), Y9
	VMOVDQU 4896(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y4, Y4
	VMOVDQU 4928(CX), Y9
	VMOVDQU 4960(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y5, Y5
	VMOVDQU 4992(CX), Y9
	VMOVDQU 5024(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y6, Y6
	VMOVDQU 5056(CX), Y9
	VMOVDQU 5088(CX), Y10
	VPSHUFB Y11, Y9, Y9
	VPSHUFB Y12, Y10, Y10
	VPXOR   Y9, Y10, Y9
	VPXOR   Y9, Y7, Y7

	// Store 8 outputs
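	// The eight destination pointers are re-read from the out slice
	// headers at (DX) on every iteration (one 24-byte slice header per
	// output); with this many inputs and outputs there are apparently
	// too few general-purpose registers left to keep them resident, as
	// the smaller kernels in this file do.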
	MOVQ    (DX), R15
	VMOVDQU Y0, (R15)(R14*1)
	MOVQ    24(DX), R15
	VMOVDQU Y1, (R15)(R14*1)
	MOVQ    48(DX), R15
	VMOVDQU Y2, (R15)(R14*1)
	MOVQ    72(DX), R15
	VMOVDQU Y3, (R15)(R14*1)
	MOVQ    96(DX), R15
	VMOVDQU Y4, (R15)(R14*1)
	MOVQ    120(DX), R15
	VMOVDQU Y5, (R15)(R14*1)
	MOVQ    144(DX), R15
	VMOVDQU Y6, (R15)(R14*1)
	MOVQ    168(DX), R15
	VMOVDQU Y7, (R15)(R14*1)

	// Prepare for next loop
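	// Advance the shared input/output byte offset by one 32-byte block
	// and count down the number of blocks remaining in AX.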
	ADDQ $0x20, R14
	DECQ AX
	JNZ  mulAvxTwo_10x8_loop
	VZEROUPPER

mulAvxTwo_10x8_end:
	RET
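
// For reference, a minimal scalar Go sketch of what each mulAvxTwo kernel
// above computes (illustrative only; mulGF8 and mulRef are hypothetical
// names, not part of the package API, and this assumes the package's
// 0x11d field polynomial). Output j is the GF(2^8) dot product of all
// inputs with row j of the coefficient matrix; the assembly realizes each
// byte multiply with the two 16-entry nibble tables seen above.
//
//	// mulGF8 multiplies a and b in GF(2^8) mod x^8+x^4+x^3+x^2+1.
//	func mulGF8(a, b byte) byte {
//		var p byte
//		for i := 0; i < 8; i++ {
//			if b&1 != 0 {
//				p ^= a
//			}
//			carry := a & 0x80
//			a <<= 1
//			if carry != 0 {
//				a ^= 0x1d // low byte of the 0x11d reduction polynomial
//			}
//			b >>= 1
//		}
//		return p
//	}
//
//	// mulRef is the scalar equivalent of the assembly loop: for every
//	// byte position, XOR-accumulate coeff[j][i]*in[i][pos] into out[j].
//	func mulRef(coeff [][]byte, in, out [][]byte) {
//		for pos := 0; pos < len(in[0]); pos++ {
//			for j := range out {
//				var acc byte
//				for i := range in {
//					acc ^= mulGF8(coeff[j][i], in[i][pos])
//				}
//				out[j][pos] = acc
//			}
//		}
//	}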
