crypto: mips/chacha - wire up accelerated 32r2 code from Zinc
This integrates the accelerated MIPS 32r2 implementation of ChaCha
into both the API and library interfaces of the kernel crypto stack.

The significance of this is that, in addition to becoming available
as an accelerated library implementation, it can also be used by
existing crypto API code such as Adiantum (for block encryption on
ultra low performance cores) or IPsec using chacha20poly1305. These
are use cases that have already opted into using the abstract crypto
API. In order to support Adiantum, the core assembler routine has
been adapted to take the round count as a function argument rather
than hardcoding it to 20.

Co-developed-by: René van Dorst <opensource@vdorst.com>
Signed-off-by: René van Dorst <opensource@vdorst.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
ardbiesheuvel authored and herbertx committed Nov 17, 2019
1 parent 49aa7c0 commit 3a2f58f
Showing 5 changed files with 277 additions and 44 deletions.
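As context for the diff below, here is a minimal C-level sketch of the library entry point this commit wires up. The prototype matches the routine exported by the assembler (chacha_crypt_arch); the parameter names and the example_chacha12 helper are illustrative assumptions, not code from this commit.

#include <linux/linkage.h>
#include <linux/types.h>

/*
 * Implemented directly in arch/mips/crypto/chacha-core.S. The round
 * count is the fifth argument; under the 32-bit o32 calling convention
 * it arrives on the stack, which is why the routine begins with
 * "lw $at, 16($sp)".
 */
asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
				  unsigned int bytes, int nrounds);

/* Hypothetical caller: Adiantum-style users can now pick the round count. */
static void example_chacha12(u32 state[16], u8 *dst, const u8 *src,
			     unsigned int bytes)
{
	chacha_crypt_arch(state, dst, src, bytes, 12); /* 12 rounds */
}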
2 changes: 1 addition & 1 deletion arch/mips/Makefile
@@ -323,7 +323,7 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
# See arch/mips/Kbuild for content of core part of the kernel
core-y += arch/mips/

drivers-$(CONFIG_MIPS_CRC_SUPPORT) += arch/mips/crypto/
drivers-y += arch/mips/crypto/
drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/

# suspend and hibernation support
4 changes: 4 additions & 0 deletions arch/mips/crypto/Makefile
@@ -4,3 +4,7 @@
#

obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o

obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
chacha-mips-y := chacha-core.o chacha-glue.o
AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
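The chacha-glue.o object built above is not among the loaded hunks; as a hedged sketch, its job is to declare the assembler routines and export them as the kernel's ChaCha library interface, with skcipher registration layered on top for crypto API users such as Adiantum and chacha20poly1305. The excerpt below is an assumption about its shape, using only the standard asmlinkage/EXPORT_SYMBOL mechanisms:

#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/types.h>

/* Both routines live in chacha-core.S (see the next file). */
asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
				  unsigned int bytes, int nrounds);
EXPORT_SYMBOL(chacha_crypt_arch);

asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream,
				   int nrounds);
EXPORT_SYMBOL(hchacha_block_arch);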
159 changes: 116 additions & 43 deletions arch/mips/crypto/chacha-core.S
@@ -125,7 +125,7 @@
#define CONCAT3(a,b,c) _CONCAT3(a,b,c)

#define STORE_UNALIGNED(x) \
CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
.if (x != 12); \
lw T0, (x*4)(STATE); \
.endif; \
@@ -142,7 +142,7 @@ CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
swr X ## x, (x*4)+LSB ## (OUT);

#define STORE_ALIGNED(x) \
CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
.if (x != 12); \
lw T0, (x*4)(STATE); \
.endif; \
@@ -162,9 +162,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
* Every jumptable entry must be equal in size.
*/
#define JMPTBL_ALIGNED(x) \
.Lchacha20_mips_jmptbl_aligned_ ## x: ; \
.Lchacha_mips_jmptbl_aligned_ ## x: ; \
.set noreorder; \
b .Lchacha20_mips_xor_aligned_ ## x ## _b; \
b .Lchacha_mips_xor_aligned_ ## x ## _b; \
.if (x == 12); \
addu SAVED_X, X ## x, NONCE_0; \
.else; \
@@ -173,9 +173,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
.set reorder

#define JMPTBL_UNALIGNED(x) \
.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \
.Lchacha_mips_jmptbl_unaligned_ ## x: ; \
.set noreorder; \
b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \
b .Lchacha_mips_xor_unaligned_ ## x ## _b; \
.if (x == 12); \
addu SAVED_X, X ## x, NONCE_0; \
.else; \
@@ -200,15 +200,18 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
.text
.set reorder
.set noat
.globl chacha20_mips
.ent chacha20_mips
chacha20_mips:
.globl chacha_crypt_arch
.ent chacha_crypt_arch
chacha_crypt_arch:
.frame $sp, STACK_SIZE, $ra

/* Load number of rounds */
lw $at, 16($sp)

addiu $sp, -STACK_SIZE

/* Return bytes = 0. */
beqz BYTES, .Lchacha20_mips_end
beqz BYTES, .Lchacha_mips_end

lw NONCE_0, 48(STATE)

@@ -228,18 +231,15 @@ chacha20_mips:
or IS_UNALIGNED, IN, OUT
andi IS_UNALIGNED, 0x3

/* Set number of rounds */
li $at, 20

b .Lchacha20_rounds_start
b .Lchacha_rounds_start

.align 4
.Loop_chacha20_rounds:
.Loop_chacha_rounds:
addiu IN, CHACHA20_BLOCK_SIZE
addiu OUT, CHACHA20_BLOCK_SIZE
addiu NONCE_0, 1

.Lchacha20_rounds_start:
.Lchacha_rounds_start:
lw X0, 0(STATE)
lw X1, 4(STATE)
lw X2, 8(STATE)
@@ -259,7 +259,7 @@ chacha20_mips:
lw X14, 56(STATE)
lw X15, 60(STATE)

.Loop_chacha20_xor_rounds:
.Loop_chacha_xor_rounds:
addiu $at, -2
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
@@ -269,31 +269,31 @@ chacha20_mips:
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
bnez $at, .Loop_chacha20_xor_rounds
bnez $at, .Loop_chacha_xor_rounds

addiu BYTES, -(CHACHA20_BLOCK_SIZE)

/* Is data src/dst unaligned? Jump */
bnez IS_UNALIGNED, .Loop_chacha20_unaligned
bnez IS_UNALIGNED, .Loop_chacha_unaligned

/* Set number rounds here to fill delayslot. */
li $at, 20
lw $at, (STACK_SIZE+16)($sp)

/* BYTES < 0, it has no full block. */
bltz BYTES, .Lchacha20_mips_no_full_block_aligned
bltz BYTES, .Lchacha_mips_no_full_block_aligned

FOR_EACH_WORD_REV(STORE_ALIGNED)

/* BYTES > 0? Loop again. */
bgtz BYTES, .Loop_chacha20_rounds
bgtz BYTES, .Loop_chacha_rounds

/* Place this here to fill delay slot */
addiu NONCE_0, 1

/* BYTES < 0? Handle last bytes */
bltz BYTES, .Lchacha20_mips_xor_bytes
bltz BYTES, .Lchacha_mips_xor_bytes

.Lchacha20_mips_xor_done:
.Lchacha_mips_xor_done:
/* Restore used registers */
lw $s0, 0($sp)
lw $s1, 4($sp)
@@ -307,19 +307,19 @@ chacha20_mips:
/* Write NONCE_0 back to right location in state */
sw NONCE_0, 48(STATE)

.Lchacha20_mips_end:
.Lchacha_mips_end:
addiu $sp, STACK_SIZE
jr $ra

.Lchacha20_mips_no_full_block_aligned:
.Lchacha_mips_no_full_block_aligned:
/* Restore the offset on BYTES */
addiu BYTES, CHACHA20_BLOCK_SIZE

/* Get number of full WORDS */
andi $at, BYTES, MASK_U32

/* Load upper half of jump table addr */
lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0)
lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)

/* Calculate lower half jump table offset */
ins T0, $at, 1, 6
@@ -328,7 +328,7 @@ chacha20_mips:
addu T1, STATE, $at

/* Add lower half jump table addr */
addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0)
addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)

/* Read value from STATE */
lw SAVED_CA, 0(T1)
@@ -342,31 +342,31 @@ chacha20_mips:
FOR_EACH_WORD(JMPTBL_ALIGNED)


.Loop_chacha20_unaligned:
.Loop_chacha_unaligned:
/* Set number rounds here to fill delayslot. */
li $at, 20
lw $at, (STACK_SIZE+16)($sp)

/* BYTES < 0, it has no full block. */
bltz BYTES, .Lchacha20_mips_no_full_block_unaligned
bltz BYTES, .Lchacha_mips_no_full_block_unaligned

FOR_EACH_WORD_REV(STORE_UNALIGNED)

/* BYTES > 0? Loop again. */
bgtz BYTES, .Loop_chacha20_rounds
bgtz BYTES, .Loop_chacha_rounds

/* Write NONCE_0 back to right location in state */
sw NONCE_0, 48(STATE)

.set noreorder
/* Fall through to byte handling */
bgez BYTES, .Lchacha20_mips_xor_done
.Lchacha20_mips_xor_unaligned_0_b:
.Lchacha20_mips_xor_aligned_0_b:
bgez BYTES, .Lchacha_mips_xor_done
.Lchacha_mips_xor_unaligned_0_b:
.Lchacha_mips_xor_aligned_0_b:
/* Place this here to fill delay slot */
addiu NONCE_0, 1
.set reorder

.Lchacha20_mips_xor_bytes:
.Lchacha_mips_xor_bytes:
addu IN, $at
addu OUT, $at
/* First byte */
@@ -376,30 +376,30 @@ chacha20_mips:
ROTR(SAVED_X)
xor T1, SAVED_X
sb T1, 0(OUT)
beqz $at, .Lchacha20_mips_xor_done
beqz $at, .Lchacha_mips_xor_done
/* Second byte */
lbu T1, 1(IN)
addiu $at, BYTES, 2
ROTx SAVED_X, 8
xor T1, SAVED_X
sb T1, 1(OUT)
beqz $at, .Lchacha20_mips_xor_done
beqz $at, .Lchacha_mips_xor_done
/* Third byte */
lbu T1, 2(IN)
ROTx SAVED_X, 8
xor T1, SAVED_X
sb T1, 2(OUT)
b .Lchacha20_mips_xor_done
b .Lchacha_mips_xor_done

.Lchacha20_mips_no_full_block_unaligned:
.Lchacha_mips_no_full_block_unaligned:
/* Restore the offset on BYTES */
addiu BYTES, CHACHA20_BLOCK_SIZE

/* Get number of full WORDS */
andi $at, BYTES, MASK_U32

/* Load upper half of jump table addr */
lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0)
lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0)

/* Calculate lower half jump table offset */
ins T0, $at, 1, 6
@@ -408,7 +408,7 @@ chacha20_mips:
addu T1, STATE, $at

/* Add lower half jump table addr */
addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0)
addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0)

/* Read value from STATE */
lw SAVED_CA, 0(T1)
@@ -420,5 +420,78 @@ chacha20_mips:

/* Jump table */
FOR_EACH_WORD(JMPTBL_UNALIGNED)
.end chacha20_mips
.end chacha_crypt_arch
.set at

/* Input arguments
* STATE $a0
* OUT $a1
* NROUND $a2
*/

#undef X12
#undef X13
#undef X14
#undef X15

#define X12 $a3
#define X13 $at
#define X14 $v0
#define X15 STATE

.set noat
.globl hchacha_block_arch
.ent hchacha_block_arch
hchacha_block_arch:
.frame $sp, STACK_SIZE, $ra

addiu $sp, -STACK_SIZE

/* Save X11(s6) */
sw X11, 0($sp)

lw X0, 0(STATE)
lw X1, 4(STATE)
lw X2, 8(STATE)
lw X3, 12(STATE)
lw X4, 16(STATE)
lw X5, 20(STATE)
lw X6, 24(STATE)
lw X7, 28(STATE)
lw X8, 32(STATE)
lw X9, 36(STATE)
lw X10, 40(STATE)
lw X11, 44(STATE)
lw X12, 48(STATE)
lw X13, 52(STATE)
lw X14, 56(STATE)
lw X15, 60(STATE)

.Loop_hchacha_xor_rounds:
addiu $a2, -2
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
bnez $a2, .Loop_hchacha_xor_rounds

/* Restore used register */
lw X11, 0($sp)

sw X0, 0(OUT)
sw X1, 4(OUT)
sw X2, 8(OUT)
sw X3, 12(OUT)
sw X12, 16(OUT)
sw X13, 20(OUT)
sw X14, 24(OUT)
sw X15, 28(OUT)

addiu $sp, STACK_SIZE
jr $ra
.end hchacha_block_arch
.set at
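The new hchacha_block_arch routine spills only $s6 (X11), reusing $a3, $at, $v0 and the STATE register itself for X12-X15, and it writes back just the eight words x0..x3 and x12..x15. A minimal usage sketch for XChaCha-style subkey derivation follows; the example_xchacha_subkey helper, and the assumption that the caller has preloaded the 16-word state (constants, key, first nonce words), are illustrative and not part of this commit.

#include <linux/linkage.h>
#include <linux/types.h>

asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream,
				   int nrounds);

/* Hypothetical helper: derive a 256-bit XChaCha subkey. */
static void example_xchacha_subkey(const u32 key_state[16], u32 subkey[8])
{
	/* Output layout per the stores above: x0..x3 then x12..x15. */
	hchacha_block_arch(key_state, subkey, 20);
}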