From 13dc15a3f5fd7f884e4bfa8c011a0ae868df12ae Mon Sep 17 00:00:00 2001 From: John Allen Date: Wed, 18 May 2022 15:31:26 +0000 Subject: [PATCH 01/89] crypto: ccp - Use kzalloc for sev ioctl interfaces to prevent kernel memory leak For some sev ioctl interfaces, input may be passed that is less than or equal to SEV_FW_BLOB_MAX_SIZE, but larger than the data that PSP firmware returns. In this case, kmalloc will allocate memory that is the size of the input rather than the size of the data. Since PSP firmware doesn't fully overwrite the buffer, the sev ioctl interfaces with the issue may return uninitialized slab memory. Currently, all of the ioctl interfaces in the ccp driver are safe, but to prevent future problems, change all ioctl interfaces that allocate memory with kmalloc to use kzalloc and memset the data buffer to zero in sev_ioctl_do_platform_status. Fixes: 38103671aad3 ("crypto: ccp: Use the stack and common buffer for status commands") Fixes: e799035609e15 ("crypto: ccp: Implement SEV_PEK_CSR ioctl command") Fixes: 76a2b524a4b1d ("crypto: ccp: Implement SEV_PDH_CERT_EXPORT ioctl command") Fixes: d6112ea0cb344 ("crypto: ccp - introduce SEV_GET_ID2 command") Cc: stable@vger.kernel.org Reported-by: Andy Nguyen Suggested-by: David Rientjes Suggested-by: Peter Gonda Signed-off-by: John Allen Reviewed-by: Peter Gonda Acked-by: David Rientjes Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 799b476fc3e82..0c92d940ac4ef 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -577,6 +577,8 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) struct sev_user_data_status data; int ret; + memset(&data, 0, sizeof(data)); + ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); if (ret) return ret; @@ -630,7 +632,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) if (input.length > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; - blob = kmalloc(input.length, GFP_KERNEL); + blob = kzalloc(input.length, GFP_KERNEL); if (!blob) return -ENOMEM; @@ -854,7 +856,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) input_address = (void __user *)input.address; if (input.address && input.length) { - id_blob = kmalloc(input.length, GFP_KERNEL); + id_blob = kzalloc(input.length, GFP_KERNEL); if (!id_blob) return -ENOMEM; @@ -973,14 +975,14 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; - pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); + pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL); if (!pdh_blob) return -ENOMEM; data.pdh_cert_address = __psp_pa(pdh_blob); data.pdh_cert_len = input.pdh_cert_len; - cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); + cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL); if (!cert_blob) { ret = -ENOMEM; goto e_free_pdh; From d2765e1b9ac4b2d5a5d5bf17f468c9b3566c3770 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 18 May 2022 20:33:44 +0300 Subject: [PATCH 02/89] crypto: sun8i-ss - fix error codes in allocate_flows() These failure paths should return -ENOMEM. Currently they return success. 
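The pattern at issue is the goto-style cleanup used throughout the driver: the error label returns whatever value err happens to hold, so a failure path that jumps there without first setting err does not report the failure. A minimal, self-contained sketch of the pattern (plain C, with malloc() standing in for the driver's devm_kmalloc() calls; the names are illustrative, not taken from the driver):

	#include <errno.h>
	#include <stdlib.h>

	/*
	 * Each failed allocation must set err before jumping to the cleanup
	 * label; otherwise err still holds 0 and the caller sees success.
	 */
	static int allocate_buffers(void **a, void **b)
	{
		int err = 0;

		*a = NULL;
		*b = NULL;

		*a = malloc(16);
		if (!*a) {
			err = -ENOMEM;	/* the assignment this patch adds */
			goto err_free;
		}

		*b = malloc(16);
		if (!*b) {
			err = -ENOMEM;
			goto err_free;
		}

		return 0;

	err_free:
		free(*a);
		*a = NULL;
		return err;
	}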
Fixes: 359e893e8af4 ("crypto: sun8i-ss - rework handling of IV") Fixes: 8eec4563f152 ("crypto: sun8i-ss - do not allocate memory when handling hash requests") Signed-off-by: Dan Carpenter Acked-by: Corentin Labbe Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- .../crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 98593a0cff694..ac2329e2b0e58 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -528,25 +528,33 @@ static int allocate_flows(struct sun8i_ss_dev *ss) ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].biv) + if (!ss->flows[i].biv) { + err = -ENOMEM; goto error_engine; + } for (j = 0; j < MAX_SG; j++) { ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].iv[j]) + if (!ss->flows[i].iv[j]) { + err = -ENOMEM; goto error_engine; + } } /* the padding could be up to two block. */ ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].pad) + if (!ss->flows[i].pad) { + err = -ENOMEM; goto error_engine; + } ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE, GFP_KERNEL | GFP_DMA); - if (!ss->flows[i].result) + if (!ss->flows[i].result) { + err = -ENOMEM; goto error_engine; + } ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); if (!ss->flows[i].engine) { From 6cb3f9b25c55928b95a02b9ed8e87ed653b3cce8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 18 May 2022 20:33:54 +0300 Subject: [PATCH 03/89] crypto: sun8i-ss - Fix error codes for dma_mapping_error() If there is a dma_mapping_error() then return negative error codes. Currently this code returns success. 
Fixes: 801b7d572c0a ("crypto: sun8i-ss - add hmac(sha1)") Signed-off-by: Dan Carpenter Acked-by: Corentin Labbe Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index ac417a6b39e5f..845019bd95911 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -586,7 +586,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) rctx->t_dst[k + 1].len = rctx->t_dst[k].len; } addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE); - if (dma_mapping_error(ss->dev, addr_xpad)) { + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { dev_err(ss->dev, "Fail to create DMA mapping of ipad\n"); goto err_dma_xpad; } @@ -612,7 +613,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) goto err_dma_result; } addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE); - if (dma_mapping_error(ss->dev, addr_xpad)) { + err = dma_mapping_error(ss->dev, addr_xpad); + if (err) { dev_err(ss->dev, "Fail to create DMA mapping of opad\n"); goto err_dma_xpad; }
From 7df7563b16aa0281cb811785e4bb3681b46e2a28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 20 May 2022 19:21:00 +0200 Subject: [PATCH 04/89] crypto: atmel-ecc - Remove duplicated error reporting in .remove() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit
Returning an error value in an i2c remove callback results in an error message being emitted by the i2c core, but otherwise it doesn't make a difference. The device goes away anyhow and the devm cleanups are called. As atmel_ecc_remove() already emits an error message on failure and the additional error message by the i2c core doesn't add any useful information, change the return value to zero to suppress this message. Also make the error message a bit more drastic because when the device is still busy on remove, it's likely that it will access freed memory soon. This patch is a preparation for making i2c remove callbacks return void.
Signed-off-by: Uwe Kleine-König Reviewed-by: Tudor Ambarus Signed-off-by: Herbert Xu --- drivers/crypto/atmel-ecc.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 59a57279e77bf..a4b13d326cfc6 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -349,8 +349,16 @@ static int atmel_ecc_remove(struct i2c_client *client) /* Return EBUSY if i2c client already allocated. */ if (atomic_read(&i2c_priv->tfm_count)) { - dev_err(&client->dev, "Device is busy\n"); - return -EBUSY; + /* + * After we return here, the memory backing the device is freed. + * That happens no matter what the return value of this function + * is because in the Linux device model there is no error + * handling for unbinding a driver. + * If there is still some action pending, it probably involves + * accessing the freed memory. 
+ */ + dev_emerg(&client->dev, "Device is busy, expect memory corruption.\n"); + return 0; } crypto_unregister_kpp(&atmel_ecdh_nist_p256);
From 17fee07a2ac577da18b44dd658a9c3c864281c49 Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:14:53 +0000 Subject: [PATCH 05/89] crypto: xctr - Add XCTR support
Add a generic implementation of XCTR mode as a template. XCTR is a blockcipher mode similar to CTR mode. XCTR uses XORs and little-endian addition rather than big-endian arithmetic, which has two advantages: it is slightly faster on little-endian CPUs, and it is less likely to be implemented incorrectly, since integer overflows are not possible on practical input sizes. XCTR is used as a component to implement HCTR2. More information on XCTR mode can be found in the HCTR2 paper: https://eprint.iacr.org/2021/1441.pdf
Signed-off-by: Nathan Huckleberry Reviewed-by: Eric Biggers Reviewed-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 + crypto/Makefile | 1 + crypto/tcrypt.c | 1 + crypto/testmgr.c | 6 + crypto/testmgr.h | 693 +++++++++++++++++++++++++++++++++++++++++++++++ crypto/xctr.c | 191 +++++++++++++ 6 files changed, 901 insertions(+) create mode 100644 crypto/xctr.c
diff --git a/crypto/Kconfig b/crypto/Kconfig index 19197469cfab3..b9e4d511bf5ad 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -460,6 +460,15 @@ config CRYPTO_PCBC PCBC: Propagating Cipher Block Chaining mode This block cipher algorithm is required for RxRPC. +config CRYPTO_XCTR + tristate + select CRYPTO_SKCIPHER + select CRYPTO_MANAGER + help + XCTR: XOR Counter mode. This blockcipher mode is a variant of CTR mode + using XORs and little-endian addition rather than big-endian arithmetic. + XCTR mode is used to implement HCTR2. + config CRYPTO_XTS tristate "XTS support" select CRYPTO_SKCIPHER
diff --git a/crypto/Makefile b/crypto/Makefile index 43bc33e247d19..93d0afeb3a77f 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -94,6 +94,7 @@ obj-$(CONFIG_CRYPTO_CTS) += cts.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o +obj-$(CONFIG_CRYPTO_XCTR) += xctr.o obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 2bacf8384f59f..fd671d0e2012a 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1556,6 +1556,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("rfc3686(ctr(aes))"); ret += tcrypt_test("ofb(aes)"); ret += tcrypt_test("cfb(aes)"); + ret += tcrypt_test("xctr(aes)"); break; case 11:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5801a8f9f7134..0ea77dcdc6c0d 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5548,6 +5548,12 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(xchacha20_tv_template) }, + }, { + .alg = "xctr(aes)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aes_xctr_tv_template) + } }, { .alg = "xts(aes)", .generic_driver = "xts(ecb(aes-generic))",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 4d7449fc6a655..7179df0a39b65 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -34251,4 +34251,697 @@ static const struct hash_testvec blakes2s_256_tv_template[] = {{ 0xd5, 0x06, 0xb5, 0x3a, 0x7c, 0x7a, 0x65, 0x1d, }, }}; +/* + * Test vectors generated using https://github.com/google/hctr2 + */ +static const struct cipher_testvec aes_xctr_tv_template[] = { + {
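+		/* AES-128 key, single-byte message (shortest length exercised) */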
.key = "\x9c\x8d\xc4\xbd\x71\x36\xdc\x82" + "\x7c\xa1\xca\xa3\x23\x5a\xdb\xa4", + .iv = "\x8d\xe7\xa5\x6a\x95\x86\x42\xde" + "\xba\xea\x6e\x69\x03\x33\x86\x0f", + .ptext = "\xbd", + .ctext = "\xb9", + .klen = 16, + .len = 1, + }, + { + .key = "\xbc\x1b\x12\x0c\x3f\x18\xcc\x1f" + "\x5a\x1d\xab\x81\xa8\x68\x7c\x63", + .iv = "\x22\xc1\xdd\x25\x0b\x18\xcb\xa5" + "\x4a\xda\x15\x07\x73\xd9\x88\x10", + .ptext = "\x24\x6e\x64\xc6\x15\x26\x9c\xda" + "\x2a\x4b\x57\x12\xff\x7c\xd6\xb5", + .ctext = "\xd6\x47\x8d\x58\x92\xb2\x84\xf9" + "\xb7\xee\x0d\x98\xa1\x39\x4d\x8f", + .klen = 16, + .len = 16, + }, + { + .key = "\x44\x03\xbf\x4c\x30\xf0\xa7\xd6" + "\xbd\x54\xbb\x66\x8e\xa6\x0e\x8a", + .iv = "\xe6\xf7\x26\xdf\x8c\x3c\xaa\x88" + "\xce\xc1\xbd\x43\x3b\x09\x62\xad", + .ptext = "\x3c\xe3\x46\xb9\x8f\x9d\x3f\x8d" + "\xef\xf2\x53\xab\x24\xe2\x29\x08" + "\xf8\x7e\x1d\xa6\x6d\x86\x7d\x60" + "\x97\x63\x93\x29\x71\x94\xb4", + .ctext = "\xd4\xa3\xc6\xb8\xc1\x6f\x70\x1a" + "\x52\x0c\xed\x4c\xaf\x51\x56\x23" + "\x48\x45\x07\x10\x34\xc5\xba\x71" + "\xe5\xf8\x1e\xd8\xcb\xa6\xe7", + .klen = 16, + .len = 31, + }, + { + .key = "\x5b\x17\x30\x94\x19\x31\xa1\xae" + "\x24\x8e\x42\x1e\x82\xe6\xec\xb8", + .iv = "\xd1\x2e\xb9\xb8\xf8\x49\xeb\x68" + "\x06\xeb\x65\x33\x34\xa2\xeb\xf0", + .ptext = "\x19\x75\xec\x59\x60\x1b\x7a\x3e" + "\x62\x46\x87\xf0\xde\xab\x81\x36" + "\x63\x53\x11\xa0\x1f\xce\x25\x85" + "\x49\x6b\x28\xfa\x1c\x92\xe5\x18" + "\x38\x14\x00\x79\xf2\x9e\xeb\xfc" + "\x36\xa7\x6b\xe1\xe5\xcf\x04\x48" + "\x44\x6d\xbd\x64\xb3\xcb\x78\x05" + "\x8d\x7f\x9a\xaf\x3c\xcf\x6c\x45" + "\x6c\x7c\x46\x4c\xa8\xc0\x1e\xe4" + "\x33\xa5\x7b\xbb\x26\xd9\xc0\x32" + "\x9d\x8a\xb3\xf3\x3d\x52\xe6\x48" + "\x4c\x9b\x4c\x6e\xa4\xa3\xad\x66" + "\x56\x48\xd5\x98\x3a\x93\xc4\x85" + "\xe9\x89\xca\xa6\xc1\xc8\xe7\xf8" + "\xc3\xe9\xef\xbe\x77\xe6\xd1\x3a" + "\xa6\x99\xc8\x2d\xdf\x40\x0f\x44", + .ctext = "\xc6\x1a\x01\x1a\x00\xba\x04\xff" + "\x10\xd1\x7e\x5d\xad\x91\xde\x8c" + "\x08\x55\x95\xae\xd7\x22\x77\x40" + "\xf0\x33\x1b\x51\xef\xfe\x3d\x67" + "\xdf\xc4\x9f\x39\x47\x67\x93\xab" + "\xaa\x37\x55\xfe\x41\xe0\xba\xcd" + "\x25\x02\x7c\x61\x51\xa1\xcc\x72" + "\x7a\x20\x26\xb9\x06\x68\xbd\x19" + "\xc5\x2e\x1b\x75\x4a\x40\xb2\xd2" + "\xc4\xee\xd8\x5b\xa4\x55\x7d\x25" + "\xfc\x01\x4d\x6f\x0a\xfd\x37\x5d" + "\x3e\x67\xc0\x35\x72\x53\x7b\xe2" + "\xd6\x19\x5b\x92\x6c\x3a\x8c\x2a" + "\xe2\xc2\xa2\x4f\x2a\xf2\xb5\x15" + "\x65\xc5\x8d\x97\xf9\xbf\x8c\x98" + "\xe4\x50\x1a\xf2\x76\x55\x07\x49", + .klen = 16, + .len = 128, + }, + { + .key = "\x17\xa6\x01\x3d\x5d\xd6\xef\x2d" + "\x69\x8f\x4c\x54\x5b\xae\x43\xf0", + .iv = "\xa9\x1b\x47\x60\x26\x82\xf7\x1c" + "\x80\xf8\x88\xdd\xfb\x44\xd9\xda", + .ptext = "\xf7\x67\xcd\xa6\x04\x65\x53\x99" + "\x90\x5c\xa2\x56\x74\xd7\x9d\xf2" + "\x0b\x03\x7f\x4e\xa7\x84\x72\x2b" + "\xf0\xa5\xbf\xe6\x9a\x62\x3a\xfe" + "\x69\x5c\x93\x79\x23\x86\x64\x85" + "\xeb\x13\xb1\x5a\xd5\x48\x39\xa0" + "\x70\xfb\x06\x9a\xd7\x12\x5a\xb9" + "\xbe\xed\x2c\x81\x64\xf7\xcf\x80" + "\xee\xe6\x28\x32\x2d\x37\x4c\x32" + "\xf4\x1f\x23\x21\xe9\xc8\xc9\xbf" + "\x54\xbc\xcf\xb4\xc2\x65\x39\xdf" + "\xa5\xfb\x14\x11\xed\x62\x38\xcf" + "\x9b\x58\x11\xdd\xe9\xbd\x37\x57" + "\x75\x4c\x9e\xd5\x67\x0a\x48\xc6" + "\x0d\x05\x4e\xb1\x06\xd7\xec\x2e" + "\x9e\x59\xde\x4f\xab\x38\xbb\xe5" + "\x87\x04\x5a\x2c\x2a\xa2\x8f\x3c" + "\xe7\xe1\x46\xa9\x49\x9f\x24\xad" + "\x2d\xb0\x55\x40\x64\xd5\xda\x7e" + "\x1e\x77\xb8\x29\x72\x73\xc3\x84" + "\xcd\xf3\x94\x90\x58\x76\xc9\x2c" + "\x2a\xad\x56\xde\x33\x18\xb6\x3b" + "\x10\xe9\xe9\x8d\xf0\xa9\x7f\x05" + 
"\xf7\xb5\x8c\x13\x7e\x11\x3d\x1e" + "\x02\xbb\x5b\xea\x69\xff\x85\xcf" + "\x6a\x18\x97\x45\xe3\x96\xba\x4d" + "\x2d\x7a\x70\x78\x15\x2c\xe9\xdc" + "\x4e\x09\x92\x57\x04\xd8\x0b\xa6" + "\x20\x71\x76\x47\x76\x96\x89\xa0" + "\xd9\x29\xa2\x5a\x06\xdb\x56\x39" + "\x60\x33\x59\x04\x95\x89\xf6\x18" + "\x1d\x70\x75\x85\x3a\xb7\x6e", + .ctext = "\xe1\xe7\x3f\xd3\x6a\xb9\x2f\x64" + "\x37\xc5\xa4\xe9\xca\x0a\xa1\xd6" + "\xea\x7d\x39\xe5\xe6\xcc\x80\x54" + "\x74\x31\x2a\x04\x33\x79\x8c\x8e" + "\x4d\x47\x84\x28\x27\x9b\x3c\x58" + "\x54\x58\x20\x4f\x70\x01\x52\x5b" + "\xac\x95\x61\x49\x5f\xef\xba\xce" + "\xd7\x74\x56\xe7\xbb\xe0\x3c\xd0" + "\x7f\xa9\x23\x57\x33\x2a\xf6\xcb" + "\xbe\x42\x14\x95\xa8\xf9\x7a\x7e" + "\x12\x53\x3a\xe2\x13\xfe\x2d\x89" + "\xeb\xac\xd7\xa8\xa5\xf8\x27\xf3" + "\x74\x9a\x65\x63\xd1\x98\x3a\x7e" + "\x27\x7b\xc0\x20\x00\x4d\xf4\xe5" + "\x7b\x69\xa6\xa8\x06\x50\x85\xb6" + "\x7f\xac\x7f\xda\x1f\xf5\x37\x56" + "\x9b\x2f\xd3\x86\x6b\x70\xbd\x0e" + "\x55\x9a\x9d\x4b\x08\xb5\x5b\x7b" + "\xd4\x7c\xb4\x71\x49\x92\x4a\x1e" + "\xed\x6d\x11\x09\x47\x72\x32\x6a" + "\x97\x53\x36\xaf\xf3\x06\x06\x2c" + "\x69\xf1\x59\x00\x36\x95\x28\x2a" + "\xb6\xcd\x10\x21\x84\x73\x5c\x96" + "\x86\x14\x2c\x3d\x02\xdb\x53\x9a" + "\x61\xde\xea\x99\x84\x7a\x27\xf6" + "\xf7\xc8\x49\x73\x4b\xb8\xeb\xd3" + "\x41\x33\xdd\x09\x68\xe2\x64\xb8" + "\x5f\x75\x74\x97\x91\x54\xda\xc2" + "\x73\x2c\x1e\x5a\x84\x48\x01\x1a" + "\x0d\x8b\x0a\xdf\x07\x2e\xee\x77" + "\x1d\x17\x41\x7a\xc9\x33\x63\xfa" + "\x9f\xc3\x74\x57\x5f\x03\x4c", + .klen = 16, + .len = 255, + }, + { + .key = "\xe5\xf1\x48\x2e\x88\xdb\xc7\x28" + "\xa2\x55\x5d\x2f\x90\x02\xdc\xd3" + "\xf5\xd3\x9e\x87\xd5\x58\x30\x4a", + .iv = "\xa6\x40\x39\xf9\x63\x6c\x2d\xd4" + "\x1b\x71\x05\xa4\x88\x86\x11\xd3", + .ptext = "\xb6\x06\xae\x15\x11\x96\xc1\x44" + "\x44\xc2\x98\xf9\xa8\x0a\x0b", + .ctext = "\x27\x3b\x68\x40\xa9\x5e\x74\x6b" + "\x74\x67\x18\xf9\x37\xed\xed", + .klen = 24, + .len = 15, + }, + { + .key = "\xc8\xa0\x27\x67\x04\x3f\xed\xa5" + "\xb4\x0c\x51\x91\x2d\x27\x77\x33" + "\xa5\xfc\x2a\x9f\x78\xd8\x1c\x68", + .iv = "\x83\x99\x1a\xe2\x84\xca\xa9\x16" + "\x8d\xc4\x2d\x1b\x67\xc8\x86\x21", + .ptext = "\xd6\x22\x85\xb8\x5d\x7e\x26\x2e" + "\xbe\x04\x9d\x0c\x03\x91\x45\x4a" + "\x36", + .ctext = "\x0f\x44\xa9\x62\x72\xec\x12\x26" + "\x3a\xc6\x83\x26\x62\x5e\xb7\x13" + "\x05", + .klen = 24, + .len = 17, + }, + { + .key = "\xc5\x87\x18\x09\x0a\x4e\x66\x3e" + "\x50\x90\x19\x93\xc0\x33\xcf\x80" + "\x3a\x36\x6b\x6c\x43\xd7\xe4\x93", + .iv = "\xdd\x0b\x75\x1f\xee\x2f\xb4\x52" + "\x10\x82\x1f\x79\x8a\xa4\x9b\x87", + .ptext = "\x56\xf9\x13\xce\x9f\x30\x10\x11" + "\x1b\x59\xfd\x39\x5a\x29\xa3\x44" + "\x78\x97\x8c\xf6\x99\x6d\x26\xf1" + "\x32\x60\x6a\xeb\x04\x47\x29\x4c" + "\x7e\x14\xef\x4d\x55\x29\xfe\x36" + "\x37\xcf\x0b\x6e\xf3\xce\x15\xd2", + .ctext = "\x8f\x98\xe1\x5a\x7f\xfe\xc7\x05" + "\x76\xb0\xd5\xde\x90\x52\x2b\xa8" + "\xf3\x6e\x3c\x77\xa5\x33\x63\xdd" + "\x6f\x62\x12\xb0\x80\x10\xc1\x28" + "\x58\xe5\xd6\x24\x44\x04\x55\xf3" + "\x6d\x94\xcb\x2c\x7e\x7a\x85\x79", + .klen = 24, + .len = 48, + }, + { + .key = "\x84\x9b\xe8\x10\x4c\xb3\xd1\x7a" + "\xb3\xab\x4e\x6f\x90\x12\x07\xf8" + "\xef\xde\x42\x09\xbf\x34\x95\xb2", + .iv = "\x66\x62\xf9\x48\x9d\x17\xf7\xdf" + "\x06\x67\xf4\x6d\xf2\xbc\xa2\xe5", + .ptext = "\x2f\xd6\x16\x6b\xf9\x4b\x44\x14" + "\x90\x93\xe5\xfd\x05\xaa\x00\x26" + "\xbd\xab\x11\xb8\xf0\xcb\x11\x72" + "\xdd\xc5\x15\x4f\x4e\x1b\xf8\xc9" + "\x8f\x4a\xd5\x69\xf8\x9e\xfb\x05" + "\x8a\x37\x46\xfe\xfa\x58\x9b\x0e" + "\x72\x90\x9a\x06\xa5\x42\xf4\x7c" + 
"\x35\xd5\x64\x70\x72\x67\xfc\x8b" + "\xab\x5a\x2f\x64\x9b\xa1\xec\xe7" + "\xe6\x92\x69\xdb\x62\xa4\xe7\x44" + "\x88\x28\xd4\x52\x64\x19\xa9\xd7" + "\x0c\x00\xe6\xe7\xc1\x28\xc1\xf5" + "\x72\xc5\xfa\x09\x22\x2e\xf4\x82" + "\xa3\xdc\xc1\x68\xf9\x29\x55\x8d" + "\x04\x67\x13\xa6\x52\x04\x3c\x0c" + "\x14\xf2\x87\x23\x61\xab\x82\xcb" + "\x49\x5b\x6b\xd4\x4f\x0d\xd4\x95" + "\x82\xcd\xe3\x69\x47\x1b\x31\x73" + "\x73\x77\xc1\x53\x7d\x43\x5e\x4a" + "\x80\x3a\xca\x9c\xc7\x04\x1a\x31" + "\x8e\xe6\x76\x7f\xe1\xb3\xd0\x57" + "\xa2\xb2\xf6\x09\x51\xc9\x6d\xbc" + "\x79\xed\x57\x50\x36\xd2\x93\xa4" + "\x40\x5d\xac\x3a\x3b\xb6\x2d\x89" + "\x78\xa2\xbd\x23\xec\x35\x06\xf0" + "\xa8\xc8\xc9\xb0\xe3\x28\x2b\xba" + "\x70\xa0\xfe\xed\x13\xc4\xd7\x90" + "\xb1\x6a\xe0\xe1\x30\x71\x15\xd0" + "\xe2\xb3\xa6\x4e\xb0\x01\xf9\xe7" + "\x59\xc6\x1e\xed\x46\x2b\xe3\xa8" + "\x22\xeb\x7f\x1c\xd9\xcd\xe0\xa6" + "\x72\x42\x2c\x06\x75\xbb\xb7\x6b" + "\xca\x49\x5e\xa1\x47\x8d\x9e\xfe" + "\x60\xcc\x34\x95\x8e\xfa\x1e\x3e" + "\x85\x4b\x03\x54\xea\x34\x1c\x41" + "\x90\x45\xa6\xbe\xcf\x58\x4f\xca" + "\x2c\x79\xc0\x3e\x8f\xd7\x3b\xd4" + "\x55\x74\xa8\xe1\x57\x09\xbf\xab" + "\x2c\xf9\xe4\xdd\x17\x99\x57\x60" + "\x4b\x88\x2a\x7f\x43\x86\xb9\x9a" + "\x60\xbf\x4c\xcf\x9b\x41\xb8\x99" + "\x69\x15\x4f\x91\x4d\xeb\xdf\x6f" + "\xcc\x4c\xf9\x6f\xf2\x33\x23\xe7" + "\x02\x44\xaa\xa2\xfa\xb1\x39\xa5" + "\xff\x88\xf5\x37\x02\x33\x24\xfc" + "\x79\x11\x4c\x94\xc2\x31\x87\x9c" + "\x53\x19\x99\x32\xe4\xde\x18\xf4" + "\x8f\xe2\xe8\xa3\xfb\x0b\xaa\x7c" + "\xdb\x83\x0f\xf6\xc0\x8a\x9b\xcd" + "\x7b\x16\x05\x5b\xe4\xb4\x34\x03" + "\xe3\x8f\xc9\x4b\x56\x84\x2a\x4c" + "\x36\x72\x3c\x84\x4f\xba\xa2\x7f" + "\xf7\x1b\xba\x4d\x8a\xb8\x5d\x51" + "\x36\xfb\xef\x23\x18\x6f\x33\x2d" + "\xbb\x06\x24\x8e\x33\x98\x6e\xcd" + "\x63\x11\x18\x6b\xcc\x1b\x66\xb9" + "\x38\x8d\x06\x8d\x98\x1a\xef\xaa" + "\x35\x4a\x90\xfa\xb1\xd3\xcc\x11" + "\x50\x4c\x54\x18\x60\x5d\xe4\x11" + "\xfc\x19\xe1\x53\x20\x5c\xe7\xef" + "\x8a\x2b\xa8\x82\x51\x5f\x5d\x43" + "\x34\xe5\xcf\x7b\x1b\x6f\x81\x19" + "\xb7\xdf\xa8\x9e\x81\x89\x5f\x33" + "\x69\xaf\xde\x89\x68\x88\xf0\x71", + .ctext = "\xab\x15\x46\x5b\xed\x4f\xa8\xac" + "\xbf\x31\x30\x84\x55\xa4\xb8\x98" + "\x79\xba\xa0\x15\xa4\x55\x20\xec" + "\xf9\x94\x71\xe6\x6a\x6f\xee\x87" + "\x2e\x3a\xa2\x95\xae\x6e\x56\x09" + "\xe9\xc0\x0f\xe2\xc6\xb7\x30\xa9" + "\x73\x8e\x59\x7c\xfd\xe3\x71\xf7" + "\xae\x8b\x91\xab\x5e\x36\xe9\xa8" + "\xff\x17\xfa\xa2\x94\x93\x11\x42" + "\x67\x96\x99\xc5\xf0\xad\x2a\x57" + "\xf9\xa6\x70\x4a\xdf\x71\xff\xc0" + "\xe2\xaf\x9a\xae\x57\x58\x13\x3b" + "\x2d\xf1\xc7\x8f\xdb\x8a\xcc\xce" + "\x53\x1a\x69\x55\x39\xc8\xbe\xc3" + "\x2d\xb1\x03\xd9\xa3\x99\xf4\x8d" + "\xd9\x2d\x27\xae\xa5\xe7\x77\x7f" + "\xbb\x88\x84\xea\xfa\x19\x3f\x44" + "\x61\x21\x8a\x1f\xbe\xac\x60\xb4" + "\xaf\xe9\x00\xab\xef\x3c\x53\x56" + "\xcd\x4b\x53\xd8\x9b\xfe\x88\x23" + "\x5b\x85\x76\x08\xec\xd1\x6e\x4a" + "\x87\xa4\x7d\x29\x4e\x4f\x3f\xc9" + "\xa4\xab\x63\xea\xdd\xef\x9f\x79" + "\x38\x18\x7d\x90\x90\xf9\x12\x57" + "\x1d\x89\xea\xfe\xd4\x47\x45\x32" + "\x6a\xf6\xe7\xde\x22\x7e\xee\xc1" + "\xbc\x2d\xc3\xbb\xe5\xd4\x13\xac" + "\x63\xff\x5b\xb1\x05\x96\xd5\xf3" + "\x07\x9a\x62\xb6\x30\xea\x7d\x1e" + "\xee\x75\x0a\x1b\xcc\x6e\x4d\xa7" + "\xf7\x4d\x74\xd8\x60\x32\x5e\xd0" + "\x93\xd7\x19\x90\x4e\x26\xdb\xe4" + "\x5e\xd4\xa8\xb9\x76\xba\x56\x91" + "\xc4\x75\x04\x1e\xc2\x77\x24\x6f" + "\xf9\xe8\x4a\xec\x7f\x86\x95\xb3" + "\x5c\x2c\x97\xab\xf0\xf7\x74\x5b" + "\x0b\xc2\xda\x42\x40\x34\x16\xed" + "\x06\xc1\x25\x53\x17\x0d\x81\x4e" + 
"\xe6\xf2\x0f\x6d\x94\x3c\x90\x7a" + "\xae\x20\xe9\x3f\xf8\x18\x67\x6a" + "\x49\x1e\x41\xb6\x46\xab\xc8\xa7" + "\xcb\x19\x96\xf5\x99\xc0\x66\x3e" + "\x77\xcf\x73\x52\x83\x2a\xe2\x48" + "\x27\x6c\xeb\xe7\xe7\xc4\xd5\x6a" + "\x40\x67\xbc\xbf\x6b\x3c\xf3\xbb" + "\x51\x5e\x31\xac\x03\x81\xab\x61" + "\xfa\xa5\xa6\x7d\x8b\xc3\x8a\x75" + "\x28\x7a\x71\x9c\xac\x8f\x76\xfc" + "\xf9\x6c\x5d\x9b\xd7\xf6\x36\x2d" + "\x61\xd5\x61\xaa\xdd\x01\xfc\x57" + "\x91\x10\xcd\xcd\x6d\x27\x63\x24" + "\x67\x46\x7a\xbb\x61\x56\x39\xb1" + "\xd6\x79\xfe\x77\xca\xd6\x73\x59" + "\x6e\x58\x11\x90\x03\x26\x74\x2a" + "\xfa\x52\x12\x47\xfb\x12\xeb\x3e" + "\x88\xf0\x52\x6c\xc0\x54\x7a\x88" + "\x8c\xe5\xde\x9e\xba\xb9\xf2\xe1" + "\x97\x2e\x5c\xbd\xf4\x13\x7e\xf3" + "\xc4\xe1\x87\xa5\x35\xfa\x7c\x71" + "\x1a\xc9\xf4\xa8\x57\xe2\x5a\x6b" + "\x14\xe0\x73\xaf\x56\x6b\xa0\x00" + "\x9e\x5f\x64\xac\x00\xfb\xc4\x92" + "\xe5\xe2\x8a\xb2\x9e\x75\x49\x85" + "\x25\x66\xa5\x1a\xf9\x7d\x1d\x60", + .klen = 24, + .len = 512, + }, + { + .key = "\x05\x60\x3a\x7e\x60\x90\x46\x18" + "\x6c\x60\xba\xeb\x12\xd7\xbe\xd1" + "\xd3\xf6\x10\x46\x9d\xf1\x0c\xb4" + "\x73\xe3\x93\x27\xa8\x2c\x13\xaa", + .iv = "\xf5\x96\xd1\xb6\xcb\x44\xd8\xd0" + "\x3e\xdb\x92\x80\x08\x94\xcd\xd3", + .ptext = "\x78", + .ctext = "\xc5", + .klen = 32, + .len = 1, + }, + { + .key = "\x35\xca\x38\xf3\xd9\xd6\x34\xef" + "\xcd\xee\xa3\x26\x86\xba\xfb\x45" + "\x01\xfa\x52\x67\xff\xc5\x9d\xaa" + "\x64\x9a\x05\xbb\x85\x20\xa7\xf2", + .iv = "\xe3\xda\xf5\xff\x42\x59\x87\x86" + "\xee\x7b\xd6\xb4\x6a\x25\x44\xff", + .ptext = "\x44\x67\x1e\x04\x53\xd2\x4b\xd9" + "\x96\x33\x07\x54\xe4\x8e\x20", + .ctext = "\xcc\x55\x40\x79\x47\x5c\x8b\xa6" + "\xca\x7b\x9f\x50\xe3\x21\xea", + .klen = 32, + .len = 15, + }, + { + .key = "\xaf\xd9\x14\x14\xd5\xdb\xc9\xce" + "\x76\x5c\x5a\xbf\x43\x05\x29\x24" + "\xc4\x13\x68\xcc\xe8\x37\xbd\xb9" + "\x41\x20\xf5\x53\x48\xd0\xa2\xd6", + .iv = "\xa7\xb4\x00\x08\x79\x10\xae\xf5" + "\x02\xbf\x85\xb2\x69\x4c\xc6\x04", + .ptext = "\xac\x6a\xa8\x0c\xb0\x84\xbf\x4c" + "\xae\x94\x20\x58\x7e\x00\x93\x89", + .ctext = "\xd5\xaa\xe2\xe9\x86\x4c\x95\x4e" + "\xde\xb6\x15\xcb\xdc\x1f\x13\x38", + .klen = 32, + .len = 16, + }, + { + .key = "\xed\xe3\x8b\xe7\x1c\x17\xbf\x4a" + "\x02\xe2\xfc\x76\xac\xf5\x3c\x00" + "\x5d\xdc\xfc\x83\xeb\x45\xb4\xcb" + "\x59\x62\x60\xec\x69\x9c\x16\x45", + .iv = "\xe4\x0e\x2b\x90\xd2\xfa\x94\x2e" + "\x10\xe5\x64\x2b\x97\x28\x15\xc7", + .ptext = "\xe6\x53\xff\x60\x0e\xc4\x51\xe4" + "\x93\x4d\xe5\x55\xc5\xd9\xad\x48" + "\x52", + .ctext = "\xba\x25\x28\xf5\xcf\x31\x91\x80" + "\xda\x2b\x95\x5f\x20\xcb\xfb\x9f" + "\xc6", + .klen = 32, + .len = 17, + }, + { + .key = "\x77\x5c\xc0\x73\x9a\x64\x97\x91" + "\x2f\xee\xe0\x20\xc2\x04\x59\x2e" + "\x97\xd2\xa7\x70\xb3\xb0\x21\x6b" + "\x8f\xbf\xb8\x51\xa8\xea\x0f\x62", + .iv = "\x31\x8e\x1f\xcd\xfd\x23\xeb\x7f" + "\x8a\x1f\x1b\x23\x53\x27\x44\xe5", + .ptext = "\xcd\xff\x8c\x9b\x94\x5a\x51\x3f" + "\x40\x93\x56\x93\x66\x39\x63\x1f" + "\xbf\xe6\xa4\xfa\xbe\x79\x93\x03" + "\xf5\x66\x74\x16\xfc\xe4\xce", + .ctext = "\x8b\xd3\xc3\xce\x66\xf8\x66\x4c" + "\xad\xd6\xf5\x0f\xd8\x99\x5a\x75" + "\xa1\x3c\xab\x0b\x21\x36\x57\x72" + "\x88\x29\xe9\xea\x4a\x8d\xe9", + .klen = 32, + .len = 31, + }, + { + .key = "\xa1\x2f\x4d\xde\xfe\xa1\xff\xa8" + "\x73\xdd\xe3\xe2\x95\xfc\xea\x9c" + "\xd0\x80\x42\x0c\xb8\x43\x3e\x99" + "\x39\x38\x0a\x8c\xe8\x45\x3a\x7b", + .iv = "\x32\xc4\x6f\xb1\x14\x43\xd1\x87" + "\xe2\x6f\x5a\x58\x02\x36\x7e\x2a", + .ptext = "\x9e\x5c\x1e\xf1\xd6\x7d\x09\x57" + "\x18\x48\x55\xda\x7d\x44\xf9\x6d" + 
"\xac\xcd\x59\xbb\x10\xa2\x94\x67" + "\xd1\x6f\xfe\x6b\x4a\x11\xe8\x04" + "\x09\x26\x4f\x8d\x5d\xa1\x7b\x42" + "\xf9\x4b\x66\x76\x38\x12\xfe\xfe", + .ctext = "\x42\xbc\xa7\x64\x15\x9a\x04\x71" + "\x2c\x5f\x94\xba\x89\x3a\xad\xbc" + "\x87\xb3\xf4\x09\x4f\x57\x06\x18" + "\xdc\x84\x20\xf7\x64\x85\xca\x3b" + "\xab\xe6\x33\x56\x34\x60\x5d\x4b" + "\x2e\x16\x13\xd4\x77\xde\x2d\x2b", + .klen = 32, + .len = 48, + }, + { + .key = "\xfb\xf5\xb7\x3d\xa6\x95\x42\xbf" + "\xd2\x94\x6c\x74\x0f\xbc\x5a\x28" + "\x35\x3c\x51\x58\x84\xfb\x7d\x11" + "\x16\x1e\x00\x97\x37\x08\xb7\x16", + .iv = "\x9b\x53\x57\x40\xe6\xd9\xa7\x27" + "\x78\xd4\x9b\xd2\x29\x1d\x24\xa9", + .ptext = "\x8b\x02\x60\x0a\x3e\xb7\x10\x59" + "\xc3\xac\xd5\x2a\x75\x81\xf2\xdb" + "\x55\xca\x65\x86\x44\xfb\xfe\x91" + "\x26\xbb\x45\xb2\x46\x22\x3e\x08" + "\xa2\xbf\x46\xcb\x68\x7d\x45\x7b" + "\xa1\x6a\x3c\x6e\x25\xeb\xed\x31" + "\x7a\x8b\x47\xf9\xde\xec\x3d\x87" + "\x09\x20\x2e\xfa\xba\x8b\x9b\xc5" + "\x6c\x25\x9c\x9d\x2a\xe8\xab\x90" + "\x3f\x86\xee\x61\x13\x21\xd4\xde" + "\xe1\x0c\x95\xfc\x5c\x8a\x6e\x0a" + "\x73\xcf\x08\x69\x44\x4e\xde\x25" + "\xaf\xaa\x56\x04\xc4\xb3\x60\x44" + "\x3b\x8b\x3d\xee\xae\x42\x4b\xd2" + "\x9a\x6c\xa0\x8e\x52\x06\xb2\xd1" + "\x5d\x38\x30\x6d\x27\x9b\x1a\xd8", + .ctext = "\xa3\x78\x33\x78\x95\x95\x97\x07" + "\x53\xa3\xa1\x5b\x18\x32\x27\xf7" + "\x09\x12\x53\x70\x83\xb5\x6a\x9f" + "\x26\x6d\x10\x0d\xe0\x1c\xe6\x2b" + "\x70\x00\xdc\xa1\x60\xef\x1b\xee" + "\xc5\xa5\x51\x17\xae\xcc\xf2\xed" + "\xc4\x60\x07\xdf\xd5\x7a\xe9\x90" + "\x3c\x9f\x96\x5d\x72\x65\x5d\xef" + "\xd0\x94\x32\xc4\x85\x90\x78\xa1" + "\x2e\x64\xf6\xee\x8e\x74\x3f\x20" + "\x2f\x12\x3b\x3d\xd5\x39\x8e\x5a" + "\xf9\x8f\xce\x94\x5d\x82\x18\x66" + "\x14\xaf\x4c\xfe\xe0\x91\xc3\x4a" + "\x85\xcf\xe7\xe8\xf7\xcb\xf0\x31" + "\x88\x7d\xc9\x5b\x71\x9d\x5f\xd2" + "\xfa\xed\xa6\x24\xda\xbb\xb1\x84", + .klen = 32, + .len = 128, + }, + { + .key = "\x32\x37\x2b\x8f\x7b\xb1\x23\x79" + "\x05\x52\xde\x05\xf1\x68\x3f\x6c" + "\xa4\xae\xbc\x21\xc2\xc6\xf0\xbd" + "\x0f\x20\xb7\xa4\xc5\x05\x7b\x64", + .iv = "\xff\x26\x4e\x67\x48\xdd\xcf\xfe" + "\x42\x09\x04\x98\x5f\x1e\xfa\x80", + .ptext = "\x99\xdc\x3b\x19\x41\xf9\xff\x6e" + "\x76\xb5\x03\xfa\x61\xed\xf8\x44" + "\x70\xb9\xf0\x83\x80\x6e\x31\x77" + "\x77\xe4\xc7\xb4\x77\x02\xab\x91" + "\x82\xc6\xf8\x7c\x46\x61\x03\x69" + "\x09\xa0\xf7\x12\xb7\x81\x6c\xa9" + "\x10\x5c\xbb\x55\xb3\x44\xed\xb5" + "\xa2\x52\x48\x71\x90\x5d\xda\x40" + "\x0b\x7f\x4a\x11\x6d\xa7\x3d\x8e" + "\x1b\xcd\x9d\x4e\x75\x8b\x7d\x87" + "\xe5\x39\x34\x32\x1e\xe6\x8d\x51" + "\xd4\x1f\xe3\x1d\x50\xa0\x22\x37" + "\x7c\xb0\xd9\xfb\xb6\xb2\x16\xf6" + "\x6d\x26\xa0\x4e\x8c\x6a\xe6\xb6" + "\xbe\x4c\x7c\xe3\x88\x10\x18\x90" + "\x11\x50\x19\x90\xe7\x19\x3f\xd0" + "\x31\x15\x0f\x06\x96\xfe\xa7\x7b" + "\xc3\x32\x88\x69\xa4\x12\xe3\x64" + "\x02\x30\x17\x74\x6c\x88\x7c\x9b" + "\xd6\x6d\x75\xdf\x11\x86\x70\x79" + "\x48\x7d\x34\x3e\x33\x58\x07\x8b" + "\xd2\x50\xac\x35\x15\x45\x05\xb4" + "\x4d\x31\x97\x19\x87\x23\x4b\x87" + "\x53\xdc\xa9\x19\x78\xf1\xbf\x35" + "\x30\x04\x14\xd4\xcf\xb2\x8c\x87" + "\x7d\xdb\x69\xc9\xcd\xfe\x40\x3e" + "\x8d\x66\x5b\x61\xe5\xf0\x2d\x87" + "\x93\x3a\x0c\x2b\x04\x98\x05\xc2" + "\x56\x4d\xc4\x6c\xcd\x7a\x98\x7e" + "\xe2\x2d\x79\x07\x91\x9f\xdf\x2f" + "\x72\xc9\x8f\xcb\x0b\x87\x1b\xb7" + "\x04\x86\xcb\x47\xfa\x5d\x03", + .ctext = "\x0b\x00\xf7\xf2\xc8\x6a\xba\x9a" + "\x0a\x97\x18\x7a\x00\xa0\xdb\xf4" + "\x5e\x8e\x4a\xb7\xe0\x51\xf1\x75" + "\x17\x8b\xb4\xf1\x56\x11\x05\x9f" + "\x2f\x2e\xba\x67\x04\xe1\xb4\xa5" + "\xfc\x7c\x8c\xad\xc6\xb9\xd1\x64" + 
"\xca\xbd\x5d\xaf\xdb\x65\x48\x4f" + "\x1b\xb3\x94\x5c\x0b\xd0\xee\xcd" + "\xb5\x7f\x43\x8a\xd8\x8b\x66\xde" + "\xd2\x9c\x13\x65\xa4\x47\xa7\x03" + "\xc5\xa1\x46\x8f\x2f\x84\xbc\xef" + "\x48\x9d\x9d\xb5\xbd\x43\xff\xd2" + "\xd2\x7a\x5a\x13\xbf\xb4\xf6\x05" + "\x17\xcd\x01\x12\xf0\x35\x27\x96" + "\xf4\xc1\x65\xf7\x69\xef\x64\x1b" + "\x6e\x4a\xe8\x77\xce\x83\x01\xb7" + "\x60\xe6\x45\x2a\xcd\x41\x4a\xb5" + "\x8e\xcc\x45\x93\xf1\xd6\x64\x5f" + "\x32\x60\xe4\x29\x4a\x82\x6c\x86" + "\x16\xe4\xcc\xdb\x5f\xc8\x11\xa6" + "\xfe\x88\xd6\xc3\xe5\x5c\xbb\x67" + "\xec\xa5\x7b\xf5\xa8\x4f\x77\x25" + "\x5d\x0c\x2a\x99\xf9\xb9\xd1\xae" + "\x3c\x83\x2a\x93\x9b\x66\xec\x68" + "\x2c\x93\x02\x8a\x8a\x1e\x2f\x50" + "\x09\x37\x19\x5c\x2a\x3a\xc2\xcb" + "\xcb\x89\x82\x81\xb7\xbb\xef\x73" + "\x8b\xc9\xae\x42\x96\xef\x70\xc0" + "\x89\xc7\x3e\x6a\x26\xc3\xe4\x39" + "\x53\xa9\xcf\x63\x7d\x05\xf3\xff" + "\x52\x04\xf6\x7f\x23\x96\xe9\xf7" + "\xff\xd6\x50\xa3\x0e\x20\x71", + .klen = 32, + .len = 255, + }, + { + .key = "\x39\x5f\xf4\x9c\x90\x3a\x9a\x25" + "\x15\x11\x79\x39\xed\x26\x5e\xf6" + "\xda\xcf\x33\x4f\x82\x97\xab\x10" + "\xc1\x55\x48\x82\x80\xa8\x02\xb2", + .iv = "\x82\x60\xd9\x06\xeb\x40\x99\x76" + "\x08\xc5\xa4\x83\x45\xb8\x38\x5a", + .ptext = "\xa1\xa8\xac\xac\x08\xaf\x8f\x84" + "\xbf\xcc\x79\x31\x5e\x61\x01\xd1" + "\x4d\x5f\x9b\xcd\x91\x92\x9a\xa1" + "\x99\x0d\x49\xb2\xd7\xfd\x25\x93" + "\x51\x96\xbd\x91\x8b\x08\xf1\xc6" + "\x0d\x17\xf6\xef\xfd\xd2\x78\x16" + "\xc8\x08\x27\x7b\xca\x98\xc6\x12" + "\x86\x11\xdb\xd5\x08\x3d\x5a\x2c" + "\xcf\x15\x0e\x9b\x42\x78\xeb\x1f" + "\x52\xbc\xd7\x5a\x8a\x33\x6c\x14" + "\xfc\x61\xad\x2e\x1e\x03\x66\xea" + "\x79\x0e\x88\x88\xde\x93\xe3\x81" + "\xb5\xc4\x1c\xe6\x9c\x08\x18\x8e" + "\xa0\x87\xda\xe6\xf8\xcb\x30\x44" + "\x2d\x4e\xc0\xa3\x60\xf9\x62\x7b" + "\x4b\xd5\x61\x6d\xe2\x67\x95\x54" + "\x10\xd1\xca\x22\xe8\xb6\xb1\x3a" + "\x2d\xd7\x35\x5b\x22\x88\x55\x67" + "\x3d\x83\x8f\x07\x98\xa8\xf2\xcf" + "\x04\xb7\x9e\x52\xca\xe0\x98\x72" + "\x5c\xc1\x00\xd4\x1f\x2c\x61\xf3" + "\xe8\x40\xaf\x4a\xee\x66\x41\xa0" + "\x02\x77\x29\x30\x65\x59\x4b\x20" + "\x7b\x0d\x80\x97\x27\x7f\xd5\x90" + "\xbb\x9d\x76\x90\xe5\x43\x43\x72" + "\xd0\xd4\x14\x75\x66\xb3\xb6\xaf" + "\x09\xe4\x23\xb0\x62\xad\x17\x28" + "\x39\x26\xab\xf5\xf7\x5c\xb6\x33" + "\xbd\x27\x09\x5b\x29\xe4\x40\x0b" + "\xc1\x26\x32\xdb\x9a\xdf\xf9\x5a" + "\xae\x03\x2c\xa4\x40\x84\x9a\xb7" + "\x4e\x47\xa8\x0f\x23\xc7\xbb\xcf" + "\x2b\xf2\x32\x6c\x35\x6a\x91\xba" + "\x0e\xea\xa2\x8b\x2f\xbd\xb5\xea" + "\x6e\xbc\xb5\x4b\x03\xb3\x86\xe0" + "\x86\xcf\xba\xcb\x38\x2c\x32\xa6" + "\x6d\xe5\x28\xa6\xad\xd2\x7f\x73" + "\x43\x14\xf8\xb1\x99\x12\x2d\x2b" + "\xdf\xcd\xf2\x81\x43\x94\xdf\xb1" + "\x17\xc9\x33\xa6\x3d\xef\x96\xb8" + "\xd6\x0d\x00\xec\x49\x66\x85\x5d" + "\x44\x62\x12\x04\x55\x5c\x48\xd3" + "\xbd\x73\xac\x54\x8f\xbf\x97\x8e" + "\x85\xfd\xc2\xa1\x25\x32\x38\x6a" + "\x1f\xac\x57\x3c\x4f\x56\x73\xf2" + "\x1d\xb6\x48\x68\xc7\x0c\xe7\x60" + "\xd2\x8e\x4d\xfb\xc7\x20\x7b\xb7" + "\x45\x28\x12\xc6\x26\xae\xea\x7c" + "\x5d\xe2\x46\xb5\xae\xe1\xc3\x98" + "\x6f\x72\xd5\xa2\xfd\xed\x40\xfd" + "\xf9\xdf\x61\xec\x45\x2c\x15\xe0" + "\x1e\xbb\xde\x71\x37\x5f\x73\xc2" + "\x11\xcc\x6e\x6d\xe1\xb5\x1b\xd2" + "\x2a\xdd\x19\x8a\xc2\xe1\xa0\xa4" + "\x26\xeb\xb2\x2c\x4f\x77\x52\xf1" + "\x42\x72\x6c\xad\xd7\x78\x5d\x72" + "\xc9\x16\x26\x25\x1b\x4c\xe6\x58" + "\x79\x57\xb5\x06\x15\x4f\xe5\xba" + "\xa2\x7f\x2d\x5b\x87\x8a\x44\x70" + "\xec\xc7\xef\x84\xae\x60\xa2\x61" + "\x86\xe9\x18\xcd\x28\xc4\xa4\xf5" + "\xbc\x84\xb8\x86\xa0\xba\xf1\xf1" + 
"\x08\x3b\x32\x75\x35\x22\x7a\x65" + "\xca\x48\xe8\xef\x6e\xe2\x8e\x00", + .ctext = "\x2f\xae\xd8\x67\xeb\x15\xde\x75" + "\x53\xa3\x0e\x5a\xcf\x1c\xbe\xea" + "\xde\xf9\xcf\xc2\x9f\xfd\x0f\x44" + "\xc0\xe0\x7a\x76\x1d\xcb\x4a\xf8" + "\x35\xd6\xe3\x95\x98\x6b\x3f\x89" + "\xc4\xe6\xb6\x6f\xe1\x8b\x39\x4b" + "\x1c\x6c\x77\xe4\xe1\x8a\xbc\x61" + "\x00\x6a\xb1\x37\x2f\x45\xe6\x04" + "\x52\x0b\xfc\x1e\x32\xc1\xd8\x9d" + "\xfa\xdd\x67\x5c\xe0\x75\x83\xd0" + "\x21\x9e\x02\xea\xc0\x7f\xc0\x29" + "\xb3\x6c\xa5\x97\xb3\x29\x82\x1a" + "\x94\xa5\xb4\xb6\x49\xe5\xa5\xad" + "\x95\x40\x52\x7c\x84\x88\xa4\xa8" + "\x26\xe4\xd9\x5d\x41\xf2\x93\x7b" + "\xa4\x48\x1b\x66\x91\xb9\x7c\xc2" + "\x99\x29\xdf\xd8\x30\xac\xd4\x47" + "\x42\xa0\x14\x87\x67\xb8\xfd\x0b" + "\x1e\xcb\x5e\x5c\x9a\xc2\x04\x8b" + "\x17\x29\x9d\x99\x7f\x86\x4c\xe2" + "\x5c\x96\xa6\x0f\xb6\x47\x33\x5c" + "\xe4\x50\x49\xd5\x4f\x92\x0b\x9a" + "\xbc\x52\x4c\x41\xf5\xc9\x3e\x76" + "\x55\x55\xd4\xdc\x71\x14\x23\xfc" + "\x5f\xd5\x08\xde\xa0\xf7\x28\xc0" + "\xe1\x61\xac\x64\x66\xf6\xd1\x31" + "\xe4\xa4\xa9\xed\xbc\xad\x4f\x3b" + "\x59\xb9\x48\x1b\xe7\xb1\x6f\xc6" + "\xba\x40\x1c\x0b\xe7\x2f\x31\x65" + "\x85\xf5\xe9\x14\x0a\x31\xf5\xf3" + "\xc0\x1c\x20\x35\x73\x38\x0f\x8e" + "\x39\xf0\x68\xae\x08\x9c\x87\x4b" + "\x42\xfc\x22\x17\xee\x96\x51\x2a" + "\xd8\x57\x5a\x35\xea\x72\x74\xfc" + "\xb3\x0e\x69\x9a\xe1\x4f\x24\x90" + "\xc5\x4b\xe5\xd7\xe3\x82\x2f\xc5" + "\x62\x46\x3e\xab\x72\x4e\xe0\xf3" + "\x90\x09\x4c\xb2\xe1\xe8\xa0\xf5" + "\x46\x40\x2b\x47\x85\x3c\x21\x90" + "\x3d\xad\x25\x5a\x36\xdf\xe5\xbc" + "\x7e\x80\x4d\x53\x77\xf1\x79\xa6" + "\xec\x22\x80\x88\x68\xd6\x2d\x8b" + "\x3e\xf7\x52\xc7\x2a\x20\x42\x5c" + "\xed\x99\x4f\x32\x80\x00\x7e\x73" + "\xd7\x6d\x7f\x7d\x42\x54\x4a\xfe" + "\xff\x6f\x61\xca\x2a\xbb\x4f\xeb" + "\x4f\xe4\x4e\xaf\x2c\x4f\x82\xcd" + "\xa1\xa7\x11\xb3\x34\x33\xcf\x32" + "\x63\x0e\x24\x3a\x35\xbe\x06\xd5" + "\x17\xcb\x02\x30\x33\x6e\x8c\x49" + "\x40\x6e\x34\x8c\x07\xd4\x3e\xe6" + "\xaf\x78\x6d\x8c\x10\x5f\x21\x58" + "\x49\x26\xc5\xaf\x0d\x7d\xd4\xaf" + "\xcd\x5b\xa1\xe3\xf6\x39\x1c\x9b" + "\x8e\x00\xa1\xa7\x9e\x17\x4a\xc0" + "\x54\x56\x9e\xcf\xcf\x88\x79\x8d" + "\x50\xf7\x56\x8e\x0a\x73\x46\x6b" + "\xc3\xb9\x9b\x6c\x7d\xc4\xc8\xb6" + "\x03\x5f\x30\x62\x7d\xe6\xdb\x15" + "\xe1\x39\x02\x8c\xff\xda\xc8\x43" + "\xf2\xa9\xbf\x00\xe7\x3a\x61\x89" + "\xdf\xb0\xca\x7d\x8c\x8a\x6a\x9f" + "\x18\x89\x3d\x39\xac\x36\x6f\x05" + "\x1f\xb5\xda\x00\xea\xe1\x51\x21", + .klen = 32, + .len = 512, + }, + +}; + #endif /* _CRYPTO_TESTMGR_H */ diff --git a/crypto/xctr.c b/crypto/xctr.c new file mode 100644 index 0000000000000..5c00147e8ec40 --- /dev/null +++ b/crypto/xctr.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * XCTR: XOR Counter mode - Adapted from ctr.c + * + * (C) Copyright IBM Corp. 2007 - Joy Latten + * Copyright 2021 Google LLC + */ + +/* + * XCTR mode is a blockcipher mode of operation used to implement HCTR2. XCTR is + * closely related to the CTR mode of operation; the main difference is that CTR + * generates the keystream using E(CTR + IV) whereas XCTR generates the + * keystream using E(CTR ^ IV). This allows implementations to avoid dealing + * with multi-limb integers (as is required in CTR mode). XCTR is also specified + * using little-endian arithmetic which makes it slightly faster on LE machines. 
+ * + * See the HCTR2 paper for more details: + * Length-preserving encryption with HCTR2 + * (https://eprint.iacr.org/2021/1441.pdf) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* For now this implementation is limited to 16-byte blocks for simplicity */ +#define XCTR_BLOCKSIZE 16 + +static void crypto_xctr_crypt_final(struct skcipher_walk *walk, + struct crypto_cipher *tfm, u32 byte_ctr) +{ + u8 keystream[XCTR_BLOCKSIZE]; + const u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); + + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + crypto_cipher_encrypt_one(tfm, keystream, walk->iv); + crypto_xor_cpy(dst, keystream, src, nbytes); + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); +} + +static int crypto_xctr_crypt_segment(struct skcipher_walk *walk, + struct crypto_cipher *tfm, u32 byte_ctr) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + const u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); + + do { + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + fn(crypto_cipher_tfm(tfm), dst, walk->iv); + crypto_xor(dst, src, XCTR_BLOCKSIZE); + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + + le32_add_cpu(&ctr32, 1); + + src += XCTR_BLOCKSIZE; + dst += XCTR_BLOCKSIZE; + } while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE); + + return nbytes; +} + +static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk, + struct crypto_cipher *tfm, u32 byte_ctr) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + unsigned long alignmask = crypto_cipher_alignmask(tfm); + unsigned int nbytes = walk->nbytes; + u8 *data = walk->src.virt.addr; + u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; + u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); + __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); + + do { + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + fn(crypto_cipher_tfm(tfm), keystream, walk->iv); + crypto_xor(data, keystream, XCTR_BLOCKSIZE); + crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32)); + + le32_add_cpu(&ctr32, 1); + + data += XCTR_BLOCKSIZE; + } while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE); + + return nbytes; +} + +static int crypto_xctr_crypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + u32 byte_ctr = 0; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes >= XCTR_BLOCKSIZE) { + if (walk.src.virt.addr == walk.dst.virt.addr) + nbytes = crypto_xctr_crypt_inplace(&walk, cipher, + byte_ctr); + else + nbytes = crypto_xctr_crypt_segment(&walk, cipher, + byte_ctr); + + byte_ctr += walk.nbytes - nbytes; + err = skcipher_walk_done(&walk, nbytes); + } + + if (walk.nbytes) { + crypto_xctr_crypt_final(&walk, cipher, byte_ctr); + err = skcipher_walk_done(&walk, 0); + } + + return err; +} + +static int crypto_xctr_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct skcipher_instance *inst; + struct crypto_alg *alg; + int err; + + inst = skcipher_alloc_instance_simple(tmpl, tb); + if (IS_ERR(inst)) + return PTR_ERR(inst); + + alg = skcipher_ialg_simple(inst); + + /* Block size must be 16 bytes. 
*/ + err = -EINVAL; + if (alg->cra_blocksize != XCTR_BLOCKSIZE) + goto out_free_inst; + + /* XCTR mode is a stream cipher. */ + inst->alg.base.cra_blocksize = 1; + + /* + * To simplify the implementation, configure the skcipher walk to only + * give a partial block at the very end, never earlier. + */ + inst->alg.chunksize = alg->cra_blocksize; + + inst->alg.encrypt = crypto_xctr_crypt; + inst->alg.decrypt = crypto_xctr_crypt; + + err = skcipher_register_instance(tmpl, inst); + if (err) { +out_free_inst: + inst->free(inst); + } + + return err; +} + +static struct crypto_template crypto_xctr_tmpl = { + .name = "xctr", + .create = crypto_xctr_create, + .module = THIS_MODULE, +}; + +static int __init crypto_xctr_module_init(void) +{ + return crypto_register_template(&crypto_xctr_tmpl); +} + +static void __exit crypto_xctr_module_exit(void) +{ + crypto_unregister_template(&crypto_xctr_tmpl); +} + +subsys_initcall(crypto_xctr_module_init); +module_exit(crypto_xctr_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("XCTR block cipher mode of operation"); +MODULE_ALIAS_CRYPTO("xctr"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); From f3c923a09c4c4f5861b1ed53cf75673992a6ba68 Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:14:54 +0000 Subject: [PATCH 06/89] crypto: polyval - Add POLYVAL support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for POLYVAL, an ε-Δ-universal hash function similar to GHASH. This patch only uses POLYVAL as a component to implement HCTR2 mode. It should be noted that POLYVAL was originally specified for use in AES-GCM-SIV (RFC 8452), but the kernel does not currently support this mode. POLYVAL is implemented as an shash algorithm. The implementation is modified from ghash-generic.c. For more information on POLYVAL see: Length-preserving encryption with HCTR2: https://eprint.iacr.org/2021/1441.pdf AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption: https://datatracker.ietf.org/doc/html/rfc8452 Signed-off-by: Nathan Huckleberry Reviewed-by: Eric Biggers Reviewed-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- crypto/Kconfig | 8 ++ crypto/Makefile | 1 + crypto/polyval-generic.c | 205 +++++++++++++++++++++++++++++++++++++++ crypto/tcrypt.c | 4 + crypto/testmgr.c | 6 ++ crypto/testmgr.h | 171 ++++++++++++++++++++++++++++++++ include/crypto/polyval.h | 17 ++++ 7 files changed, 412 insertions(+) create mode 100644 crypto/polyval-generic.c create mode 100644 include/crypto/polyval.h diff --git a/crypto/Kconfig b/crypto/Kconfig index b9e4d511bf5ad..d59e70dca197f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -773,6 +773,14 @@ config CRYPTO_GHASH GHASH is the hash function used in GCM (Galois/Counter Mode). It is not a general-purpose cryptographic hash function. +config CRYPTO_POLYVAL + tristate + select CRYPTO_GF128MUL + select CRYPTO_HASH + help + POLYVAL is the hash function used in HCTR2. It is not a general-purpose + cryptographic hash function. 
+ config CRYPTO_POLY1305 tristate "Poly1305 authenticator algorithm" select CRYPTO_HASH diff --git a/crypto/Makefile b/crypto/Makefile index 93d0afeb3a77f..7694ed0a44d5c 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -172,6 +172,7 @@ UBSAN_SANITIZE_jitterentropy.o = n jitterentropy_rng-y := jitterentropy.o jitterentropy-kcapi.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o +obj-$(CONFIG_CRYPTO_POLYVAL) += polyval-generic.o obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c new file mode 100644 index 0000000000000..bf2b03b7bfc04 --- /dev/null +++ b/crypto/polyval-generic.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * POLYVAL: hash function for HCTR2. + * + * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen + * Copyright (c) 2009 Intel Corp. + * Author: Huang Ying + * Copyright 2021 Google LLC + */ + +/* + * Code based on crypto/ghash-generic.c + * + * POLYVAL is a keyed hash function similar to GHASH. POLYVAL uses a different + * modulus for finite field multiplication which makes hardware accelerated + * implementations on little-endian machines faster. POLYVAL is used in the + * kernel to implement HCTR2, but was originally specified for AES-GCM-SIV + * (RFC 8452). + * + * For more information see: + * Length-preserving encryption with HCTR2: + * https://eprint.iacr.org/2021/1441.pdf + * AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption: + * https://datatracker.ietf.org/doc/html/rfc8452 + * + * Like GHASH, POLYVAL is not a cryptographic hash function and should + * not be used outside of crypto modes explicitly designed to use POLYVAL. + * + * This implementation uses a convenient trick involving the GHASH and POLYVAL + * fields. This trick allows multiplication in the POLYVAL field to be + * implemented by using multiplication in the GHASH field as a subroutine. An + * element of the POLYVAL field can be converted to an element of the GHASH + * field by computing x*REVERSE(a), where REVERSE reverses the byte-ordering of + * a. Similarly, an element of the GHASH field can be converted back to the + * POLYVAL field by computing REVERSE(x^{-1}*a). For more information, see: + * https://datatracker.ietf.org/doc/html/rfc8452#appendix-A + * + * By using this trick, we do not need to implement the POLYVAL field for the + * generic implementation. + * + * Warning: this generic implementation is not intended to be used in practice + * and is not constant time. For practical use, a hardware accelerated + * implementation of POLYVAL should be used instead. 
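+ * For reference, the conversion trick described above yields the identity
+ * from RFC 8452, Appendix A:
+ *   POLYVAL(H, X_1, ..., X_n) =
+ *     ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)),
+ *                       ByteReverse(X_1), ..., ByteReverse(X_n)))
+ * which is what the setkey, update and final steps below compute.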
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct polyval_tfm_ctx { + struct gf128mul_4k *gf128; +}; + +struct polyval_desc_ctx { + union { + u8 buffer[POLYVAL_BLOCK_SIZE]; + be128 buffer128; + }; + u32 bytes; +}; + +static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE], + const u8 src[POLYVAL_BLOCK_SIZE]) +{ + u64 a = get_unaligned((const u64 *)&src[0]); + u64 b = get_unaligned((const u64 *)&src[8]); + + put_unaligned(swab64(a), (u64 *)&dst[8]); + put_unaligned(swab64(b), (u64 *)&dst[0]); +} + +static int polyval_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm); + be128 k; + + if (keylen != POLYVAL_BLOCK_SIZE) + return -EINVAL; + + gf128mul_free_4k(ctx->gf128); + + BUILD_BUG_ON(sizeof(k) != POLYVAL_BLOCK_SIZE); + copy_and_reverse((u8 *)&k, key); + gf128mul_x_lle(&k, &k); + + ctx->gf128 = gf128mul_init_4k_lle(&k); + memzero_explicit(&k, POLYVAL_BLOCK_SIZE); + + if (!ctx->gf128) + return -ENOMEM; + + return 0; +} + +static int polyval_init(struct shash_desc *desc) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + + memset(dctx, 0, sizeof(*dctx)); + + return 0; +} + +static int polyval_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm); + u8 *pos; + u8 tmp[POLYVAL_BLOCK_SIZE]; + int n; + + if (dctx->bytes) { + n = min(srclen, dctx->bytes); + pos = dctx->buffer + dctx->bytes - 1; + + dctx->bytes -= n; + srclen -= n; + + while (n--) + *pos-- ^= *src++; + + if (!dctx->bytes) + gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); + } + + while (srclen >= POLYVAL_BLOCK_SIZE) { + copy_and_reverse(tmp, src); + crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE); + gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); + src += POLYVAL_BLOCK_SIZE; + srclen -= POLYVAL_BLOCK_SIZE; + } + + if (srclen) { + dctx->bytes = POLYVAL_BLOCK_SIZE - srclen; + pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1; + while (srclen--) + *pos-- ^= *src++; + } + + return 0; +} + +static int polyval_final(struct shash_desc *desc, u8 *dst) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm); + + if (dctx->bytes) + gf128mul_4k_lle(&dctx->buffer128, ctx->gf128); + copy_and_reverse(dst, dctx->buffer); + return 0; +} + +static void polyval_exit_tfm(struct crypto_tfm *tfm) +{ + struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + + gf128mul_free_4k(ctx->gf128); +} + +static struct shash_alg polyval_alg = { + .digestsize = POLYVAL_DIGEST_SIZE, + .init = polyval_init, + .update = polyval_update, + .final = polyval_final, + .setkey = polyval_setkey, + .descsize = sizeof(struct polyval_desc_ctx), + .base = { + .cra_name = "polyval", + .cra_driver_name = "polyval-generic", + .cra_priority = 100, + .cra_blocksize = POLYVAL_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct polyval_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_exit = polyval_exit_tfm, + }, +}; + +static int __init polyval_mod_init(void) +{ + return crypto_register_shash(&polyval_alg); +} + +static void __exit polyval_mod_exit(void) +{ + crypto_unregister_shash(&polyval_alg); +} + +subsys_initcall(polyval_mod_init); +module_exit(polyval_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("POLYVAL hash function"); +MODULE_ALIAS_CRYPTO("polyval"); +MODULE_ALIAS_CRYPTO("polyval-generic"); diff --git a/crypto/tcrypt.c 
b/crypto/tcrypt.c index fd671d0e2012a..dd9cf216029b7 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1730,6 +1730,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("ccm(sm4)"); break; + case 57: + ret += tcrypt_test("polyval"); + break; + case 100: ret += tcrypt_test("hmac(md5)"); break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 0ea77dcdc6c0d..0f40e260b5a97 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5342,6 +5342,12 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(poly1305_tv_template) } + }, { + .alg = "polyval", + .test = alg_test_hash, + .suite = { + .hash = __VECS(polyval_tv_template) + } }, { .alg = "rfc3686(ctr(aes))", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 7179df0a39b65..b58e139303077 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -34944,4 +34944,175 @@ static const struct cipher_testvec aes_xctr_tv_template[] = { }; +/* + * Test vectors generated using https://github.com/google/hctr2 + * + * To ensure compatibility with RFC 8452, some tests were sourced from + * https://datatracker.ietf.org/doc/html/rfc8452 + */ +static const struct hash_testvec polyval_tv_template[] = { + { // From RFC 8452 + .key = "\x31\x07\x28\xd9\x91\x1f\x1f\x38" + "\x37\xb2\x43\x16\xc3\xfa\xb9\xa0", + .plaintext = "\x65\x78\x61\x6d\x70\x6c\x65\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x48\x65\x6c\x6c\x6f\x20\x77\x6f" + "\x72\x6c\x64\x00\x00\x00\x00\x00" + "\x38\x00\x00\x00\x00\x00\x00\x00" + "\x58\x00\x00\x00\x00\x00\x00\x00", + .digest = "\xad\x7f\xcf\x0b\x51\x69\x85\x16" + "\x62\x67\x2f\x3c\x5f\x95\x13\x8f", + .psize = 48, + .ksize = 16, + }, + { // From RFC 8452 + .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a" + "\xc5\xdb\xc6\x98\x7a\xda\x73\x77", + .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .psize = 16, + .ksize = 16, + }, + { // From RFC 8452 + .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a" + "\xc5\xdb\xc6\x98\x7a\xda\x73\x77", + .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x40\x00\x00\x00\x00\x00\x00\x00", + .digest = "\xeb\x93\xb7\x74\x09\x62\xc5\xe4" + "\x9d\x2a\x90\xa7\xdc\x5c\xec\x74", + .psize = 32, + .ksize = 16, + }, + { // From RFC 8452 + .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a" + "\xc5\xdb\xc6\x98\x7a\xda\x73\x77", + .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x02\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x03\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x80\x01\x00\x00\x00\x00\x00\x00", + .digest = "\x81\x38\x87\x46\xbc\x22\xd2\x6b" + "\x2a\xbc\x3d\xcb\x15\x75\x42\x22", + .psize = 64, + .ksize = 16, + }, + { // From RFC 8452 + .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a" + "\xc5\xdb\xc6\x98\x7a\xda\x73\x77", + .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x02\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x03\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x04\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x02\x00\x00\x00\x00\x00\x00", + .digest = "\x1e\x39\xb6\xd3\x34\x4d\x34\x8f" + "\x60\x44\xf8\x99\x35\xd1\xcf\x78", + .psize = 80, + .ksize = 
16, + }, + { // From RFC 8452 + .key = "\xd9\xb3\x60\x27\x96\x94\x94\x1a" + "\xc5\xdb\xc6\x98\x7a\xda\x73\x77", + .plaintext = "\x01\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x02\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x03\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x04\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x05\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x08\x00\x00\x00\x00\x00\x00\x00" + "\x00\x02\x00\x00\x00\x00\x00\x00", + .digest = "\xff\xcd\x05\xd5\x77\x0f\x34\xad" + "\x92\x67\xf0\xa5\x99\x94\xb1\x5a", + .psize = 96, + .ksize = 16, + }, + { // Random ( 1) + .key = "\x90\xcc\xac\xee\xba\xd7\xd4\x68" + "\x98\xa6\x79\x70\xdf\x66\x15\x6c", + .plaintext = "", + .digest = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .psize = 0, + .ksize = 16, + }, + { // Random ( 1) + .key = "\xc1\x45\x71\xf0\x30\x07\x94\xe7" + "\x3a\xdd\xe4\xc6\x19\x2d\x02\xa2", + .plaintext = "\xc1\x5d\x47\xc7\x4c\x7c\x5e\x07" + "\x85\x14\x8f\x79\xcc\x73\x83\xf7" + "\x35\xb8\xcb\x73\x61\xf0\x53\x31" + "\xbf\x84\xde\xb6\xde\xaf\xb0\xb8" + "\xb7\xd9\x11\x91\x89\xfd\x1e\x4c" + "\x84\x4a\x1f\x2a\x87\xa4\xaf\x62" + "\x8d\x7d\x58\xf6\x43\x35\xfc\x53" + "\x8f\x1a\xf6\x12\xe1\x13\x3f\x66" + "\x91\x4b\x13\xd6\x45\xfb\xb0\x7a" + "\xe0\x8b\x8e\x99\xf7\x86\x46\x37" + "\xd1\x22\x9e\x52\xf3\x3f\xd9\x75" + "\x2c\x2c\xc6\xbb\x0e\x08\x14\x29" + "\xe8\x50\x2f\xd8\xbe\xf4\xe9\x69" + "\x4a\xee\xf7\xae\x15\x65\x35\x1e", + .digest = "\x00\x4f\x5d\xe9\x3b\xc0\xd6\x50" + "\x3e\x38\x73\x86\xc6\xda\xca\x7f", + .psize = 112, + .ksize = 16, + }, + { // Random ( 1) + .key = "\x37\xbe\x68\x16\x50\xb9\x4e\xb0" + "\x47\xde\xe2\xbd\xde\xe4\x48\x09", + .plaintext = "\x87\xfc\x68\x9f\xff\xf2\x4a\x1e" + "\x82\x3b\x73\x8f\xc1\xb2\x1b\x7a" + "\x6c\x4f\x81\xbc\x88\x9b\x6c\xa3" + "\x9c\xc2\xa5\xbc\x14\x70\x4c\x9b" + "\x0c\x9f\x59\x92\x16\x4b\x91\x3d" + "\x18\x55\x22\x68\x12\x8c\x63\xb2" + "\x51\xcb\x85\x4b\xd2\xae\x0b\x1c" + "\x5d\x28\x9d\x1d\xb1\xc8\xf0\x77" + "\xe9\xb5\x07\x4e\x06\xc8\xee\xf8" + "\x1b\xed\x72\x2a\x55\x7d\x16\xc9" + "\xf2\x54\xe7\xe9\xe0\x44\x5b\x33" + "\xb1\x49\xee\xff\x43\xfb\x82\xcd" + "\x4a\x70\x78\x81\xa4\x34\x36\xe8" + "\x4c\x28\x54\xa6\x6c\xc3\x6b\x78" + "\xe7\xc0\x5d\xc6\x5d\x81\xab\x70" + "\x08\x86\xa1\xfd\xf4\x77\x55\xfd" + "\xa3\xe9\xe2\x1b\xdf\x99\xb7\x80" + "\xf9\x0a\x4f\x72\x4a\xd3\xaf\xbb" + "\xb3\x3b\xeb\x08\x58\x0f\x79\xce" + "\xa5\x99\x05\x12\x34\xd4\xf4\x86" + "\x37\x23\x1d\xc8\x49\xc0\x92\xae" + "\xa6\xac\x9b\x31\x55\xed\x15\xc6" + "\x05\x17\x37\x8d\x90\x42\xe4\x87" + "\x89\x62\x88\x69\x1c\x6a\xfd\xe3" + "\x00\x2b\x47\x1a\x73\xc1\x51\xc2" + "\xc0\x62\x74\x6a\x9e\xb2\xe5\x21" + "\xbe\x90\xb5\xb0\x50\xca\x88\x68" + "\xe1\x9d\x7a\xdf\x6c\xb7\xb9\x98" + "\xee\x28\x62\x61\x8b\xd1\x47\xf9" + "\x04\x7a\x0b\x5d\xcd\x2b\x65\xf5" + "\x12\xa3\xfe\x1a\xaa\x2c\x78\x42" + "\xb8\xbe\x7d\x74\xeb\x59\xba\xba", + .digest = "\xae\x11\xd4\x60\x2a\x5f\x9e\x42" + "\x89\x04\xc2\x34\x8d\x55\x94\x0a", + .psize = 256, + .ksize = 16, + }, + +}; + #endif /* _CRYPTO_TESTMGR_H */ diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h new file mode 100644 index 0000000000000..b14c38aa91663 --- /dev/null +++ b/include/crypto/polyval.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Polyval hash algorithm + * + * Copyright 2021 Google LLC + */ + +#ifndef _CRYPTO_POLYVAL_H +#define _CRYPTO_POLYVAL_H + +#include +#include + +#define 
POLYVAL_BLOCK_SIZE 16 +#define POLYVAL_DIGEST_SIZE 16 + +#endif From 7ff554ced7c7d7cf77586e07474e8633e011e2d0 Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:14:55 +0000 Subject: [PATCH 07/89] crypto: hctr2 - Add HCTR2 support Add support for HCTR2 as a template. HCTR2 is a length-preserving encryption mode that is efficient on processors with instructions to accelerate AES and carryless multiplication, e.g. x86 processors with AES-NI and CLMUL, and ARM processors with the ARMv8 Crypto Extensions. As a length-preserving encryption mode, HCTR2 is suitable for applications such as storage encryption where ciphertext expansion is not possible, and thus authenticated encryption cannot be used. Currently, such applications usually use XTS, or in some cases Adiantum. XTS has the disadvantage that it is a narrow-block mode: a bitflip will only change 16 bytes in the resulting ciphertext or plaintext. This reveals more information to an attacker than necessary. HCTR2 is a wide-block mode, so it provides a stronger security property: a bitflip will change the entire message. HCTR2 is somewhat similar to Adiantum, which is also a wide-block mode. However, HCTR2 is designed to take advantage of existing crypto instructions, while Adiantum targets devices without such hardware support. Adiantum is also designed with longer messages in mind, while HCTR2 is designed to be efficient even on short messages. HCTR2 requires POLYVAL and XCTR as components. More information on HCTR2 can be found here: "Length-preserving encryption with HCTR2": https://eprint.iacr.org/2021/1441.pdf Signed-off-by: Nathan Huckleberry Reviewed-by: Ard Biesheuvel Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/Kconfig | 11 + crypto/Makefile | 1 + crypto/hctr2.c | 581 ++++++++++++++++++++++++++++++++++++++++ crypto/tcrypt.c | 5 + crypto/testmgr.c | 8 + crypto/testmgr.h | 672 +++++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 1278 insertions(+) create mode 100644 crypto/hctr2.c diff --git a/crypto/Kconfig b/crypto/Kconfig index d59e70dca197f..0601a2d2feeff 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -532,6 +532,17 @@ config CRYPTO_ADIANTUM If unsure, say N. +config CRYPTO_HCTR2 + tristate "HCTR2 support" + select CRYPTO_XCTR + select CRYPTO_POLYVAL + select CRYPTO_MANAGER + help + HCTR2 is a length-preserving encryption mode for storage encryption that + is efficient on processors with instructions to accelerate AES and + carryless multiplication, e.g. x86 processors with AES-NI and CLMUL, and + ARM processors with the ARMv8 crypto extensions. 
+ config CRYPTO_ESSIV tristate "ESSIV support for block encryption" select CRYPTO_AUTHENC diff --git a/crypto/Makefile b/crypto/Makefile index 7694ed0a44d5c..3bbc0dd491608 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -95,6 +95,7 @@ obj-$(CONFIG_CRYPTO_LRW) += lrw.o obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o obj-$(CONFIG_CRYPTO_XCTR) += xctr.o +obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o diff --git a/crypto/hctr2.c b/crypto/hctr2.c new file mode 100644 index 0000000000000..7d00a3bcb6670 --- /dev/null +++ b/crypto/hctr2.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * HCTR2 length-preserving encryption mode + * + * Copyright 2021 Google LLC + */ + + +/* + * HCTR2 is a length-preserving encryption mode that is efficient on + * processors with instructions to accelerate AES and carryless + * multiplication, e.g. x86 processors with AES-NI and CLMUL, and ARM + * processors with the ARMv8 crypto extensions. + * + * For more details, see the paper: "Length-preserving encryption with HCTR2" + * (https://eprint.iacr.org/2021/1441.pdf) + */ + +#include +#include +#include +#include +#include +#include + +#define BLOCKCIPHER_BLOCK_SIZE 16 + +/* + * The specification allows variable-length tweaks, but Linux's crypto API + * currently only allows algorithms to support a single length. The "natural" + * tweak length for HCTR2 is 16, since that fits into one POLYVAL block for + * the best performance. But longer tweaks are useful for fscrypt, to avoid + * needing to derive per-file keys. So instead we use two blocks, or 32 bytes. + */ +#define TWEAK_SIZE 32 + +struct hctr2_instance_ctx { + struct crypto_cipher_spawn blockcipher_spawn; + struct crypto_skcipher_spawn xctr_spawn; + struct crypto_shash_spawn polyval_spawn; +}; + +struct hctr2_tfm_ctx { + struct crypto_cipher *blockcipher; + struct crypto_skcipher *xctr; + struct crypto_shash *polyval; + u8 L[BLOCKCIPHER_BLOCK_SIZE]; + int hashed_tweak_offset; + /* + * This struct is allocated with extra space for two exported hash + * states. Since the hash state size is not known at compile-time, we + * can't add these to the struct directly. + * + * hashed_tweaklen_divisible; + * hashed_tweaklen_remainder; + */ +}; + +struct hctr2_request_ctx { + u8 first_block[BLOCKCIPHER_BLOCK_SIZE]; + u8 xctr_iv[BLOCKCIPHER_BLOCK_SIZE]; + struct scatterlist *bulk_part_dst; + struct scatterlist *bulk_part_src; + struct scatterlist sg_src[2]; + struct scatterlist sg_dst[2]; + /* + * Sub-request sizes are unknown at compile-time, so they need to go + * after the members with known sizes. + */ + union { + struct shash_desc hash_desc; + struct skcipher_request xctr_req; + } u; + /* + * This struct is allocated with extra space for one exported hash + * state. Since the hash state size is not known at compile-time, we + * can't add it to the struct directly. 
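+	 * The extra space holds the POLYVAL state exported after the tweak has
+	 * been absorbed, so the same per-request tweak hash can be reused when
+	 * computing both H(T || N) and H(T || V).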
+ * + * hashed_tweak; + */ +}; + +static inline u8 *hctr2_hashed_tweaklen(const struct hctr2_tfm_ctx *tctx, + bool has_remainder) +{ + u8 *p = (u8 *)tctx + sizeof(*tctx); + + if (has_remainder) /* For messages not a multiple of block length */ + p += crypto_shash_statesize(tctx->polyval); + return p; +} + +static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx, + struct hctr2_request_ctx *rctx) +{ + return (u8 *)rctx + tctx->hashed_tweak_offset; +} + +/* + * The input data for each HCTR2 hash step begins with a 16-byte block that + * contains the tweak length and a flag that indicates whether the input is evenly + * divisible into blocks. Since this implementation only supports one tweak + * length, we precompute the two hash states resulting from hashing the two + * possible values of this initial block. This reduces by one block the amount of + * data that needs to be hashed for each encryption/decryption + * + * These precomputed hashes are stored in hctr2_tfm_ctx. + */ +static int hctr2_hash_tweaklen(struct hctr2_tfm_ctx *tctx, bool has_remainder) +{ + SHASH_DESC_ON_STACK(shash, tfm->polyval); + __le64 tweak_length_block[2]; + int err; + + shash->tfm = tctx->polyval; + memset(tweak_length_block, 0, sizeof(tweak_length_block)); + + tweak_length_block[0] = cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder); + err = crypto_shash_init(shash); + if (err) + return err; + err = crypto_shash_update(shash, (u8 *)tweak_length_block, + POLYVAL_BLOCK_SIZE); + if (err) + return err; + return crypto_shash_export(shash, hctr2_hashed_tweaklen(tctx, has_remainder)); +} + +static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + u8 hbar[BLOCKCIPHER_BLOCK_SIZE]; + int err; + + crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(tctx->blockcipher, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(tctx->blockcipher, key, keylen); + if (err) + return err; + + crypto_skcipher_clear_flags(tctx->xctr, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(tctx->xctr, + crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(tctx->xctr, key, keylen); + if (err) + return err; + + memset(hbar, 0, sizeof(hbar)); + crypto_cipher_encrypt_one(tctx->blockcipher, hbar, hbar); + + memset(tctx->L, 0, sizeof(tctx->L)); + tctx->L[0] = 0x01; + crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L); + + crypto_shash_clear_flags(tctx->polyval, CRYPTO_TFM_REQ_MASK); + crypto_shash_set_flags(tctx->polyval, crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_shash_setkey(tctx->polyval, hbar, BLOCKCIPHER_BLOCK_SIZE); + if (err) + return err; + memzero_explicit(hbar, sizeof(hbar)); + + return hctr2_hash_tweaklen(tctx, true) ?: hctr2_hash_tweaklen(tctx, false); +} + +static int hctr2_hash_tweak(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + struct shash_desc *hash_desc = &rctx->u.hash_desc; + int err; + bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE; + + hash_desc->tfm = tctx->polyval; + err = crypto_shash_import(hash_desc, hctr2_hashed_tweaklen(tctx, has_remainder)); + if (err) + return err; + err = crypto_shash_update(hash_desc, req->iv, TWEAK_SIZE); + if (err) + return err; + + // Store the hashed tweak, 
since we need it when computing both + // H(T || N) and H(T || V). + return crypto_shash_export(hash_desc, hctr2_hashed_tweak(tctx, rctx)); +} + +static int hctr2_hash_message(struct skcipher_request *req, + struct scatterlist *sgl, + u8 digest[POLYVAL_DIGEST_SIZE]) +{ + static const u8 padding[BLOCKCIPHER_BLOCK_SIZE] = { 0x1 }; + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + struct shash_desc *hash_desc = &rctx->u.hash_desc; + const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + struct sg_mapping_iter miter; + unsigned int remainder = bulk_len % BLOCKCIPHER_BLOCK_SIZE; + int i; + int err = 0; + int n = 0; + + sg_miter_start(&miter, sgl, sg_nents(sgl), + SG_MITER_FROM_SG | SG_MITER_ATOMIC); + for (i = 0; i < bulk_len; i += n) { + sg_miter_next(&miter); + n = min_t(unsigned int, miter.length, bulk_len - i); + err = crypto_shash_update(hash_desc, miter.addr, n); + if (err) + break; + } + sg_miter_stop(&miter); + + if (err) + return err; + + if (remainder) { + err = crypto_shash_update(hash_desc, padding, + BLOCKCIPHER_BLOCK_SIZE - remainder); + if (err) + return err; + } + return crypto_shash_final(hash_desc, digest); +} + +static int hctr2_finish(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + u8 digest[POLYVAL_DIGEST_SIZE]; + struct shash_desc *hash_desc = &rctx->u.hash_desc; + int err; + + // U = UU ^ H(T || V) + // or M = MM ^ H(T || N) + hash_desc->tfm = tctx->polyval; + err = crypto_shash_import(hash_desc, hctr2_hashed_tweak(tctx, rctx)); + if (err) + return err; + err = hctr2_hash_message(req, rctx->bulk_part_dst, digest); + if (err) + return err; + crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE); + + // Copy U (or M) into dst scatterlist + scatterwalk_map_and_copy(rctx->first_block, req->dst, + 0, BLOCKCIPHER_BLOCK_SIZE, 1); + return 0; +} + +static void hctr2_xctr_done(struct crypto_async_request *areq, + int err) +{ + struct skcipher_request *req = areq->data; + + if (!err) + err = hctr2_finish(req); + + skcipher_request_complete(req, err); +} + +static int hctr2_crypt(struct skcipher_request *req, bool enc) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct hctr2_request_ctx *rctx = skcipher_request_ctx(req); + u8 digest[POLYVAL_DIGEST_SIZE]; + int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE; + int err; + + // Requests must be at least one block + if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE) + return -EINVAL; + + // Copy M (or U) into a temporary buffer + scatterwalk_map_and_copy(rctx->first_block, req->src, + 0, BLOCKCIPHER_BLOCK_SIZE, 0); + + // Create scatterlists for N and V + rctx->bulk_part_src = scatterwalk_ffwd(rctx->sg_src, req->src, + BLOCKCIPHER_BLOCK_SIZE); + rctx->bulk_part_dst = scatterwalk_ffwd(rctx->sg_dst, req->dst, + BLOCKCIPHER_BLOCK_SIZE); + + // MM = M ^ H(T || N) + // or UU = U ^ H(T || V) + err = hctr2_hash_tweak(req); + if (err) + return err; + err = hctr2_hash_message(req, rctx->bulk_part_src, digest); + if (err) + return err; + crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE); + + // UU = E(MM) + // or MM = D(UU) + if (enc) + crypto_cipher_encrypt_one(tctx->blockcipher, rctx->first_block, + digest); + else + crypto_cipher_decrypt_one(tctx->blockcipher, rctx->first_block, + digest); + + // S = MM ^ UU ^ L + crypto_xor(digest, 
rctx->first_block, BLOCKCIPHER_BLOCK_SIZE); + crypto_xor_cpy(rctx->xctr_iv, digest, tctx->L, BLOCKCIPHER_BLOCK_SIZE); + + // V = XCTR(S, N) + // or N = XCTR(S, V) + skcipher_request_set_tfm(&rctx->u.xctr_req, tctx->xctr); + skcipher_request_set_crypt(&rctx->u.xctr_req, rctx->bulk_part_src, + rctx->bulk_part_dst, bulk_len, + rctx->xctr_iv); + skcipher_request_set_callback(&rctx->u.xctr_req, + req->base.flags, + hctr2_xctr_done, req); + return crypto_skcipher_encrypt(&rctx->u.xctr_req) ?: + hctr2_finish(req); +} + +static int hctr2_encrypt(struct skcipher_request *req) +{ + return hctr2_crypt(req, true); +} + +static int hctr2_decrypt(struct skcipher_request *req) +{ + return hctr2_crypt(req, false); +} + +static int hctr2_init_tfm(struct crypto_skcipher *tfm) +{ + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst); + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + struct crypto_skcipher *xctr; + struct crypto_cipher *blockcipher; + struct crypto_shash *polyval; + unsigned int subreq_size; + int err; + + xctr = crypto_spawn_skcipher(&ictx->xctr_spawn); + if (IS_ERR(xctr)) + return PTR_ERR(xctr); + + blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn); + if (IS_ERR(blockcipher)) { + err = PTR_ERR(blockcipher); + goto err_free_xctr; + } + + polyval = crypto_spawn_shash(&ictx->polyval_spawn); + if (IS_ERR(polyval)) { + err = PTR_ERR(polyval); + goto err_free_blockcipher; + } + + tctx->xctr = xctr; + tctx->blockcipher = blockcipher; + tctx->polyval = polyval; + + BUILD_BUG_ON(offsetofend(struct hctr2_request_ctx, u) != + sizeof(struct hctr2_request_ctx)); + subreq_size = max(sizeof_field(struct hctr2_request_ctx, u.hash_desc) + + crypto_shash_descsize(polyval), + sizeof_field(struct hctr2_request_ctx, u.xctr_req) + + crypto_skcipher_reqsize(xctr)); + + tctx->hashed_tweak_offset = offsetof(struct hctr2_request_ctx, u) + + subreq_size; + crypto_skcipher_set_reqsize(tfm, tctx->hashed_tweak_offset + + crypto_shash_statesize(polyval)); + return 0; + +err_free_blockcipher: + crypto_free_cipher(blockcipher); +err_free_xctr: + crypto_free_skcipher(xctr); + return err; +} + +static void hctr2_exit_tfm(struct crypto_skcipher *tfm) +{ + struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); + + crypto_free_cipher(tctx->blockcipher); + crypto_free_skcipher(tctx->xctr); + crypto_free_shash(tctx->polyval); +} + +static void hctr2_free_instance(struct skcipher_instance *inst) +{ + struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst); + + crypto_drop_cipher(&ictx->blockcipher_spawn); + crypto_drop_skcipher(&ictx->xctr_spawn); + crypto_drop_shash(&ictx->polyval_spawn); + kfree(inst); +} + +static int hctr2_create_common(struct crypto_template *tmpl, + struct rtattr **tb, + const char *xctr_name, + const char *polyval_name) +{ + u32 mask; + struct skcipher_instance *inst; + struct hctr2_instance_ctx *ictx; + struct skcipher_alg *xctr_alg; + struct crypto_alg *blockcipher_alg; + struct shash_alg *polyval_alg; + char blockcipher_name[CRYPTO_MAX_ALG_NAME]; + int len; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); + if (err) + return err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + ictx = skcipher_instance_ctx(inst); + + /* Stream cipher, xctr(block_cipher) */ + err = crypto_grab_skcipher(&ictx->xctr_spawn, + skcipher_crypto_instance(inst), + xctr_name, 0, mask); + if (err) + goto err_free_inst; + xctr_alg = 
crypto_spawn_skcipher_alg(&ictx->xctr_spawn); + + err = -EINVAL; + if (strncmp(xctr_alg->base.cra_name, "xctr(", 5)) + goto err_free_inst; + len = strscpy(blockcipher_name, xctr_alg->base.cra_name + 5, + sizeof(blockcipher_name)); + if (len < 1) + goto err_free_inst; + if (blockcipher_name[len - 1] != ')') + goto err_free_inst; + blockcipher_name[len - 1] = 0; + + /* Block cipher, e.g. "aes" */ + err = crypto_grab_cipher(&ictx->blockcipher_spawn, + skcipher_crypto_instance(inst), + blockcipher_name, 0, mask); + if (err) + goto err_free_inst; + blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn); + + /* Require blocksize of 16 bytes */ + err = -EINVAL; + if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE) + goto err_free_inst; + + /* Polyval ε-∆U hash function */ + err = crypto_grab_shash(&ictx->polyval_spawn, + skcipher_crypto_instance(inst), + polyval_name, 0, mask); + if (err) + goto err_free_inst; + polyval_alg = crypto_spawn_shash_alg(&ictx->polyval_spawn); + + /* Ensure Polyval is being used */ + err = -EINVAL; + if (strcmp(polyval_alg->base.cra_name, "polyval") != 0) + goto err_free_inst; + + /* Instance fields */ + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "hctr2(%s)", + blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "hctr2_base(%s,%s)", + xctr_alg->base.cra_driver_name, + polyval_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE; + inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) + + polyval_alg->statesize * 2; + inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask | + polyval_alg->base.cra_alignmask; + /* + * The hash function is called twice, so it is weighted higher than the + * xctr and blockcipher. 
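+	 * The weights 2 (xctr), 4 (polyval) and 1 (blockcipher) sum to 7, so
+	 * the division below keeps the result on the same scale as the
+	 * component priorities.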
+ */ + inst->alg.base.cra_priority = (2 * xctr_alg->base.cra_priority + + 4 * polyval_alg->base.cra_priority + + blockcipher_alg->cra_priority) / 7; + + inst->alg.setkey = hctr2_setkey; + inst->alg.encrypt = hctr2_encrypt; + inst->alg.decrypt = hctr2_decrypt; + inst->alg.init = hctr2_init_tfm; + inst->alg.exit = hctr2_exit_tfm; + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg); + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg); + inst->alg.ivsize = TWEAK_SIZE; + + inst->free = hctr2_free_instance; + + err = skcipher_register_instance(tmpl, inst); + if (err) { +err_free_inst: + hctr2_free_instance(inst); + } + return err; +} + +static int hctr2_create_base(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *xctr_name; + const char *polyval_name; + + xctr_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(xctr_name)) + return PTR_ERR(xctr_name); + + polyval_name = crypto_attr_alg_name(tb[2]); + if (IS_ERR(polyval_name)) + return PTR_ERR(polyval_name); + + return hctr2_create_common(tmpl, tb, xctr_name, polyval_name); +} + +static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + const char *blockcipher_name; + char xctr_name[CRYPTO_MAX_ALG_NAME]; + + blockcipher_name = crypto_attr_alg_name(tb[1]); + if (IS_ERR(blockcipher_name)) + return PTR_ERR(blockcipher_name); + + if (snprintf(xctr_name, CRYPTO_MAX_ALG_NAME, "xctr(%s)", + blockcipher_name) >= CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + return hctr2_create_common(tmpl, tb, xctr_name, "polyval"); +} + +static struct crypto_template hctr2_tmpls[] = { + { + /* hctr2_base(xctr_name, polyval_name) */ + .name = "hctr2_base", + .create = hctr2_create_base, + .module = THIS_MODULE, + }, { + /* hctr2(blockcipher_name) */ + .name = "hctr2", + .create = hctr2_create, + .module = THIS_MODULE, + } +}; + +static int __init hctr2_module_init(void) +{ + return crypto_register_templates(hctr2_tmpls, ARRAY_SIZE(hctr2_tmpls)); +} + +static void __exit hctr2_module_exit(void) +{ + return crypto_unregister_templates(hctr2_tmpls, + ARRAY_SIZE(hctr2_tmpls)); +} + +subsys_initcall(hctr2_module_init); +module_exit(hctr2_module_exit); + +MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("hctr2"); +MODULE_IMPORT_NS(CRYPTO_INTERNAL); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index dd9cf216029b7..336598da8eac1 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2191,6 +2191,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) 16, 16, aead_speed_template_19, num_mb); break; + case 226: + test_cipher_speed("hctr2(aes)", ENCRYPT, sec, NULL, + 0, speed_template_32); + break; + case 300: if (alg) { test_hash_speed(alg, sec, generic_hash_speed_template); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 0f40e260b5a97..4850974610685 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5088,6 +5088,14 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(ghash_tv_template) } + }, { + .alg = "hctr2(aes)", + .generic_driver = + "hctr2_base(xctr(aes-generic),polyval-generic)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aes_hctr2_tv_template) + } }, { .alg = "hmac(md5)", .test = alg_test_hash, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index b58e139303077..808ba07baa041 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -35115,4 +35115,676 @@ static const struct hash_testvec polyval_tv_template[] = { }; +/* + * Test vectors generated using 
https://github.com/google/hctr2 + */ +static const struct cipher_testvec aes_hctr2_tv_template[] = { + { + .key = "\xe1\x15\x66\x3c\x8d\xc6\x3a\xff" + "\xef\x41\xd7\x47\xa2\xcc\x8a\xba", + .iv = "\xc3\xbe\x2a\xcb\xb5\x39\x86\xf1" + "\x91\xad\x6c\xf4\xde\x74\x45\x63" + "\x5c\x7a\xd5\xcc\x8b\x76\xef\x0e" + "\xcf\x2c\x60\x69\x37\xfd\x07\x96", + .ptext = "\x65\x75\xae\xd3\xe2\xbc\x43\x5c" + "\xb3\x1a\xd8\x05\xc3\xd0\x56\x29", + .ctext = "\x11\x91\xea\x74\x58\xcc\xd5\xa2" + "\xd0\x55\x9e\x3d\xfe\x7f\xc8\xfe", + .klen = 16, + .len = 16, + }, + { + .key = "\xe7\xd1\x77\x48\x76\x0b\xcd\x34" + "\x2a\x2d\xe7\x74\xca\x11\x9c\xae", + .iv = "\x71\x1c\x49\x62\xd9\x5b\x50\x5e" + "\x68\x87\xbc\xf6\x89\xff\xed\x30" + "\xe4\xe5\xbd\xb6\x10\x4f\x9f\x66" + "\x28\x06\x5a\xf4\x27\x35\xcd\xe5", + .ptext = "\x87\x03\x8f\x06\xa8\x61\x54\xda" + "\x01\x45\xd4\x01\xef\x4a\x22\xcf" + "\x78\x15\x9f\xbd\x64\xbd\x2c\xb9" + "\x40\x1d\x72\xae\x53\x63\xa5", + .ctext = "\x4e\xa1\x05\x27\xb8\x45\xe4\xa1" + "\xbb\x30\xb4\xa6\x12\x74\x63\xd6" + "\x17\xc9\xcc\x2f\x18\x64\xe0\x06" + "\x0a\xa0\xff\x72\x10\x7b\x22", + .klen = 16, + .len = 31, + }, + { + .key = "\x59\x65\x3b\x1d\x43\x5e\xc0\xae" + "\xb8\x9d\x9b\xdd\x22\x03\xbf\xca", + .iv = "\xec\x95\xfa\x5a\xcf\x5e\xd2\x93" + "\xa3\xb5\xe5\xbe\xf3\x01\x7b\x01" + "\xd1\xca\x6c\x06\x82\xf0\xbd\x67" + "\xd9\x6c\xa4\xdc\xb4\x38\x0f\x74", + .ptext = "\x45\xdf\x75\x87\xbc\x72\xce\x55" + "\xc9\xfa\xcb\xfc\x9f\x40\x82\x2b" + "\xc6\x4f\x4f\x5b\x8b\x3b\x6d\x67" + "\xa6\x93\x62\x89\x8c\x19\xf4\xe3" + "\x08\x92\x9c\xc9\x47\x2c\x6e\xd0" + "\xa3\x02\x2b\xdb\x2c\xf2\x8d\x46" + "\xcd\xb0\x9d\x26\x63\x4c\x40\x6b" + "\x79\x43\xe5\xce\x42\xa8\xec\x3b" + "\x5b\xd0\xea\xa4\xe6\xdb\x66\x55" + "\x7a\x76\xec\xab\x7d\x2a\x2b\xbd" + "\xa9\xab\x22\x64\x1a\xa1\xae\x84" + "\x86\x79\x67\xe9\xb2\x50\xbe\x12" + "\x2f\xb2\x14\xf0\xdb\x71\xd8\xa7" + "\x41\x8a\x88\xa0\x6a\x6e\x9d\x2a" + "\xfa\x11\x37\x40\x32\x09\x4c\x47" + "\x41\x07\x31\x85\x3d\xa8\xf7\x64", + .ctext = "\x2d\x4b\x9f\x93\xca\x5a\x48\x26" + "\x01\xcc\x54\xe4\x31\x50\x12\xf0" + "\x49\xff\x59\x42\x68\xbd\x87\x8f" + "\x9e\x62\x96\xcd\xb9\x24\x57\xa4" + "\x0b\x7b\xf5\x2e\x0e\xa8\x65\x07" + "\xab\x05\xd5\xca\xe7\x9c\x6c\x34" + "\x5d\x42\x34\xa4\x62\xe9\x75\x48" + "\x3d\x9e\x8f\xfa\x42\xe9\x75\x08" + "\x4e\x54\x91\x2b\xbd\x11\x0f\x8e" + "\xf0\x82\xf5\x24\xf1\xc4\xfc\xae" + "\x42\x54\x7f\xce\x15\xa8\xb2\x33" + "\xc0\x86\xb6\x2b\xe8\x44\xce\x1f" + "\x68\x57\x66\x94\x6e\xad\xeb\xf3" + "\x30\xf8\x11\xbd\x60\x00\xc6\xd5" + "\x4c\x81\xf1\x20\x2b\x4a\x5b\x99" + "\x79\x3b\xc9\x5c\x74\x23\xe6\x5d", + .klen = 16, + .len = 128, + }, + { + .key = "\x3e\x08\x5d\x64\x6c\x98\xec\xec" + "\x70\x0e\x0d\xa1\x41\x20\x99\x82", + .iv = "\x11\xb7\x77\x91\x0d\x99\xd9\x8d" + "\x35\x3a\xf7\x14\x6b\x09\x37\xe5" + "\xad\x51\xf6\xc3\x96\x4b\x64\x56" + "\xa8\xbd\x81\xcc\xbe\x94\xaf\xe4", + .ptext = "\xff\x8d\xb9\xc0\xe3\x69\xb3\xb2" + "\x8b\x11\x26\xb3\x11\xec\xfb\xb9" + "\x9c\xc1\x71\xd6\xe3\x26\x0e\xe0" + "\x68\x40\x60\xb9\x3a\x63\x56\x8a" + "\x9e\xc1\xf0\x10\xb1\x64\x32\x70" + "\xf8\xcd\xc6\xc4\x49\x4c\xe1\xce" + "\xf3\xe1\x03\xf8\x35\xae\xe0\x5e" + "\xef\x5f\xbc\x41\x75\x26\x13\xcc" + "\x37\x85\xdf\xc0\x5d\xa6\x47\x98" + "\xf1\x97\x52\x58\x04\xe6\xb5\x01" + "\xc0\xb8\x17\x6d\x74\xbd\x9a\xdf" + "\xa4\x37\x94\x86\xb0\x13\x83\x28" + "\xc9\xa2\x07\x3f\xb5\xb2\x72\x40" + "\x0e\x60\xdf\x57\x07\xb7\x2c\x66" + "\x10\x3f\x8d\xdd\x30\x0a\x47\xd5" + "\xe8\x9d\xfb\xa1\xaf\x53\xd7\x05" + "\xc7\xd2\xba\xe7\x2c\xa0\xbf\xb8" + "\xd1\x93\xe7\x41\x82\xa3\x41\x3a" + "\xaf\x12\xd6\xf8\x34\xda\x92\x46" 
+ "\xad\xa2\x2f\xf6\x7e\x46\x96\xd8" + "\x03\xf3\x49\x64\xde\xd8\x06\x8b" + "\xa0\xbc\x63\x35\x38\xb6\x6b\xda" + "\x5b\x50\x3f\x13\xa5\x84\x1b\x1b" + "\x66\x89\x95\xb7\xc2\x16\x3c\xe9" + "\x24\xb0\x8c\x6f\x49\xef\xf7\x28" + "\x6a\x24\xfd\xbe\x25\xe2\xb4\x90" + "\x77\x44\x08\xb8\xda\xd2\xde\x2c" + "\xa0\x57\x45\x57\x29\x47\x6b\x89" + "\x4a\xf6\xa7\x2a\xc3\x9e\x7b\xc8" + "\xfd\x9f\x89\xab\xee\x6d\xa3\xb4" + "\x23\x90\x7a\xe9\x89\xa0\xc7\xb3" + "\x17\x41\x87\x91\xfc\x97\x42", + .ctext = "\xfc\x9b\x96\x66\xc4\x82\x2a\x4a" + "\xb1\x24\xba\xc7\x78\x5f\x79\xc1" + "\x57\x2e\x47\x29\x4d\x7b\xd2\x9a" + "\xbd\xc6\xc1\x26\x7b\x8e\x3f\x5d" + "\xd4\xb4\x9f\x6a\x02\x24\x4a\xad" + "\x0c\x00\x1b\xdf\x92\xc5\x8a\xe1" + "\x77\x79\xcc\xd5\x20\xbf\x83\xf4" + "\x4b\xad\x11\xbf\xdb\x47\x65\x70" + "\x43\xf3\x65\xdf\xb7\xdc\xb2\xb9" + "\xaa\x3f\xb3\xdf\x79\x69\x0d\xa0" + "\x86\x1c\xba\x48\x0b\x01\xc1\x88" + "\xdf\x03\xb1\x06\x3c\x1d\x56\xa1" + "\x8e\x98\xc1\xa6\x95\xa2\x5b\x72" + "\x76\x59\xd2\x26\x25\xcd\xef\x7c" + "\xc9\x60\xea\x43\xd1\x12\x8a\x8a" + "\x63\x12\x78\xcb\x2f\x88\x1e\x88" + "\x78\x59\xde\xba\x4d\x2c\x78\x61" + "\x75\x37\x54\xfd\x80\xc7\x5e\x98" + "\xcf\x14\x62\x8e\xfb\x72\xee\x4d" + "\x9f\xaf\x8b\x09\xe5\x21\x0a\x91" + "\x8f\x88\x87\xd5\xb1\x84\xab\x18" + "\x08\x57\xed\x72\x35\xa6\x0e\xc6" + "\xff\xcb\xfe\x2c\x48\x39\x14\x44" + "\xba\x59\x32\x3a\x2d\xc4\x5f\xcb" + "\xbe\x68\x8e\x7b\xee\x21\xa4\x32" + "\x11\xa0\x99\xfd\x90\xde\x59\x43" + "\xeb\xed\xd5\x87\x68\x46\xc6\xde" + "\x0b\x07\x17\x59\x6a\xab\xca\x15" + "\x65\x02\x01\xb6\x71\x8c\x3b\xaa" + "\x18\x3b\x30\xae\x38\x5b\x2c\x74" + "\xd4\xee\x4a\xfc\xf7\x1b\x09\xd4" + "\xda\x8b\x1d\x5d\x6f\x21\x6c", + .klen = 16, + .len = 255, + }, + { + .key = "\x24\xf6\xe1\x62\xe5\xaf\x99\xda" + "\x84\xec\x41\xb0\xa3\x0b\xd5\xa8" + "\xa0\x3e\x7b\xa6\xdd\x6c\x8f\xa8", + .iv = "\x7f\x80\x24\x62\x32\xdd\xab\x66" + "\xf2\x87\x29\x24\xec\xd2\x4b\x9f" + "\x0c\x33\x52\xd9\xe0\xcc\x6e\xe4" + "\x90\x85\x43\x97\xc4\x62\x14\x33", + .ptext = "\xef\x58\xe7\x7f\xa9\xd9\xb8\xd7" + "\xa2\x91\x97\x07\x27\x9e\xba\xe8" + "\xaa", + .ctext = "\xd7\xc3\x81\x91\xf2\x40\x17\x73" + "\x3e\x3b\x1c\x2a\x8e\x11\x9c\x17" + "\xf1", + .klen = 24, + .len = 17, + }, + { + .key = "\xbf\xaf\xd7\x67\x8c\x47\xcf\x21" + "\x8a\xa5\xdd\x32\x25\x47\xbe\x4f" + "\xf1\x3a\x0b\xa6\xaa\x2d\xcf\x09", + .iv = "\xd9\xe8\xf0\x92\x4e\xfc\x1d\xf2" + "\x81\x37\x7c\x8f\xf1\x59\x09\x20" + "\xf4\x46\x51\x86\x4f\x54\x8b\x32" + "\x58\xd1\x99\x8b\x8c\x03\xeb\x5d", + .ptext = "\xcd\x64\x90\xf9\x7c\xe5\x0e\x5a" + "\x75\xe7\x8e\x39\x86\xec\x20\x43" + "\x8a\x49\x09\x15\x47\xf4\x3c\x89" + "\x21\xeb\xcf\x4e\xcf\x91\xb5\x40" + "\xcd\xe5\x4d\x5c\x6f\xf2\xd2\x80" + "\xfa\xab\xb3\x76\x9f\x7f\x84\x0a", + .ctext = "\x44\x98\x64\x15\xb7\x0b\x80\xa3" + "\xb9\xca\x23\xff\x3b\x0b\x68\x74" + "\xbb\x3e\x20\x19\x9f\x28\x71\x2a" + "\x48\x3c\x7c\xe2\xef\xb5\x10\xac" + "\x82\x9f\xcd\x08\x8f\x6b\x16\x6f" + "\xc3\xbb\x07\xfb\x3c\xb0\x1b\x27", + .klen = 24, + .len = 48, + }, + { + .key = "\xb8\x35\xa2\x5f\x86\xbb\x82\x99" + "\x27\xeb\x01\x3f\x92\xaf\x80\x24" + "\x4c\x66\xa2\x89\xff\x2e\xa2\x25", + .iv = "\x0a\x1d\x96\xd3\xe0\xe8\x0c\x9b" + "\x9d\x6f\x21\x97\xc2\x17\xdb\x39" + "\x3f\xd8\x64\x48\x80\x04\xee\x43" + "\x02\xce\x88\xe2\x81\x81\x5f\x81", + .ptext = "\xb8\xf9\x16\x8b\x25\x68\xd0\x9c" + "\xd2\x28\xac\xa8\x79\xc2\x30\xc1" + "\x31\xde\x1c\x37\x1b\xa2\xb5\xe6" + "\xf0\xd0\xf8\x9c\x7f\xc6\x46\x07" + "\x5c\xc3\x06\xe4\xf0\x02\xec\xf8" + "\x59\x7c\xc2\x5d\xf8\x0c\x21\xae" + "\x9e\x82\xb1\x1a\x5f\x78\x44\x15" + "\x00\xa7\x2e\x52\xc5\x98\x98\x35" 
+ "\x03\xae\xd0\x8e\x07\x57\xe2\x5a" + "\x17\xbf\x52\x40\x54\x5b\x74\xe5" + "\x2d\x35\xaf\x9e\x37\xf7\x7e\x4a" + "\x8c\x9e\xa1\xdc\x40\xb4\x5b\x36" + "\xdc\x3a\x68\xe6\xb7\x35\x0b\x8a" + "\x90\xec\x74\x8f\x09\x9a\x7f\x02" + "\x4d\x03\x46\x35\x62\xb1\xbd\x08" + "\x3f\x54\x2a\x10\x0b\xdc\x69\xaf" + "\x25\x3a\x0c\x5f\xe0\x51\xe7\x11" + "\xb7\x00\xab\xbb\x9a\xb0\xdc\x4d" + "\xc3\x7d\x1a\x6e\xd1\x09\x52\xbd" + "\x6b\x43\x55\x22\x3a\x78\x14\x7d" + "\x79\xfd\x8d\xfc\x9b\x1d\x0f\xa2" + "\xc7\xb9\xf8\x87\xd5\x96\x50\x61" + "\xa7\x5e\x1e\x57\x97\xe0\xad\x2f" + "\x93\xe6\xe8\x83\xec\x85\x26\x5e" + "\xd9\x2a\x15\xe0\xe9\x09\x25\xa1" + "\x77\x2b\x88\xdc\xa4\xa5\x48\xb6" + "\xf7\xcc\xa6\xa9\xba\xf3\x42\x5c" + "\x70\x9d\xe9\x29\xc1\xf1\x33\xdd" + "\x56\x48\x17\x86\x14\x51\x5c\x10" + "\xab\xfd\xd3\x26\x8c\x21\xf5\x93" + "\x1b\xeb\x47\x97\x73\xbb\x88\x10" + "\xf3\xfe\xf5\xde\xf3\x2e\x05\x46" + "\x1c\x0d\xa3\x10\x48\x9c\x71\x16" + "\x78\x33\x4d\x0a\x74\x3b\xe9\x34" + "\x0b\xa7\x0e\x9e\x61\xe9\xe9\xfd" + "\x85\xa0\xcb\x19\xfd\x7c\x33\xe3" + "\x0e\xce\xc2\x6f\x9d\xa4\x2d\x77" + "\xfd\xad\xee\x5e\x08\x3e\xd7\xf5" + "\xfb\xc3\xd7\x93\x96\x08\x96\xca" + "\x58\x81\x16\x9b\x98\x0a\xe2\xef" + "\x7f\xda\x40\xe4\x1f\x46\x9e\x67" + "\x2b\x84\xcb\x42\xc4\xd6\x6a\xcf" + "\x2d\xb2\x33\xc0\x56\xb3\x35\x6f" + "\x29\x36\x8f\x6a\x5b\xec\xd5\x4f" + "\xa0\x70\xff\xb6\x5b\xde\x6a\x93" + "\x20\x3c\xe2\x76\x7a\xef\x3c\x79" + "\x31\x65\xce\x3a\x0e\xd0\xbe\xa8" + "\x21\x95\xc7\x2b\x62\x8e\x67\xdd" + "\x20\x79\xe4\xe5\x01\x15\xc0\xec" + "\x0f\xd9\x23\xc8\xca\xdf\xd4\x7d" + "\x1d\xf8\x64\x4f\x56\xb1\x83\xa7" + "\x43\xbe\xfc\xcf\xc2\x8c\x33\xda" + "\x36\xd0\x52\xef\x9e\x9e\x88\xf4" + "\xa8\x21\x0f\xaa\xee\x8d\xa0\x24" + "\x4d\xcb\xb1\x72\x07\xf0\xc2\x06" + "\x60\x65\x85\x84\x2c\x60\xcf\x61" + "\xe7\x56\x43\x5b\x2b\x50\x74\xfa" + "\xdb\x4e\xea\x88\xd4\xb3\x83\x8f" + "\x6f\x97\x4b\x57\x7a\x64\x64\xae" + "\x0a\x37\x66\xc5\x03\xad\xb5\xf9" + "\x08\xb0\x3a\x74\xde\x97\x51\xff" + "\x48\x4f\x5c\xa4\xf8\x7a\xb4\x05" + "\x27\x70\x52\x86\x1b\x78\xfc\x18" + "\x06\x27\xa9\x62\xf7\xda\xd2\x8e", + .ctext = "\x3b\xe1\xdb\xb3\xc5\x9a\xde\x69" + "\x58\x05\xcc\xeb\x02\x51\x78\x4a" + "\xac\x28\xe9\xed\xd1\xc9\x15\x7d" + "\x33\x7d\xc1\x47\x12\x41\x11\xf8" + "\x4a\x2c\xb7\xa3\x41\xbe\x59\xf7" + "\x22\xdb\x2c\xda\x9c\x00\x61\x9b" + "\x73\xb3\x0b\x84\x2b\xc1\xf3\x80" + "\x84\xeb\x19\x60\x80\x09\xe1\xcd" + "\x16\x3a\x20\x23\xc4\x82\x4f\xba" + "\x3b\x8e\x55\xd7\xa9\x0b\x75\xd0" + "\xda\xce\xd2\xee\x7e\x4b\x7f\x65" + "\x4d\x28\xc5\xd3\x15\x2c\x40\x96" + "\x52\xd4\x18\x61\x2b\xe7\x83\xec" + "\x89\x62\x9c\x4c\x50\xe6\xe2\xbb" + "\x25\xa1\x0f\xa7\xb0\xb4\xb2\xde" + "\x54\x20\xae\xa3\x56\xa5\x26\x4c" + "\xd5\xcc\xe5\xcb\x28\x44\xb1\xef" + "\x67\x2e\x93\x6d\x00\x88\x83\x9a" + "\xf2\x1c\x48\x38\xec\x1a\x24\x90" + "\x73\x0a\xdb\xe8\xce\x95\x7a\x2c" + "\x8c\xe9\xb7\x07\x1d\xb3\xa3\x20" + "\xbe\xad\x61\x84\xac\xde\x76\xb5" + "\xa6\x28\x29\x47\x63\xc4\xfc\x13" + "\x3f\x71\xfb\x58\x37\x34\x82\xed" + "\x9e\x05\x19\x1f\xc1\x67\xc1\xab" + "\xf5\xfd\x7c\xea\xfa\xa4\xf8\x0a" + "\xac\x4c\x92\xdf\x65\x73\xd7\xdb" + "\xed\x2c\xe0\x84\x5f\x57\x8c\x76" + "\x3e\x05\xc0\xc3\x68\x96\x95\x0b" + "\x88\x97\xfe\x2e\x99\xd5\xc2\xb9" + "\x53\x9f\xf3\x32\x10\x1f\x1f\x5d" + "\xdf\x21\x95\x70\x91\xe8\xa1\x3e" + "\x19\x3e\xb6\x0b\xa8\xdb\xf8\xd4" + "\x54\x27\xb8\xab\x5d\x78\x0c\xe6" + "\xb7\x08\xee\xa4\xb6\x6b\xeb\x5a" + "\x89\x69\x2b\xbd\xd4\x21\x5b\xbf" + "\x79\xbb\x0f\xff\xdb\x23\x9a\xeb" + "\x8d\xf2\xc4\x39\xb4\x90\x77\x6f" + "\x68\xe2\xb8\xf3\xf1\x65\x4f\xd5" + 
"\x24\x80\x06\xaf\x7c\x8d\x15\x0c" + "\xfd\x56\xe5\xe3\x01\xa5\xf7\x1c" + "\x31\xd6\xa2\x01\x1e\x59\xf9\xa9" + "\x42\xd5\xc2\x34\xda\x25\xde\xc6" + "\x5d\x38\xef\xd1\x4c\xc1\xd9\x1b" + "\x98\xfd\xcd\x57\x6f\xfd\x46\x91" + "\x90\x3d\x52\x2b\x2c\x7d\xcf\x71" + "\xcf\xd1\x77\x23\x71\x36\xb1\xce" + "\xc7\x5d\xf0\x5b\x44\x3d\x43\x71" + "\xac\xb8\xa0\x6a\xea\x89\x5c\xff" + "\x81\x73\xd4\x83\xd1\xc9\xe9\xe2" + "\xa8\xa6\x0f\x36\xe6\xaa\x57\xd4" + "\x27\xd2\xc9\xda\x94\x02\x1f\xfb" + "\xe1\xa1\x07\xbe\xe1\x1b\x15\x94" + "\x1e\xac\x2f\x57\xbb\x41\x22\xaf" + "\x60\x5e\xcc\x66\xcb\x16\x62\xab" + "\xb8\x7c\x99\xf4\x84\x93\x0c\xc2" + "\xa2\x49\xe4\xfd\x17\x55\xe1\xa6" + "\x8d\x5b\xc6\x1b\xc8\xac\xec\x11" + "\x33\xcf\xb0\xe8\xc7\x28\x4f\xb2" + "\x5c\xa6\xe2\x71\xab\x80\x0a\xa7" + "\x5c\x59\x50\x9f\x7a\x32\xb7\xe5" + "\x24\x9a\x8e\x25\x21\x2e\xb7\x18" + "\xd0\xf2\xe7\x27\x6f\xda\xc1\x00" + "\xd9\xa6\x03\x59\xac\x4b\xcb\xba", + .klen = 24, + .len = 512, + }, + { + .key = "\x9e\xeb\xb2\x49\x3c\x1c\xf5\xf4" + "\x6a\x99\xc2\xc4\xdf\xb1\xf4\xdd" + "\x75\x20\x57\xea\x2c\x4f\xcd\xb2" + "\xa5\x3d\x7b\x49\x1e\xab\xfd\x0f", + .iv = "\xdf\x63\xd4\xab\xd2\x49\xf3\xd8" + "\x33\x81\x37\x60\x7d\xfa\x73\x08" + "\xd8\x49\x6d\x80\xe8\x2f\x62\x54" + "\xeb\x0e\xa9\x39\x5b\x45\x7f\x8a", + .ptext = "\x67\xc9\xf2\x30\x84\x41\x8e\x43" + "\xfb\xf3\xb3\x3e\x79\x36\x7f\xe8", + .ctext = "\x27\x38\x78\x47\x16\xd9\x71\x35" + "\x2e\x7e\xdd\x7e\x43\x3c\xb8\x40", + .klen = 32, + .len = 16, + }, + { + .key = "\x93\xfa\x7e\xe2\x0e\x67\xc4\x39" + "\xe7\xca\x47\x95\x68\x9d\x5e\x5a" + "\x7c\x26\x19\xab\xc6\xca\x6a\x4c" + "\x45\xa6\x96\x42\xae\x6c\xff\xe7", + .iv = "\xea\x82\x47\x95\x3b\x22\xa1\x3a" + "\x6a\xca\x24\x4c\x50\x7e\x23\xcd" + "\x0e\x50\xe5\x41\xb6\x65\x29\xd8" + "\x30\x23\x00\xd2\x54\xa7\xd6\x56", + .ptext = "\xdb\x1f\x1f\xec\xad\x83\x6e\x5d" + "\x19\xa5\xf6\x3b\xb4\x93\x5a\x57" + "\x6f", + .ctext = "\xf1\x46\x6e\x9d\xb3\x01\xf0\x6b" + "\xc2\xac\x57\x88\x48\x6d\x40\x72" + "\x68", + .klen = 32, + .len = 17, + }, + { + .key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99" + "\x5f\x1a\x5a\x44\x1d\x92\x0f\x27" + "\xcc\x16\xd7\x2b\x85\x63\x99\xd3" + "\xba\x96\xa1\xdb\xd2\x60\x68\xda", + .iv = "\xef\x58\x69\xb1\x2c\x5e\x9a\x47" + "\x24\xc1\xb1\x69\xe1\x12\x93\x8f" + "\x43\x3d\x6d\x00\xdb\x5e\xd8\xd9" + "\x12\x9a\xfe\xd9\xff\x2d\xaa\xc4", + .ptext = "\x5e\xa8\x68\x19\x85\x98\x12\x23" + "\x26\x0a\xcc\xdb\x0a\x04\xb9\xdf" + "\x4d\xb3\x48\x7b\xb0\xe3\xc8\x19" + "\x43\x5a\x46\x06\x94\x2d\xf2", + .ctext = "\xdb\xfd\xc8\x03\xd0\xec\xc1\xfe" + "\xbd\x64\x37\xb8\x82\x43\x62\x4e" + "\x7e\x54\xa3\xe2\x24\xa7\x27\xe8" + "\xa4\xd5\xb3\x6c\xb2\x26\xb4", + .klen = 32, + .len = 31, + }, + { + .key = "\x03\x65\x03\x6e\x4d\xe6\xe8\x4e" + "\x8b\xbe\x22\x19\x48\x31\xee\xd9" + "\xa0\x91\x21\xbe\x62\x89\xde\x78" + "\xd9\xb0\x36\xa3\x3c\xce\x43\xd5", + .iv = "\xa9\xc3\x4b\xe7\x0f\xfc\x6d\xbf" + "\x56\x27\x21\x1c\xfc\xd6\x04\x10" + "\x5f\x43\xe2\x30\x35\x29\x6c\x10" + "\x90\xf1\xbf\x61\xed\x0f\x8a\x91", + .ptext = "\x07\xaa\x02\x26\xb4\x98\x11\x5e" + "\x33\x41\x21\x51\x51\x63\x2c\x72" + "\x00\xab\x32\xa7\x1c\xc8\x3c\x9c" + "\x25\x0e\x8b\x9a\xdf\x85\xed\x2d" + "\xf4\xf2\xbc\x55\xca\x92\x6d\x22" + "\xfd\x22\x3b\x42\x4c\x0b\x74\xec", + .ctext = "\x7b\xb1\x43\x6d\xd8\x72\x6c\xf6" + "\x67\x6a\x00\xc4\xf1\xf0\xf5\xa4" + "\xfc\x60\x91\xab\x46\x0b\x15\xfc" + "\xd7\xc1\x28\x15\xa1\xfc\xf7\x68" + "\x8e\xcc\x27\x62\x00\x64\x56\x72" + "\xa6\x17\xd7\x3f\x67\x80\x10\x58", + .klen = 32, + .len = 48, + }, + { + .key = "\xa5\x28\x24\x34\x1a\x3c\xd8\xf7" + 
"\x05\x91\x8f\xee\x85\x1f\x35\x7f" + "\x80\x3d\xfc\x9b\x94\xf6\xfc\x9e" + "\x19\x09\x00\xa9\x04\x31\x4f\x11", + .iv = "\xa1\xba\x49\x95\xff\x34\x6d\xb8" + "\xcd\x87\x5d\x5e\xfd\xea\x85\xdb" + "\x8a\x7b\x5e\xb2\x5d\x57\xdd\x62" + "\xac\xa9\x8c\x41\x42\x94\x75\xb7", + .ptext = "\x69\xb4\xe8\x8c\x37\xe8\x67\x82" + "\xf1\xec\x5d\x04\xe5\x14\x91\x13" + "\xdf\xf2\x87\x1b\x69\x81\x1d\x71" + "\x70\x9e\x9c\x3b\xde\x49\x70\x11" + "\xa0\xa3\xdb\x0d\x54\x4f\x66\x69" + "\xd7\xdb\x80\xa7\x70\x92\x68\xce" + "\x81\x04\x2c\xc6\xab\xae\xe5\x60" + "\x15\xe9\x6f\xef\xaa\x8f\xa7\xa7" + "\x63\x8f\xf2\xf0\x77\xf1\xa8\xea" + "\xe1\xb7\x1f\x9e\xab\x9e\x4b\x3f" + "\x07\x87\x5b\x6f\xcd\xa8\xaf\xb9" + "\xfa\x70\x0b\x52\xb8\xa8\xa7\x9e" + "\x07\x5f\xa6\x0e\xb3\x9b\x79\x13" + "\x79\xc3\x3e\x8d\x1c\x2c\x68\xc8" + "\x51\x1d\x3c\x7b\x7d\x79\x77\x2a" + "\x56\x65\xc5\x54\x23\x28\xb0\x03", + .ctext = "\xeb\xf9\x98\x86\x3c\x40\x9f\x16" + "\x84\x01\xf9\x06\x0f\xeb\x3c\xa9" + "\x4c\xa4\x8e\x5d\xc3\x8d\xe5\xd3" + "\xae\xa6\xe6\xcc\xd6\x2d\x37\x4f" + "\x99\xc8\xa3\x21\x46\xb8\x69\xf2" + "\xe3\x14\x89\xd7\xb9\xf5\x9e\x4e" + "\x07\x93\x6f\x78\x8e\x6b\xea\x8f" + "\xfb\x43\xb8\x3e\x9b\x4c\x1d\x7e" + "\x20\x9a\xc5\x87\xee\xaf\xf6\xf9" + "\x46\xc5\x18\x8a\xe8\x69\xe7\x96" + "\x52\x55\x5f\x00\x1e\x1a\xdc\xcc" + "\x13\xa5\xee\xff\x4b\x27\xca\xdc" + "\x10\xa6\x48\x76\x98\x43\x94\xa3" + "\xc7\xe2\xc9\x65\x9b\x08\x14\x26" + "\x1d\x68\xfb\x15\x0a\x33\x49\x84" + "\x84\x33\x5a\x1b\x24\x46\x31\x92", + .klen = 32, + .len = 128, + }, + { + .key = "\x36\x45\x11\xa2\x98\x5f\x96\x7c" + "\xc6\xb4\x94\x31\x0a\x67\x09\x32" + "\x6c\x6f\x6f\x00\xf0\x17\xcb\xac" + "\xa5\xa9\x47\x9e\x2e\x85\x2f\xfa", + .iv = "\x28\x88\xaa\x9b\x59\x3b\x1e\x97" + "\x82\xe5\x5c\x9e\x6d\x14\x11\x19" + "\x6e\x38\x8f\xd5\x40\x2b\xca\xf9" + "\x7b\x4c\xe4\xa3\xd0\xd2\x8a\x13", + .ptext = "\x95\xd2\xf7\x71\x1b\xca\xa5\x86" + "\xd9\x48\x01\x93\x2f\x79\x55\x29" + "\x71\x13\x15\x0e\xe6\x12\xbc\x4d" + "\x8a\x31\xe3\x40\x2a\xc6\x5e\x0d" + "\x68\xbb\x4a\x62\x8d\xc7\x45\x77" + "\xd2\xb8\xc7\x1d\xf1\xd2\x5d\x97" + "\xcf\xac\x52\xe5\x32\x77\xb6\xda" + "\x30\x85\xcf\x2b\x98\xe9\xaa\x34" + "\x62\xb5\x23\x9e\xb7\xa6\xd4\xe0" + "\xb4\x58\x18\x8c\x4d\xde\x4d\x01" + "\x83\x89\x24\xca\xfb\x11\xd4\x82" + "\x30\x7a\x81\x35\xa0\xb4\xd4\xb6" + "\x84\xea\x47\x91\x8c\x19\x86\x25" + "\xa6\x06\x8d\x78\xe6\xed\x87\xeb" + "\xda\xea\x73\x7c\xbf\x66\xb8\x72" + "\xe3\x0a\xb8\x0c\xcb\x1a\x73\xf1" + "\xa7\xca\x0a\xde\x57\x2b\xbd\x2b" + "\xeb\x8b\x24\x38\x22\xd3\x0e\x1f" + "\x17\xa0\x84\x98\x31\x77\xfd\x34" + "\x6a\x4e\x3d\x84\x4c\x0e\xfb\xed" + "\xc8\x2a\x51\xfa\xd8\x73\x21\x8a" + "\xdb\xb5\xfe\x1f\xee\xc4\xe8\x65" + "\x54\x84\xdd\x96\x6d\xfd\xd3\x31" + "\x77\x36\x52\x6b\x80\x4f\x9e\xb4" + "\xa2\x55\xbf\x66\x41\x49\x4e\x87" + "\xa7\x0c\xca\xe7\xa5\xc5\xf6\x6f" + "\x27\x56\xe2\x48\x22\xdd\x5f\x59" + "\x3c\xf1\x9f\x83\xe5\x2d\xfb\x71" + "\xad\xd1\xae\x1b\x20\x5c\x47\xb7" + "\x3b\xd3\x14\xce\x81\x42\xb1\x0a" + "\xf0\x49\xfa\xc2\xe7\x86\xbf\xcd" + "\xb0\x95\x9f\x8f\x79\x41\x54", + .ctext = "\xf6\x57\x51\xc4\x25\x61\x2d\xfa" + "\xd6\xd9\x3f\x9a\x81\x51\xdd\x8e" + "\x3d\xe7\xaa\x2d\xb1\xda\xc8\xa6" + "\x9d\xaa\x3c\xab\x62\xf2\x80\xc3" + "\x2c\xe7\x58\x72\x1d\x44\xc5\x28" + "\x7f\xb4\xf9\xbc\x9c\xb2\xab\x8e" + "\xfa\xd1\x4d\x72\xd9\x79\xf5\xa0" + "\x24\x3e\x90\x25\x31\x14\x38\x45" + "\x59\xc8\xf6\xe2\xc6\xf6\xc1\xa7" + "\xb2\xf8\xa7\xa9\x2b\x6f\x12\x3a" + "\xb0\x81\xa4\x08\x57\x59\xb1\x56" + "\x4c\x8f\x18\x55\x33\x5f\xd6\x6a" + "\xc6\xa0\x4b\xd6\x6b\x64\x3e\x9e" + "\xfd\x66\x16\xe2\xdb\xeb\x5f\xb3" + 
"\x50\x50\x3e\xde\x8d\x72\x76\x01" + "\xbe\xcc\xc9\x52\x09\x2d\x8d\xe7" + "\xd6\xc3\x66\xdb\x36\x08\xd1\x77" + "\xc8\x73\x46\x26\x24\x29\xbf\x68" + "\x2d\x2a\x99\x43\x56\x55\xe4\x93" + "\xaf\xae\x4d\xe7\x55\x4a\xc0\x45" + "\x26\xeb\x3b\x12\x90\x7c\xdc\xd1" + "\xd5\x6f\x0a\xd0\xa9\xd7\x4b\x89" + "\x0b\x07\xd8\x86\xad\xa1\xc4\x69" + "\x1f\x5e\x8b\xc4\x9e\x91\x41\x25" + "\x56\x98\x69\x78\x3a\x9e\xae\x91" + "\xd8\xd9\xfa\xfb\xff\x81\x25\x09" + "\xfc\xed\x2d\x87\xbc\x04\x62\x97" + "\x35\xe1\x26\xc2\x46\x1c\xcf\xd7" + "\x14\xed\x02\x09\xa5\xb2\xb6\xaa" + "\x27\x4e\x61\xb3\x71\x6b\x47\x16" + "\xb7\xe8\xd4\xaf\x52\xeb\x6a\x6b" + "\xdb\x4c\x65\x21\x9e\x1c\x36", + .klen = 32, + .len = 255, + }, + { + .key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a" + "\x25\x74\x29\x0d\x51\x8a\x0e\x13" + "\xc1\x53\x5d\x30\x8d\xee\x75\x0d" + "\x14\xd6\x69\xc9\x15\xa9\x0c\x60", + .iv = "\x65\x9b\xd4\xa8\x7d\x29\x1d\xf4" + "\xc4\xd6\x9b\x6a\x28\xab\x64\xe2" + "\x62\x81\x97\xc5\x81\xaa\xf9\x44" + "\xc1\x72\x59\x82\xaf\x16\xc8\x2c", + .ptext = "\xc7\x6b\x52\x6a\x10\xf0\xcc\x09" + "\xc1\x12\x1d\x6d\x21\xa6\x78\xf5" + "\x05\xa3\x69\x60\x91\x36\x98\x57" + "\xba\x0c\x14\xcc\xf3\x2d\x73\x03" + "\xc6\xb2\x5f\xc8\x16\x27\x37\x5d" + "\xd0\x0b\x87\xb2\x50\x94\x7b\x58" + "\x04\xf4\xe0\x7f\x6e\x57\x8e\xc9" + "\x41\x84\xc1\xb1\x7e\x4b\x91\x12" + "\x3a\x8b\x5d\x50\x82\x7b\xcb\xd9" + "\x9a\xd9\x4e\x18\x06\x23\x9e\xd4" + "\xa5\x20\x98\xef\xb5\xda\xe5\xc0" + "\x8a\x6a\x83\x77\x15\x84\x1e\xae" + "\x78\x94\x9d\xdf\xb7\xd1\xea\x67" + "\xaa\xb0\x14\x15\xfa\x67\x21\x84" + "\xd3\x41\x2a\xce\xba\x4b\x4a\xe8" + "\x95\x62\xa9\x55\xf0\x80\xad\xbd" + "\xab\xaf\xdd\x4f\xa5\x7c\x13\x36" + "\xed\x5e\x4f\x72\xad\x4b\xf1\xd0" + "\x88\x4e\xec\x2c\x88\x10\x5e\xea" + "\x12\xc0\x16\x01\x29\xa3\xa0\x55" + "\xaa\x68\xf3\xe9\x9d\x3b\x0d\x3b" + "\x6d\xec\xf8\xa0\x2d\xf0\x90\x8d" + "\x1c\xe2\x88\xd4\x24\x71\xf9\xb3" + "\xc1\x9f\xc5\xd6\x76\x70\xc5\x2e" + "\x9c\xac\xdb\x90\xbd\x83\x72\xba" + "\x6e\xb5\xa5\x53\x83\xa9\xa5\xbf" + "\x7d\x06\x0e\x3c\x2a\xd2\x04\xb5" + "\x1e\x19\x38\x09\x16\xd2\x82\x1f" + "\x75\x18\x56\xb8\x96\x0b\xa6\xf9" + "\xcf\x62\xd9\x32\x5d\xa9\xd7\x1d" + "\xec\xe4\xdf\x1b\xbe\xf1\x36\xee" + "\xe3\x7b\xb5\x2f\xee\xf8\x53\x3d" + "\x6a\xb7\x70\xa9\xfc\x9c\x57\x25" + "\xf2\x89\x10\xd3\xb8\xa8\x8c\x30" + "\xae\x23\x4f\x0e\x13\x66\x4f\xe1" + "\xb6\xc0\xe4\xf8\xef\x93\xbd\x6e" + "\x15\x85\x6b\xe3\x60\x81\x1d\x68" + "\xd7\x31\x87\x89\x09\xab\xd5\x96" + "\x1d\xf3\x6d\x67\x80\xca\x07\x31" + "\x5d\xa7\xe4\xfb\x3e\xf2\x9b\x33" + "\x52\x18\xc8\x30\xfe\x2d\xca\x1e" + "\x79\x92\x7a\x60\x5c\xb6\x58\x87" + "\xa4\x36\xa2\x67\x92\x8b\xa4\xb7" + "\xf1\x86\xdf\xdc\xc0\x7e\x8f\x63" + "\xd2\xa2\xdc\x78\xeb\x4f\xd8\x96" + "\x47\xca\xb8\x91\xf9\xf7\x94\x21" + "\x5f\x9a\x9f\x5b\xb8\x40\x41\x4b" + "\x66\x69\x6a\x72\xd0\xcb\x70\xb7" + "\x93\xb5\x37\x96\x05\x37\x4f\xe5" + "\x8c\xa7\x5a\x4e\x8b\xb7\x84\xea" + "\xc7\xfc\x19\x6e\x1f\x5a\xa1\xac" + "\x18\x7d\x52\x3b\xb3\x34\x62\x99" + "\xe4\x9e\x31\x04\x3f\xc0\x8d\x84" + "\x17\x7c\x25\x48\x52\x67\x11\x27" + "\x67\xbb\x5a\x85\xca\x56\xb2\x5c" + "\xe6\xec\xd5\x96\x3d\x15\xfc\xfb" + "\x22\x25\xf4\x13\xe5\x93\x4b\x9a" + "\x77\xf1\x52\x18\xfa\x16\x5e\x49" + "\x03\x45\xa8\x08\xfa\xb3\x41\x92" + "\x79\x50\x33\xca\xd0\xd7\x42\x55" + "\xc3\x9a\x0c\x4e\xd9\xa4\x3c\x86" + "\x80\x9f\x53\xd1\xa4\x2e\xd1\xbc" + "\xf1\x54\x6e\x93\xa4\x65\x99\x8e" + "\xdf\x29\xc0\x64\x63\x07\xbb\xea", + .ctext = "\x9f\x72\x87\xc7\x17\xfb\x20\x15" + "\x65\xb3\x55\xa8\x1c\x8e\x52\x32" + "\xb1\x82\x8d\xbf\xb5\x9f\x10\x0a" + "\xe8\x0c\x70\x62\xef\x89\xb6\x1f" + 
"\x73\xcc\xe4\xcc\x7a\x3a\x75\x4a" + "\x26\xe7\xf5\xd7\x7b\x17\x39\x2d" + "\xd2\x27\x6e\xf9\x2f\x9e\xe2\xf6" + "\xfa\x16\xc2\xf2\x49\x26\xa7\x5b" + "\xe7\xca\x25\x0e\x45\xa0\x34\xc2" + "\x9a\x37\x79\x7e\x7c\x58\x18\x94" + "\x10\xa8\x7c\x48\xa9\xd7\x63\x89" + "\x9e\x61\x4d\x26\x34\xd9\xf0\xb1" + "\x2d\x17\x2c\x6f\x7c\x35\x0e\xbe" + "\x77\x71\x7c\x17\x5b\xab\x70\xdb" + "\x2f\x54\x0f\xa9\xc8\xf4\xf5\xab" + "\x52\x04\x3a\xb8\x03\xa7\xfd\x57" + "\x45\x5e\xbc\x77\xe1\xee\x79\x8c" + "\x58\x7b\x1f\xf7\x75\xde\x68\x17" + "\x98\x85\x8a\x18\x5c\xd2\x39\x78" + "\x7a\x6f\x26\x6e\xe1\x13\x91\xdd" + "\xdf\x0e\x6e\x67\xcc\x51\x53\xd8" + "\x17\x5e\xce\xa7\xe4\xaf\xfa\xf3" + "\x4f\x9f\x01\x9b\x04\xe7\xfc\xf9" + "\x6a\xdc\x1d\x0c\x9a\xaa\x3a\x7a" + "\x73\x03\xdf\xbf\x3b\x82\xbe\xb0" + "\xb4\xa4\xcf\x07\xd7\xde\x71\x25" + "\xc5\x10\xee\x0a\x15\x96\x8b\x4f" + "\xfe\xb8\x28\xbd\x4a\xcd\xeb\x9f" + "\x5d\x00\xc1\xee\xe8\x16\x44\xec" + "\xe9\x7b\xd6\x85\x17\x29\xcf\x58" + "\x20\xab\xf7\xce\x6b\xe7\x71\x7d" + "\x4f\xa8\xb0\xe9\x7d\x70\xd6\x0b" + "\x2e\x20\xb1\x1a\x63\x37\xaa\x2c" + "\x94\xee\xd5\xf6\x58\x2a\xf4\x7a" + "\x4c\xba\xf5\xe9\x3c\x6f\x95\x13" + "\x5f\x96\x81\x5b\xb5\x62\xf2\xd7" + "\x8d\xbe\xa1\x31\x51\xe6\xfe\xc9" + "\x07\x7d\x0f\x00\x3a\x66\x8c\x4b" + "\x94\xaa\xe5\x56\xde\xcd\x74\xa7" + "\x48\x67\x6f\xed\xc9\x6a\xef\xaf" + "\x9a\xb7\xae\x60\xfa\xc0\x37\x39" + "\xa5\x25\xe5\x22\xea\x82\x55\x68" + "\x3e\x30\xc3\x5a\xb6\x29\x73\x7a" + "\xb6\xfb\x34\xee\x51\x7c\x54\xe5" + "\x01\x4d\x72\x25\x32\x4a\xa3\x68" + "\x80\x9a\x89\xc5\x11\x66\x4c\x8c" + "\x44\x50\xbe\xd7\xa0\xee\xa6\xbb" + "\x92\x0c\xe6\xd7\x83\x51\xb1\x69" + "\x63\x40\xf3\xf4\x92\x84\xc4\x38" + "\x29\xfb\xb4\x84\xa0\x19\x75\x16" + "\x60\xbf\x0a\x9c\x89\xee\xad\xb4" + "\x43\xf9\x71\x39\x45\x7c\x24\x83" + "\x30\xbb\xee\x28\xb0\x86\x7b\xec" + "\x93\xc1\xbf\xb9\x97\x1b\x96\xef" + "\xee\x58\x35\x61\x12\x19\xda\x25" + "\x77\xe5\x80\x1a\x31\x27\x9b\xe4" + "\xda\x8b\x7e\x51\x4d\xcb\x01\x19" + "\x4f\xdc\x92\x1a\x17\xd5\x6b\xf4" + "\x50\xe3\x06\xe4\x76\x9f\x65\x00" + "\xbd\x7a\xe2\x64\x26\xf2\xe4\x7e" + "\x40\xf2\x80\xab\x62\xd5\xef\x23" + "\x8b\xfb\x6f\x24\x6e\x9b\x66\x0e" + "\xf4\x1c\x24\x1e\x1d\x26\x95\x09" + "\x94\x3c\xb2\xb6\x02\xa7\xd9\x9a", + .klen = 32, + .len = 512, + }, + +}; + #endif /* _CRYPTO_TESTMGR_H */ From fd94fcf09957a75e25941f7dbfc84d30a63817ac Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:14:56 +0000 Subject: [PATCH 08/89] crypto: x86/aesni-xctr - Add accelerated implementation of XCTR Add hardware accelerated version of XCTR for x86-64 CPUs with AESNI support. More information on XCTR can be found in the HCTR2 paper: "Length-preserving encryption with HCTR2": https://eprint.iacr.org/2021/1441.pdf Signed-off-by: Nathan Huckleberry Reviewed-by: Ard Biesheuvel Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 232 ++++++++++++++++-------- arch/x86/crypto/aesni-intel_glue.c | 114 +++++++++++- crypto/Kconfig | 2 +- 3 files changed, 266 insertions(+), 82 deletions(-) diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 43852ba6e19c7..2402b9418cd7a 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -23,6 +23,11 @@ #define VMOVDQ vmovdqu +/* + * Note: the "x" prefix in these aliases means "this is an xmm register". The + * alias prefixes have no relation to XCTR where the "X" prefix means "XOR + * counter". 
+ */ #define xdata0 %xmm0 #define xdata1 %xmm1 #define xdata2 %xmm2 @@ -31,8 +36,10 @@ #define xdata5 %xmm5 #define xdata6 %xmm6 #define xdata7 %xmm7 -#define xcounter %xmm8 -#define xbyteswap %xmm9 +#define xcounter %xmm8 // CTR mode only +#define xiv %xmm8 // XCTR mode only +#define xbyteswap %xmm9 // CTR mode only +#define xtmp %xmm9 // XCTR mode only #define xkey0 %xmm10 #define xkey4 %xmm11 #define xkey8 %xmm12 @@ -45,7 +52,7 @@ #define p_keys %rdx #define p_out %rcx #define num_bytes %r8 - +#define counter %r9 // XCTR mode only #define tmp %r10 #define DDQ_DATA 0 #define XDATA 1 @@ -102,7 +109,7 @@ ddq_add_8: * do_aes num_in_par load_keys key_len * This increments p_in, but not p_out */ -.macro do_aes b, k, key_len +.macro do_aes b, k, key_len, xctr .set by, \b .set load_keys, \k .set klen, \key_len @@ -111,29 +118,48 @@ ddq_add_8: vmovdqa 0*16(p_keys), xkey0 .endif - vpshufb xbyteswap, xcounter, xdata0 - - .set i, 1 - .rept (by - 1) - club XDATA, i - vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata - vptest ddq_low_msk(%rip), var_xdata - jnz 1f - vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata - vpaddq ddq_high_add_1(%rip), xcounter, xcounter - 1: - vpshufb xbyteswap, var_xdata, var_xdata - .set i, (i +1) - .endr + .if \xctr + movq counter, xtmp + .set i, 0 + .rept (by) + club XDATA, i + vpaddq (ddq_add_1 + 16 * i)(%rip), xtmp, var_xdata + .set i, (i +1) + .endr + .set i, 0 + .rept (by) + club XDATA, i + vpxor xiv, var_xdata, var_xdata + .set i, (i +1) + .endr + .else + vpshufb xbyteswap, xcounter, xdata0 + .set i, 1 + .rept (by - 1) + club XDATA, i + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata + vptest ddq_low_msk(%rip), var_xdata + jnz 1f + vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata + vpaddq ddq_high_add_1(%rip), xcounter, xcounter + 1: + vpshufb xbyteswap, var_xdata, var_xdata + .set i, (i +1) + .endr + .endif vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 - vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter - vptest ddq_low_msk(%rip), xcounter - jnz 1f - vpaddq ddq_high_add_1(%rip), xcounter, xcounter - 1: + .if \xctr + add $by, counter + .else + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter + vptest ddq_low_msk(%rip), xcounter + jnz 1f + vpaddq ddq_high_add_1(%rip), xcounter, xcounter + 1: + .endif .set i, 1 .rept (by - 1) @@ -371,94 +397,99 @@ ddq_add_8: .endr .endm -.macro do_aes_load val, key_len - do_aes \val, 1, \key_len +.macro do_aes_load val, key_len, xctr + do_aes \val, 1, \key_len, \xctr .endm -.macro do_aes_noload val, key_len - do_aes \val, 0, \key_len +.macro do_aes_noload val, key_len, xctr + do_aes \val, 0, \key_len, \xctr .endm /* main body of aes ctr load */ -.macro do_aes_ctrmain key_len +.macro do_aes_ctrmain key_len, xctr cmp $16, num_bytes - jb .Ldo_return2\key_len + jb .Ldo_return2\xctr\key_len - vmovdqa byteswap_const(%rip), xbyteswap - vmovdqu (p_iv), xcounter - vpshufb xbyteswap, xcounter, xcounter + .if \xctr + shr $4, counter + vmovdqu (p_iv), xiv + .else + vmovdqa byteswap_const(%rip), xbyteswap + vmovdqu (p_iv), xcounter + vpshufb xbyteswap, xcounter, xcounter + .endif mov num_bytes, tmp and $(7*16), tmp - jz .Lmult_of_8_blks\key_len + jz .Lmult_of_8_blks\xctr\key_len /* 1 <= tmp <= 7 */ cmp $(4*16), tmp - jg .Lgt4\key_len - je .Leq4\key_len + jg .Lgt4\xctr\key_len + je .Leq4\xctr\key_len -.Llt4\key_len: +.Llt4\xctr\key_len: cmp $(2*16), tmp - jg .Leq3\key_len - je .Leq2\key_len + jg .Leq3\xctr\key_len + je .Leq2\xctr\key_len -.Leq1\key_len: - do_aes_load 1, \key_len 
+.Leq1\xctr\key_len: + do_aes_load 1, \key_len, \xctr add $(1*16), p_out and $(~7*16), num_bytes - jz .Ldo_return2\key_len - jmp .Lmain_loop2\key_len + jz .Ldo_return2\xctr\key_len + jmp .Lmain_loop2\xctr\key_len -.Leq2\key_len: - do_aes_load 2, \key_len +.Leq2\xctr\key_len: + do_aes_load 2, \key_len, \xctr add $(2*16), p_out and $(~7*16), num_bytes - jz .Ldo_return2\key_len - jmp .Lmain_loop2\key_len + jz .Ldo_return2\xctr\key_len + jmp .Lmain_loop2\xctr\key_len -.Leq3\key_len: - do_aes_load 3, \key_len +.Leq3\xctr\key_len: + do_aes_load 3, \key_len, \xctr add $(3*16), p_out and $(~7*16), num_bytes - jz .Ldo_return2\key_len - jmp .Lmain_loop2\key_len + jz .Ldo_return2\xctr\key_len + jmp .Lmain_loop2\xctr\key_len -.Leq4\key_len: - do_aes_load 4, \key_len +.Leq4\xctr\key_len: + do_aes_load 4, \key_len, \xctr add $(4*16), p_out and $(~7*16), num_bytes - jz .Ldo_return2\key_len - jmp .Lmain_loop2\key_len + jz .Ldo_return2\xctr\key_len + jmp .Lmain_loop2\xctr\key_len -.Lgt4\key_len: +.Lgt4\xctr\key_len: cmp $(6*16), tmp - jg .Leq7\key_len - je .Leq6\key_len + jg .Leq7\xctr\key_len + je .Leq6\xctr\key_len -.Leq5\key_len: - do_aes_load 5, \key_len +.Leq5\xctr\key_len: + do_aes_load 5, \key_len, \xctr add $(5*16), p_out and $(~7*16), num_bytes - jz .Ldo_return2\key_len - jmp .Lmain_loop2\key_len + jz .Ldo_return2\xctr\key_len + jmp .Lmain_loop2\xctr\key_len -.Leq6\key_len: - do_aes_load 6, \key_len +.Leq6\xctr\key_len: + do_aes_load 6, \key_len, \xctr add $(6*16), p_out and $(~7*16), num_bytes - jz .Ldo_return2\key_len - jmp .Lmain_loop2\key_len + jz .Ldo_return2\xctr\key_len + jmp .Lmain_loop2\xctr\key_len -.Leq7\key_len: - do_aes_load 7, \key_len +.Leq7\xctr\key_len: + do_aes_load 7, \key_len, \xctr add $(7*16), p_out and $(~7*16), num_bytes - jz .Ldo_return2\key_len - jmp .Lmain_loop2\key_len + jz .Ldo_return2\xctr\key_len + jmp .Lmain_loop2\xctr\key_len -.Lmult_of_8_blks\key_len: +.Lmult_of_8_blks\xctr\key_len: .if (\key_len != KEY_128) vmovdqa 0*16(p_keys), xkey0 vmovdqa 4*16(p_keys), xkey4 @@ -471,17 +502,19 @@ ddq_add_8: vmovdqa 9*16(p_keys), xkey12 .endif .align 16 -.Lmain_loop2\key_len: +.Lmain_loop2\xctr\key_len: /* num_bytes is a multiple of 8 and >0 */ - do_aes_noload 8, \key_len + do_aes_noload 8, \key_len, \xctr add $(8*16), p_out sub $(8*16), num_bytes - jne .Lmain_loop2\key_len + jne .Lmain_loop2\xctr\key_len -.Ldo_return2\key_len: - /* return updated IV */ - vpshufb xbyteswap, xcounter, xcounter - vmovdqu xcounter, (p_iv) +.Ldo_return2\xctr\key_len: + .if !\xctr + /* return updated IV */ + vpshufb xbyteswap, xcounter, xcounter + vmovdqu xcounter, (p_iv) + .endif RET .endm @@ -494,7 +527,7 @@ ddq_add_8: */ SYM_FUNC_START(aes_ctr_enc_128_avx_by8) /* call the aes main loop */ - do_aes_ctrmain KEY_128 + do_aes_ctrmain KEY_128 0 SYM_FUNC_END(aes_ctr_enc_128_avx_by8) @@ -507,7 +540,7 @@ SYM_FUNC_END(aes_ctr_enc_128_avx_by8) */ SYM_FUNC_START(aes_ctr_enc_192_avx_by8) /* call the aes main loop */ - do_aes_ctrmain KEY_192 + do_aes_ctrmain KEY_192 0 SYM_FUNC_END(aes_ctr_enc_192_avx_by8) @@ -520,6 +553,45 @@ SYM_FUNC_END(aes_ctr_enc_192_avx_by8) */ SYM_FUNC_START(aes_ctr_enc_256_avx_by8) /* call the aes main loop */ - do_aes_ctrmain KEY_256 + do_aes_ctrmain KEY_256 0 SYM_FUNC_END(aes_ctr_enc_256_avx_by8) + +/* + * routine to do AES128 XCTR enc/decrypt "by8" + * XMM registers are clobbered. 
+ * Saving/restoring must be done at a higher level + * aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv, const void *keys, + * u8* out, unsigned int num_bytes, unsigned int byte_ctr) + */ +SYM_FUNC_START(aes_xctr_enc_128_avx_by8) + /* call the aes main loop */ + do_aes_ctrmain KEY_128 1 + +SYM_FUNC_END(aes_xctr_enc_128_avx_by8) + +/* + * routine to do AES192 XCTR enc/decrypt "by8" + * XMM registers are clobbered. + * Saving/restoring must be done at a higher level + * aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv, const void *keys, + * u8* out, unsigned int num_bytes, unsigned int byte_ctr) + */ +SYM_FUNC_START(aes_xctr_enc_192_avx_by8) + /* call the aes main loop */ + do_aes_ctrmain KEY_192 1 + +SYM_FUNC_END(aes_xctr_enc_192_avx_by8) + +/* + * routine to do AES256 XCTR enc/decrypt "by8" + * XMM registers are clobbered. + * Saving/restoring must be done at a higher level + * aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv, const void *keys, + * u8* out, unsigned int num_bytes, unsigned int byte_ctr) + */ +SYM_FUNC_START(aes_xctr_enc_256_avx_by8) + /* call the aes main loop */ + do_aes_ctrmain KEY_256 1 + +SYM_FUNC_END(aes_xctr_enc_256_avx_by8) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 41901ba9d3a2c..a5b0cb3efeba5 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -135,6 +135,20 @@ asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); + + +asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv, + const void *keys, u8 *out, unsigned int num_bytes, + unsigned int byte_ctr); + +asmlinkage void aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv, + const void *keys, u8 *out, unsigned int num_bytes, + unsigned int byte_ctr); + +asmlinkage void aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv, + const void *keys, u8 *out, unsigned int num_bytes, + unsigned int byte_ctr); + /* * asmlinkage void aesni_gcm_init_avx_gen2() * gcm_data *my_ctx_data, context data @@ -527,6 +541,59 @@ static int ctr_crypt(struct skcipher_request *req) return err; } +static void aesni_xctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv, + unsigned int byte_ctr) +{ + if (ctx->key_length == AES_KEYSIZE_128) + aes_xctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len, + byte_ctr); + else if (ctx->key_length == AES_KEYSIZE_192) + aes_xctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len, + byte_ctr); + else + aes_xctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len, + byte_ctr); +} + +static int xctr_crypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); + u8 keystream[AES_BLOCK_SIZE]; + struct skcipher_walk walk; + unsigned int nbytes; + unsigned int byte_ctr = 0; + int err; + __le32 block[AES_BLOCK_SIZE / sizeof(__le32)]; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + kernel_fpu_begin(); + if (nbytes & AES_BLOCK_MASK) + aesni_xctr_enc_avx_tfm(ctx, walk.dst.virt.addr, + walk.src.virt.addr, nbytes & AES_BLOCK_MASK, + walk.iv, byte_ctr); + nbytes &= ~AES_BLOCK_MASK; + byte_ctr += walk.nbytes - nbytes; + + if (walk.nbytes == walk.total && nbytes > 0) { + memcpy(block, walk.iv, AES_BLOCK_SIZE); + block[0] ^= cpu_to_le32(1 + byte_ctr / AES_BLOCK_SIZE); + 
aesni_enc(ctx, keystream, (u8 *)block); + crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - + nbytes, walk.src.virt.addr + walk.nbytes + - nbytes, keystream, nbytes); + byte_ctr += nbytes; + nbytes = 0; + } + kernel_fpu_end(); + err = skcipher_walk_done(&walk, nbytes); + } + return err; +} + static int rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) { @@ -1050,6 +1117,33 @@ static struct skcipher_alg aesni_skciphers[] = { static struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)]; +#ifdef CONFIG_X86_64 +/* + * XCTR does not have a non-AVX implementation, so it must be enabled + * conditionally. + */ +static struct skcipher_alg aesni_xctr = { + .base = { + .cra_name = "__xctr(aes)", + .cra_driver_name = "__xctr-aes-aesni", + .cra_priority = 400, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, + .cra_ctxsize = CRYPTO_AES_CTX_SIZE, + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, + .setkey = aesni_skcipher_setkey, + .encrypt = xctr_crypt, + .decrypt = xctr_crypt, +}; + +static struct simd_skcipher_alg *aesni_simd_xctr; +#endif /* CONFIG_X86_64 */ + #ifdef CONFIG_X86_64 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key, unsigned int key_len) @@ -1163,7 +1257,7 @@ static int __init aesni_init(void) static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm); pr_info("AES CTR mode by8 optimization enabled\n"); } -#endif +#endif /* CONFIG_X86_64 */ err = crypto_register_alg(&aesni_cipher_alg); if (err) @@ -1180,8 +1274,22 @@ static int __init aesni_init(void) if (err) goto unregister_skciphers; +#ifdef CONFIG_X86_64 + if (boot_cpu_has(X86_FEATURE_AVX)) + err = simd_register_skciphers_compat(&aesni_xctr, 1, + &aesni_simd_xctr); + if (err) + goto unregister_aeads; +#endif /* CONFIG_X86_64 */ + return 0; +#ifdef CONFIG_X86_64 +unregister_aeads: + simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads), + aesni_simd_aeads); +#endif /* CONFIG_X86_64 */ + unregister_skciphers: simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), aesni_simd_skciphers); @@ -1197,6 +1305,10 @@ static void __exit aesni_exit(void) simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), aesni_simd_skciphers); crypto_unregister_alg(&aesni_cipher_alg); +#ifdef CONFIG_X86_64 + if (boot_cpu_has(X86_FEATURE_AVX)) + simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr); +#endif /* CONFIG_X86_64 */ } late_initcall(aesni_init); diff --git a/crypto/Kconfig b/crypto/Kconfig index 0601a2d2feeff..dfcc3235e918c 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1169,7 +1169,7 @@ config CRYPTO_AES_NI_INTEL In addition to AES cipher algorithm support, the acceleration for some popular block cipher mode is supported too, including ECB, CBC, LRW, XTS. The 64 bit version has additional - acceleration for CTR. + acceleration for CTR and XCTR. config CRYPTO_AES_SPARC64 tristate "AES cipher algorithms (SPARC64)" From 23a251cc1696e1bf68df1dbba569d2fe12469d22 Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:14:57 +0000 Subject: [PATCH 09/89] crypto: arm64/aes-xctr - Add accelerated implementation of XCTR Add hardware accelerated version of XCTR for ARM64 CPUs with ARMv8 Crypto Extension support. This XCTR implementation is based on the CTR implementation in aes-modes.S. 
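For background, XCTR differs from CTR only in how the counter is combined
with the IV: keystream block i (numbered from 1) is AES(key, IV XOR i), with
the block index encoded little-endian, so no byte swapping or carry handling
is needed. The sketch below is illustrative only and is not part of this
patch; it uses the kernel's generic AES library rather than the NEON code
added here, and the helper name is made up:

	#include <crypto/aes.h>
	#include <linux/string.h>
	#include <asm/unaligned.h>

	/* Produce XCTR keystream block 'i' (1-based) into 'out'. */
	static void xctr_keystream_block(const struct crypto_aes_ctx *key,
					 const u8 iv[AES_BLOCK_SIZE],
					 u64 i, u8 out[AES_BLOCK_SIZE])
	{
		u8 blk[AES_BLOCK_SIZE];

		memcpy(blk, iv, AES_BLOCK_SIZE);
		/*
		 * XOR in the little-endian block index; since i fits in 64
		 * bits, the upper half of its 128-bit encoding is zero.
		 */
		put_unaligned_le64(get_unaligned_le64(blk) ^ i, blk);
		aes_encrypt(key, out, blk);
	}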
More information on XCTR can be found in the HCTR2 paper: "Length-preserving encryption with HCTR2": https://eprint.iacr.org/2021/1441.pdf Signed-off-by: Nathan Huckleberry Reviewed-by: Ard Biesheuvel Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/arm64/crypto/Kconfig | 4 +- arch/arm64/crypto/aes-glue.c | 64 ++++++++++++- arch/arm64/crypto/aes-modes.S | 166 +++++++++++++++++++++------------- 3 files changed, 168 insertions(+), 66 deletions(-) diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index ac85682c013c1..74d5bed9394f5 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -96,13 +96,13 @@ config CRYPTO_AES_ARM64_CE_CCM select CRYPTO_LIB_AES config CRYPTO_AES_ARM64_CE_BLK - tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions" + tristate "AES in ECB/CBC/CTR/XTS/XCTR modes using ARMv8 Crypto Extensions" depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_AES_ARM64_CE config CRYPTO_AES_ARM64_NEON_BLK - tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions" + tristate "AES in ECB/CBC/CTR/XTS/XCTR modes using NEON instructions" depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_LIB_AES diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 561dd23325711..b6883288234c7 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -34,10 +34,11 @@ #define aes_essiv_cbc_encrypt ce_aes_essiv_cbc_encrypt #define aes_essiv_cbc_decrypt ce_aes_essiv_cbc_decrypt #define aes_ctr_encrypt ce_aes_ctr_encrypt +#define aes_xctr_encrypt ce_aes_xctr_encrypt #define aes_xts_encrypt ce_aes_xts_encrypt #define aes_xts_decrypt ce_aes_xts_decrypt #define aes_mac_update ce_aes_mac_update -MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); +MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 Crypto Extensions"); #else #define MODE "neon" #define PRIO 200 @@ -50,16 +51,18 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); #define aes_essiv_cbc_encrypt neon_aes_essiv_cbc_encrypt #define aes_essiv_cbc_decrypt neon_aes_essiv_cbc_decrypt #define aes_ctr_encrypt neon_aes_ctr_encrypt +#define aes_xctr_encrypt neon_aes_xctr_encrypt #define aes_xts_encrypt neon_aes_xts_encrypt #define aes_xts_decrypt neon_aes_xts_decrypt #define aes_mac_update neon_aes_mac_update -MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON"); +MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 NEON"); #endif #if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS) MODULE_ALIAS_CRYPTO("ecb(aes)"); MODULE_ALIAS_CRYPTO("cbc(aes)"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); +MODULE_ALIAS_CRYPTO("xctr(aes)"); #endif MODULE_ALIAS_CRYPTO("cts(cbc(aes))"); MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)"); @@ -89,6 +92,9 @@ asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[], asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int bytes, u8 ctr[]); +asmlinkage void aes_xctr_encrypt(u8 out[], u8 const in[], u32 const rk[], + int rounds, int bytes, u8 ctr[], int byte_ctr); + asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds, int bytes, u32 const rk2[], u8 iv[], int first); @@ -442,6 +448,44 @@ static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req) return err ?: cbc_decrypt_walk(req, &walk); } +static int __maybe_unused xctr_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = 
crypto_skcipher_reqtfm(req); + struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + int err, rounds = 6 + ctx->key_length / 4; + struct skcipher_walk walk; + unsigned int byte_ctr = 0; + + err = skcipher_walk_virt(&walk, req, false); + + while (walk.nbytes > 0) { + const u8 *src = walk.src.virt.addr; + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + u8 buf[AES_BLOCK_SIZE]; + + if (unlikely(nbytes < AES_BLOCK_SIZE)) + src = dst = memcpy(buf + sizeof(buf) - nbytes, + src, nbytes); + else if (nbytes < walk.total) + nbytes &= ~(AES_BLOCK_SIZE - 1); + + kernel_neon_begin(); + aes_xctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes, + walk.iv, byte_ctr); + kernel_neon_end(); + + if (unlikely(nbytes < AES_BLOCK_SIZE)) + memcpy(walk.dst.virt.addr, + buf + sizeof(buf) - nbytes, nbytes); + byte_ctr += nbytes; + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } + + return err; +} + static int __maybe_unused ctr_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -669,6 +713,22 @@ static struct skcipher_alg aes_algs[] = { { .setkey = skcipher_aes_setkey, .encrypt = ctr_encrypt, .decrypt = ctr_encrypt, +}, { + .base = { + .cra_name = "xctr(aes)", + .cra_driver_name = "xctr-aes-" MODE, + .cra_priority = PRIO, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct crypto_aes_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, + .setkey = skcipher_aes_setkey, + .encrypt = xctr_encrypt, + .decrypt = xctr_encrypt, }, { .base = { .cra_name = "xts(aes)", diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index dc35eb0245c55..6c36a3b0ed7d4 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -318,79 +318,102 @@ AES_FUNC_END(aes_cbc_cts_decrypt) .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .previous - /* - * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, - * int bytes, u8 ctr[]) + * This macro generates the code for CTR and XCTR mode. */ - -AES_FUNC_START(aes_ctr_encrypt) +.macro ctr_encrypt xctr stp x29, x30, [sp, #-16]! 
mov x29, sp enc_prepare w3, x2, x12 ld1 {vctr.16b}, [x5] - umov x12, vctr.d[1] /* keep swabbed ctr in reg */ - rev x12, x12 + .if \xctr + umov x12, vctr.d[0] + lsr w11, w6, #4 + .else + umov x12, vctr.d[1] /* keep swabbed ctr in reg */ + rev x12, x12 + .endif -.LctrloopNx: +.LctrloopNx\xctr: add w7, w4, #15 sub w4, w4, #MAX_STRIDE << 4 lsr w7, w7, #4 mov w8, #MAX_STRIDE cmp w7, w8 csel w7, w7, w8, lt - adds x12, x12, x7 + .if \xctr + add x11, x11, x7 + .else + adds x12, x12, x7 + .endif mov v0.16b, vctr.16b mov v1.16b, vctr.16b mov v2.16b, vctr.16b mov v3.16b, vctr.16b ST5( mov v4.16b, vctr.16b ) - bcs 0f - - .subsection 1 - /* apply carry to outgoing counter */ -0: umov x8, vctr.d[0] - rev x8, x8 - add x8, x8, #1 - rev x8, x8 - ins vctr.d[0], x8 - - /* apply carry to N counter blocks for N := x12 */ - cbz x12, 2f - adr x16, 1f - sub x16, x16, x12, lsl #3 - br x16 - bti c - mov v0.d[0], vctr.d[0] - bti c - mov v1.d[0], vctr.d[0] - bti c - mov v2.d[0], vctr.d[0] - bti c - mov v3.d[0], vctr.d[0] -ST5( bti c ) -ST5( mov v4.d[0], vctr.d[0] ) -1: b 2f - .previous - -2: rev x7, x12 - ins vctr.d[1], x7 - sub x7, x12, #MAX_STRIDE - 1 - sub x8, x12, #MAX_STRIDE - 2 - sub x9, x12, #MAX_STRIDE - 3 - rev x7, x7 - rev x8, x8 - mov v1.d[1], x7 - rev x9, x9 -ST5( sub x10, x12, #MAX_STRIDE - 4 ) - mov v2.d[1], x8 -ST5( rev x10, x10 ) - mov v3.d[1], x9 -ST5( mov v4.d[1], x10 ) - tbnz w4, #31, .Lctrtail + .if \xctr + sub x6, x11, #MAX_STRIDE - 1 + sub x7, x11, #MAX_STRIDE - 2 + sub x8, x11, #MAX_STRIDE - 3 + sub x9, x11, #MAX_STRIDE - 4 +ST5( sub x10, x11, #MAX_STRIDE - 5 ) + eor x6, x6, x12 + eor x7, x7, x12 + eor x8, x8, x12 + eor x9, x9, x12 +ST5( eor x10, x10, x12 ) + mov v0.d[0], x6 + mov v1.d[0], x7 + mov v2.d[0], x8 + mov v3.d[0], x9 +ST5( mov v4.d[0], x10 ) + .else + bcs 0f + .subsection 1 + /* apply carry to outgoing counter */ +0: umov x8, vctr.d[0] + rev x8, x8 + add x8, x8, #1 + rev x8, x8 + ins vctr.d[0], x8 + + /* apply carry to N counter blocks for N := x12 */ + cbz x12, 2f + adr x16, 1f + sub x16, x16, x12, lsl #3 + br x16 + bti c + mov v0.d[0], vctr.d[0] + bti c + mov v1.d[0], vctr.d[0] + bti c + mov v2.d[0], vctr.d[0] + bti c + mov v3.d[0], vctr.d[0] +ST5( bti c ) +ST5( mov v4.d[0], vctr.d[0] ) +1: b 2f + .previous + +2: rev x7, x12 + ins vctr.d[1], x7 + sub x7, x12, #MAX_STRIDE - 1 + sub x8, x12, #MAX_STRIDE - 2 + sub x9, x12, #MAX_STRIDE - 3 + rev x7, x7 + rev x8, x8 + mov v1.d[1], x7 + rev x9, x9 +ST5( sub x10, x12, #MAX_STRIDE - 4 ) + mov v2.d[1], x8 +ST5( rev x10, x10 ) + mov v3.d[1], x9 +ST5( mov v4.d[1], x10 ) + .endif + tbnz w4, #31, .Lctrtail\xctr ld1 {v5.16b-v7.16b}, [x1], #48 ST4( bl aes_encrypt_block4x ) ST5( bl aes_encrypt_block5x ) @@ -403,16 +426,17 @@ ST5( ld1 {v5.16b-v6.16b}, [x1], #32 ) ST5( eor v4.16b, v6.16b, v4.16b ) st1 {v0.16b-v3.16b}, [x0], #64 ST5( st1 {v4.16b}, [x0], #16 ) - cbz w4, .Lctrout - b .LctrloopNx + cbz w4, .Lctrout\xctr + b .LctrloopNx\xctr -.Lctrout: - st1 {vctr.16b}, [x5] /* return next CTR value */ +.Lctrout\xctr: + .if !\xctr + st1 {vctr.16b}, [x5] /* return next CTR value */ + .endif ldp x29, x30, [sp], #16 ret -.Lctrtail: - /* XOR up to MAX_STRIDE * 16 - 1 bytes of in/output with v0 ... 
v3/v4 */ +.Lctrtail\xctr: mov x16, #16 ands x6, x4, #0xf csel x13, x6, x16, ne @@ -427,7 +451,7 @@ ST5( csel x14, x16, xzr, gt ) adr_l x12, .Lcts_permute_table add x12, x12, x13 - ble .Lctrtail1x + ble .Lctrtail1x\xctr ST5( ld1 {v5.16b}, [x1], x14 ) ld1 {v6.16b}, [x1], x15 @@ -459,9 +483,9 @@ ST5( st1 {v5.16b}, [x0], x14 ) add x13, x13, x0 st1 {v9.16b}, [x13] // overlapping stores st1 {v8.16b}, [x0] - b .Lctrout + b .Lctrout\xctr -.Lctrtail1x: +.Lctrtail1x\xctr: sub x7, x6, #16 csel x6, x6, x7, eq add x1, x1, x6 @@ -476,9 +500,27 @@ ST5( mov v3.16b, v4.16b ) eor v5.16b, v5.16b, v3.16b bif v5.16b, v6.16b, v11.16b st1 {v5.16b}, [x0] - b .Lctrout + b .Lctrout\xctr +.endm + + /* + * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, + * int bytes, u8 ctr[]) + */ + +AES_FUNC_START(aes_ctr_encrypt) + ctr_encrypt 0 AES_FUNC_END(aes_ctr_encrypt) + /* + * aes_xctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, + * int bytes, u8 const iv[], int byte_ctr) + */ + +AES_FUNC_START(aes_xctr_encrypt) + ctr_encrypt 1 +AES_FUNC_END(aes_xctr_encrypt) + /* * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds, From c0eb7591c1ed9cbdb0ad796bb56aed13748b55fa Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:14:58 +0000 Subject: [PATCH 10/89] crypto: arm64/aes-xctr - Improve readability of XCTR and CTR modes Added some clarifying comments, changed the register allocations to make the code clearer, and added register aliases. Signed-off-by: Nathan Huckleberry Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-glue.c | 16 +++ arch/arm64/crypto/aes-modes.S | 237 ++++++++++++++++++++++++---------- 2 files changed, 185 insertions(+), 68 deletions(-) diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index b6883288234c7..162787c7aa865 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -464,6 +464,14 @@ static int __maybe_unused xctr_encrypt(struct skcipher_request *req) u8 *dst = walk.dst.virt.addr; u8 buf[AES_BLOCK_SIZE]; + /* + * If given less than 16 bytes, we must copy the partial block + * into a temporary buffer of 16 bytes to avoid out of bounds + * reads and writes. Furthermore, this code is somewhat unusual + * in that it expects the end of the data to be at the end of + * the temporary buffer, rather than the start of the data at + * the start of the temporary buffer. + */ if (unlikely(nbytes < AES_BLOCK_SIZE)) src = dst = memcpy(buf + sizeof(buf) - nbytes, src, nbytes); @@ -501,6 +509,14 @@ static int __maybe_unused ctr_encrypt(struct skcipher_request *req) u8 *dst = walk.dst.virt.addr; u8 buf[AES_BLOCK_SIZE]; + /* + * If given less than 16 bytes, we must copy the partial block + * into a temporary buffer of 16 bytes to avoid out of bounds + * reads and writes. Furthermore, this code is somewhat unusual + * in that it expects the end of the data to be at the end of + * the temporary buffer, rather than the start of the data at + * the start of the temporary buffer. + */ if (unlikely(nbytes < AES_BLOCK_SIZE)) src = dst = memcpy(buf + sizeof(buf) - nbytes, src, nbytes); diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index 6c36a3b0ed7d4..5abc834271f4a 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -322,32 +322,60 @@ AES_FUNC_END(aes_cbc_cts_decrypt) * This macro generates the code for CTR and XCTR mode. 
*/ .macro ctr_encrypt xctr + // Arguments + OUT .req x0 + IN .req x1 + KEY .req x2 + ROUNDS_W .req w3 + BYTES_W .req w4 + IV .req x5 + BYTE_CTR_W .req w6 // XCTR only + // Intermediate values + CTR_W .req w11 // XCTR only + CTR .req x11 // XCTR only + IV_PART .req x12 + BLOCKS .req x13 + BLOCKS_W .req w13 + stp x29, x30, [sp, #-16]! mov x29, sp - enc_prepare w3, x2, x12 - ld1 {vctr.16b}, [x5] + enc_prepare ROUNDS_W, KEY, IV_PART + ld1 {vctr.16b}, [IV] + /* + * Keep 64 bits of the IV in a register. For CTR mode this lets us + * easily increment the IV. For XCTR mode this lets us efficiently XOR + * the 64-bit counter with the IV. + */ .if \xctr - umov x12, vctr.d[0] - lsr w11, w6, #4 + umov IV_PART, vctr.d[0] + lsr CTR_W, BYTE_CTR_W, #4 .else - umov x12, vctr.d[1] /* keep swabbed ctr in reg */ - rev x12, x12 + umov IV_PART, vctr.d[1] + rev IV_PART, IV_PART .endif .LctrloopNx\xctr: - add w7, w4, #15 - sub w4, w4, #MAX_STRIDE << 4 - lsr w7, w7, #4 + add BLOCKS_W, BYTES_W, #15 + sub BYTES_W, BYTES_W, #MAX_STRIDE << 4 + lsr BLOCKS_W, BLOCKS_W, #4 mov w8, #MAX_STRIDE - cmp w7, w8 - csel w7, w7, w8, lt + cmp BLOCKS_W, w8 + csel BLOCKS_W, BLOCKS_W, w8, lt + /* + * Set up the counter values in v0-v{MAX_STRIDE-1}. + * + * If we are encrypting less than MAX_STRIDE blocks, the tail block + * handling code expects the last keystream block to be in + * v{MAX_STRIDE-1}. For example: if encrypting two blocks with + * MAX_STRIDE=5, then v3 and v4 should have the next two counter blocks. + */ .if \xctr - add x11, x11, x7 + add CTR, CTR, BLOCKS .else - adds x12, x12, x7 + adds IV_PART, IV_PART, BLOCKS .endif mov v0.16b, vctr.16b mov v1.16b, vctr.16b @@ -355,16 +383,16 @@ AES_FUNC_END(aes_cbc_cts_decrypt) mov v3.16b, vctr.16b ST5( mov v4.16b, vctr.16b ) .if \xctr - sub x6, x11, #MAX_STRIDE - 1 - sub x7, x11, #MAX_STRIDE - 2 - sub x8, x11, #MAX_STRIDE - 3 - sub x9, x11, #MAX_STRIDE - 4 -ST5( sub x10, x11, #MAX_STRIDE - 5 ) - eor x6, x6, x12 - eor x7, x7, x12 - eor x8, x8, x12 - eor x9, x9, x12 -ST5( eor x10, x10, x12 ) + sub x6, CTR, #MAX_STRIDE - 1 + sub x7, CTR, #MAX_STRIDE - 2 + sub x8, CTR, #MAX_STRIDE - 3 + sub x9, CTR, #MAX_STRIDE - 4 +ST5( sub x10, CTR, #MAX_STRIDE - 5 ) + eor x6, x6, IV_PART + eor x7, x7, IV_PART + eor x8, x8, IV_PART + eor x9, x9, IV_PART +ST5( eor x10, x10, IV_PART ) mov v0.d[0], x6 mov v1.d[0], x7 mov v2.d[0], x8 @@ -373,17 +401,32 @@ ST5( mov v4.d[0], x10 ) .else bcs 0f .subsection 1 - /* apply carry to outgoing counter */ + /* + * This subsection handles carries. + * + * Conditional branching here is allowed with respect to time + * invariance since the branches are dependent on the IV instead + * of the plaintext or key. This code is rarely executed in + * practice anyway. + */ + + /* Apply carry to outgoing counter. */ 0: umov x8, vctr.d[0] rev x8, x8 add x8, x8, #1 rev x8, x8 ins vctr.d[0], x8 - /* apply carry to N counter blocks for N := x12 */ - cbz x12, 2f + /* + * Apply carry to counter blocks if needed. + * + * Since the carry flag was set, we know 0 <= IV_PART < + * MAX_STRIDE. Using the value of IV_PART we can determine how + * many counter blocks need to be updated. 
+ */ + cbz IV_PART, 2f adr x16, 1f - sub x16, x16, x12, lsl #3 + sub x16, x16, IV_PART, lsl #3 br x16 bti c mov v0.d[0], vctr.d[0] @@ -398,71 +441,88 @@ ST5( mov v4.d[0], vctr.d[0] ) 1: b 2f .previous -2: rev x7, x12 +2: rev x7, IV_PART ins vctr.d[1], x7 - sub x7, x12, #MAX_STRIDE - 1 - sub x8, x12, #MAX_STRIDE - 2 - sub x9, x12, #MAX_STRIDE - 3 + sub x7, IV_PART, #MAX_STRIDE - 1 + sub x8, IV_PART, #MAX_STRIDE - 2 + sub x9, IV_PART, #MAX_STRIDE - 3 rev x7, x7 rev x8, x8 mov v1.d[1], x7 rev x9, x9 -ST5( sub x10, x12, #MAX_STRIDE - 4 ) +ST5( sub x10, IV_PART, #MAX_STRIDE - 4 ) mov v2.d[1], x8 ST5( rev x10, x10 ) mov v3.d[1], x9 ST5( mov v4.d[1], x10 ) .endif - tbnz w4, #31, .Lctrtail\xctr - ld1 {v5.16b-v7.16b}, [x1], #48 + + /* + * If there are at least MAX_STRIDE blocks left, XOR the data with + * keystream and store. Otherwise jump to tail handling. + */ + tbnz BYTES_W, #31, .Lctrtail\xctr + ld1 {v5.16b-v7.16b}, [IN], #48 ST4( bl aes_encrypt_block4x ) ST5( bl aes_encrypt_block5x ) eor v0.16b, v5.16b, v0.16b -ST4( ld1 {v5.16b}, [x1], #16 ) +ST4( ld1 {v5.16b}, [IN], #16 ) eor v1.16b, v6.16b, v1.16b -ST5( ld1 {v5.16b-v6.16b}, [x1], #32 ) +ST5( ld1 {v5.16b-v6.16b}, [IN], #32 ) eor v2.16b, v7.16b, v2.16b eor v3.16b, v5.16b, v3.16b ST5( eor v4.16b, v6.16b, v4.16b ) - st1 {v0.16b-v3.16b}, [x0], #64 -ST5( st1 {v4.16b}, [x0], #16 ) - cbz w4, .Lctrout\xctr + st1 {v0.16b-v3.16b}, [OUT], #64 +ST5( st1 {v4.16b}, [OUT], #16 ) + cbz BYTES_W, .Lctrout\xctr b .LctrloopNx\xctr .Lctrout\xctr: .if !\xctr - st1 {vctr.16b}, [x5] /* return next CTR value */ + st1 {vctr.16b}, [IV] /* return next CTR value */ .endif ldp x29, x30, [sp], #16 ret .Lctrtail\xctr: + /* + * Handle up to MAX_STRIDE * 16 - 1 bytes of plaintext + * + * This code expects the last keystream block to be in v{MAX_STRIDE-1}. + * For example: if encrypting two blocks with MAX_STRIDE=5, then v3 and + * v4 should have the next two counter blocks. + * + * This allows us to store the ciphertext by writing to overlapping + * regions of memory. Any invalid ciphertext blocks get overwritten by + * correctly computed blocks. This approach greatly simplifies the + * logic for storing the ciphertext. 
+ */ mov x16, #16 - ands x6, x4, #0xf - csel x13, x6, x16, ne + ands w7, BYTES_W, #0xf + csel x13, x7, x16, ne -ST5( cmp w4, #64 - (MAX_STRIDE << 4) ) +ST5( cmp BYTES_W, #64 - (MAX_STRIDE << 4)) ST5( csel x14, x16, xzr, gt ) - cmp w4, #48 - (MAX_STRIDE << 4) + cmp BYTES_W, #48 - (MAX_STRIDE << 4) csel x15, x16, xzr, gt - cmp w4, #32 - (MAX_STRIDE << 4) + cmp BYTES_W, #32 - (MAX_STRIDE << 4) csel x16, x16, xzr, gt - cmp w4, #16 - (MAX_STRIDE << 4) + cmp BYTES_W, #16 - (MAX_STRIDE << 4) - adr_l x12, .Lcts_permute_table - add x12, x12, x13 + adr_l x9, .Lcts_permute_table + add x9, x9, x13 ble .Lctrtail1x\xctr -ST5( ld1 {v5.16b}, [x1], x14 ) - ld1 {v6.16b}, [x1], x15 - ld1 {v7.16b}, [x1], x16 +ST5( ld1 {v5.16b}, [IN], x14 ) + ld1 {v6.16b}, [IN], x15 + ld1 {v7.16b}, [IN], x16 ST4( bl aes_encrypt_block4x ) ST5( bl aes_encrypt_block5x ) - ld1 {v8.16b}, [x1], x13 - ld1 {v9.16b}, [x1] - ld1 {v10.16b}, [x12] + ld1 {v8.16b}, [IN], x13 + ld1 {v9.16b}, [IN] + ld1 {v10.16b}, [x9] ST4( eor v6.16b, v6.16b, v0.16b ) ST4( eor v7.16b, v7.16b, v1.16b ) @@ -477,35 +537,70 @@ ST5( eor v7.16b, v7.16b, v2.16b ) ST5( eor v8.16b, v8.16b, v3.16b ) ST5( eor v9.16b, v9.16b, v4.16b ) -ST5( st1 {v5.16b}, [x0], x14 ) - st1 {v6.16b}, [x0], x15 - st1 {v7.16b}, [x0], x16 - add x13, x13, x0 +ST5( st1 {v5.16b}, [OUT], x14 ) + st1 {v6.16b}, [OUT], x15 + st1 {v7.16b}, [OUT], x16 + add x13, x13, OUT st1 {v9.16b}, [x13] // overlapping stores - st1 {v8.16b}, [x0] + st1 {v8.16b}, [OUT] b .Lctrout\xctr .Lctrtail1x\xctr: - sub x7, x6, #16 - csel x6, x6, x7, eq - add x1, x1, x6 - add x0, x0, x6 - ld1 {v5.16b}, [x1] - ld1 {v6.16b}, [x0] + /* + * Handle <= 16 bytes of plaintext + * + * This code always reads and writes 16 bytes. To avoid out of bounds + * accesses, XCTR and CTR modes must use a temporary buffer when + * encrypting/decrypting less than 16 bytes. + * + * This code is unusual in that it loads the input and stores the output + * relative to the end of the buffers rather than relative to the start. + * This causes unusual behaviour when encrypting/decrypting less than 16 + * bytes; the end of the data is expected to be at the end of the + * temporary buffer rather than the start of the data being at the start + * of the temporary buffer. + */ + sub x8, x7, #16 + csel x7, x7, x8, eq + add IN, IN, x7 + add OUT, OUT, x7 + ld1 {v5.16b}, [IN] + ld1 {v6.16b}, [OUT] ST5( mov v3.16b, v4.16b ) - encrypt_block v3, w3, x2, x8, w7 - ld1 {v10.16b-v11.16b}, [x12] + encrypt_block v3, ROUNDS_W, KEY, x8, w7 + ld1 {v10.16b-v11.16b}, [x9] tbl v3.16b, {v3.16b}, v10.16b sshr v11.16b, v11.16b, #7 eor v5.16b, v5.16b, v3.16b bif v5.16b, v6.16b, v11.16b - st1 {v5.16b}, [x0] + st1 {v5.16b}, [OUT] b .Lctrout\xctr + + // Arguments + .unreq OUT + .unreq IN + .unreq KEY + .unreq ROUNDS_W + .unreq BYTES_W + .unreq IV + .unreq BYTE_CTR_W // XCTR only + // Intermediate values + .unreq CTR_W // XCTR only + .unreq CTR // XCTR only + .unreq IV_PART + .unreq BLOCKS + .unreq BLOCKS_W .endm /* * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int bytes, u8 ctr[]) + * + * The input and output buffers must always be at least 16 bytes even if + * encrypting/decrypting less than 16 bytes. Otherwise out of bounds + * accesses will occur. The data to be encrypted/decrypted is expected + * to be at the end of this 16-byte temporary buffer rather than the + * start. 
*/ AES_FUNC_START(aes_ctr_encrypt) @@ -515,6 +610,12 @@ AES_FUNC_END(aes_ctr_encrypt) /* * aes_xctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int bytes, u8 const iv[], int byte_ctr) + * + * The input and output buffers must always be at least 16 bytes even if + * encrypting/decrypting less than 16 bytes. Otherwise out of bounds + * accesses will occur. The data to be encrypted/decrypted is expected + * to be at the end of this 16-byte temporary buffer rather than the + * start. */ AES_FUNC_START(aes_xctr_encrypt) From 34f7f6c3011276313383099156be287ac745bcea Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:14:59 +0000 Subject: [PATCH 11/89] crypto: x86/polyval - Add PCLMULQDQ accelerated implementation of POLYVAL Add hardware accelerated version of POLYVAL for x86-64 CPUs with PCLMULQDQ support. This implementation is accelerated using PCLMULQDQ instructions to perform the finite field computations. For added efficiency, 8 blocks of the message are processed simultaneously by precomputing the first 8 powers of the key. Schoolbook multiplication is used instead of Karatsuba multiplication because it was found to be slightly faster on x86-64 machines. Montgomery reduction must be used instead of Barrett reduction due to the difference in modulus between POLYVAL's field and other finite fields. More information on POLYVAL can be found in the HCTR2 paper: "Length-preserving encryption with HCTR2": https://eprint.iacr.org/2021/1441.pdf Signed-off-by: Nathan Huckleberry Reviewed-by: Ard Biesheuvel Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/x86/crypto/Makefile | 3 + arch/x86/crypto/polyval-clmulni_asm.S | 321 +++++++++++++++++++++++++ arch/x86/crypto/polyval-clmulni_glue.c | 203 ++++++++++++++++ crypto/Kconfig | 9 + crypto/polyval-generic.c | 40 +++ include/crypto/polyval.h | 5 + 6 files changed, 581 insertions(+) create mode 100644 arch/x86/crypto/polyval-clmulni_asm.S create mode 100644 arch/x86/crypto/polyval-clmulni_glue.c diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 2831685adf6fb..b9847152acd82 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -69,6 +69,9 @@ libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o +obj-$(CONFIG_CRYPTO_POLYVAL_CLMUL_NI) += polyval-clmulni.o +polyval-clmulni-y := polyval-clmulni_asm.o polyval-clmulni_glue.o + obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o crc32c-intel-y := crc32c-intel_glue.o crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o diff --git a/arch/x86/crypto/polyval-clmulni_asm.S b/arch/x86/crypto/polyval-clmulni_asm.S new file mode 100644 index 0000000000000..a6ebe4e7dd2b7 --- /dev/null +++ b/arch/x86/crypto/polyval-clmulni_asm.S @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2021 Google LLC + */ +/* + * This is an efficient implementation of POLYVAL using intel PCLMULQDQ-NI + * instructions. It works on 8 blocks at a time, by precomputing the first 8 + * keys powers h^8, ..., h^1 in the POLYVAL finite field. This precomputation + * allows us to split finite field multiplication into two steps. + * + * In the first step, we consider h^i, m_i as normal polynomials of degree less + * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication + * is simply polynomial multiplication. 
+ * + * In the second step, we compute the reduction of p(x) modulo the finite field + * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1. + * + * This two step process is equivalent to computing h^8m_0 + ... + h^1m_7 where + * multiplication is finite field multiplication. The advantage is that the + * two-step process only requires 1 finite field reduction for every 8 + * polynomial multiplications. Further parallelism is gained by interleaving the + * multiplications and polynomial reductions. + */ + +#include +#include + +#define STRIDE_BLOCKS 8 + +#define GSTAR %xmm7 +#define PL %xmm8 +#define PH %xmm9 +#define TMP_XMM %xmm11 +#define LO %xmm12 +#define HI %xmm13 +#define MI %xmm14 +#define SUM %xmm15 + +#define KEY_POWERS %rdi +#define MSG %rsi +#define BLOCKS_LEFT %rdx +#define ACCUMULATOR %rcx +#define TMP %rax + +.section .rodata.cst16.gstar, "aM", @progbits, 16 +.align 16 + +.Lgstar: + .quad 0xc200000000000000, 0xc200000000000000 + +.text + +/* + * Performs schoolbook1_iteration on two lists of 128-bit polynomials of length + * count pointed to by MSG and KEY_POWERS. + */ +.macro schoolbook1 count + .set i, 0 + .rept (\count) + schoolbook1_iteration i 0 + .set i, (i +1) + .endr +.endm + +/* + * Computes the product of two 128-bit polynomials at the memory locations + * specified by (MSG + 16*i) and (KEY_POWERS + 16*i) and XORs the components of + * the 256-bit product into LO, MI, HI. + * + * Given: + * X = [X_1 : X_0] + * Y = [Y_1 : Y_0] + * + * We compute: + * LO += X_0 * Y_0 + * MI += X_0 * Y_1 + X_1 * Y_0 + * HI += X_1 * Y_1 + * + * Later, the 256-bit result can be extracted as: + * [HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0] + * This step is done when computing the polynomial reduction for efficiency + * reasons. + * + * If xor_sum == 1, then also XOR the value of SUM into m_0. This avoids an + * extra multiplication of SUM and h^8. + */ +.macro schoolbook1_iteration i xor_sum + movups (16*\i)(MSG), %xmm0 + .if (\i == 0 && \xor_sum == 1) + pxor SUM, %xmm0 + .endif + vpclmulqdq $0x01, (16*\i)(KEY_POWERS), %xmm0, %xmm2 + vpclmulqdq $0x00, (16*\i)(KEY_POWERS), %xmm0, %xmm1 + vpclmulqdq $0x10, (16*\i)(KEY_POWERS), %xmm0, %xmm3 + vpclmulqdq $0x11, (16*\i)(KEY_POWERS), %xmm0, %xmm4 + vpxor %xmm2, MI, MI + vpxor %xmm1, LO, LO + vpxor %xmm4, HI, HI + vpxor %xmm3, MI, MI +.endm + +/* + * Performs the same computation as schoolbook1_iteration, except we expect the + * arguments to already be loaded into xmm0 and xmm1 and we set the result + * registers LO, MI, and HI directly rather than XOR'ing into them. + */ +.macro schoolbook1_noload + vpclmulqdq $0x01, %xmm0, %xmm1, MI + vpclmulqdq $0x10, %xmm0, %xmm1, %xmm2 + vpclmulqdq $0x00, %xmm0, %xmm1, LO + vpclmulqdq $0x11, %xmm0, %xmm1, HI + vpxor %xmm2, MI, MI +.endm + +/* + * Computes the 256-bit polynomial represented by LO, HI, MI. Stores + * the result in PL, PH. + * [PH : PL] = [HI_1 : HI_0 + MI_1 : LO_1 + MI_0 : LO_0] + */ +.macro schoolbook2 + vpslldq $8, MI, PL + vpsrldq $8, MI, PH + pxor LO, PL + pxor HI, PH +.endm + +/* + * Computes the 128-bit reduction of PH : PL. Stores the result in dest. + * + * This macro computes p(x) mod g(x) where p(x) is in montgomery form and g(x) = + * x^128 + x^127 + x^126 + x^121 + 1. + * + * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the + * product of two 128-bit polynomials in Montgomery form. We need to reduce it + * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor + * of x^128, this product has two extra factors of x^128. 
To get it back into + * Montgomery form, we need to remove one of these factors by dividing by x^128. + * + * To accomplish both of these goals, we add multiples of g(x) that cancel out + * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low + * bits are zero, the polynomial division by x^128 can be done by right shifting. + * + * Since the only nonzero term in the low 64 bits of g(x) is the constant term, + * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). The CPU can + * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 + + * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to + * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T + * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191. + * + * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits + * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1 + * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) * + * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 : + * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0). + * + * So our final computation is: + * T = T_1 : T_0 = g*(x) * P_0 + * V = V_1 : V_0 = g*(x) * (P_1 + T_0) + * p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0 + * + * The implementation below saves a XOR instruction by computing P_1 + T_0 : P_0 + * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 : + * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V. + */ +.macro montgomery_reduction dest + vpclmulqdq $0x00, PL, GSTAR, TMP_XMM # TMP_XMM = T_1 : T_0 = P_0 * g*(x) + pshufd $0b01001110, TMP_XMM, TMP_XMM # TMP_XMM = T_0 : T_1 + pxor PL, TMP_XMM # TMP_XMM = P_1 + T_0 : P_0 + T_1 + pxor TMP_XMM, PH # PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1 + pclmulqdq $0x11, GSTAR, TMP_XMM # TMP_XMM = V_1 : V_0 = V = [(P_1 + T_0) * g*(x)] + vpxor TMP_XMM, PH, \dest +.endm + +/* + * Compute schoolbook multiplication for 8 blocks + * m_0h^8 + ... + m_7h^1 + * + * If reduce is set, also computes the montgomery reduction of the + * previous full_stride call and XORs with the first message block. + * (m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1. + * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0. 
+ */ +.macro full_stride reduce + pxor LO, LO + pxor HI, HI + pxor MI, MI + + schoolbook1_iteration 7 0 + .if \reduce + vpclmulqdq $0x00, PL, GSTAR, TMP_XMM + .endif + + schoolbook1_iteration 6 0 + .if \reduce + pshufd $0b01001110, TMP_XMM, TMP_XMM + .endif + + schoolbook1_iteration 5 0 + .if \reduce + pxor PL, TMP_XMM + .endif + + schoolbook1_iteration 4 0 + .if \reduce + pxor TMP_XMM, PH + .endif + + schoolbook1_iteration 3 0 + .if \reduce + pclmulqdq $0x11, GSTAR, TMP_XMM + .endif + + schoolbook1_iteration 2 0 + .if \reduce + vpxor TMP_XMM, PH, SUM + .endif + + schoolbook1_iteration 1 0 + + schoolbook1_iteration 0 1 + + addq $(8*16), MSG + schoolbook2 +.endm + +/* + * Process BLOCKS_LEFT blocks, where 0 < BLOCKS_LEFT < STRIDE_BLOCKS + */ +.macro partial_stride + mov BLOCKS_LEFT, TMP + shlq $4, TMP + addq $(16*STRIDE_BLOCKS), KEY_POWERS + subq TMP, KEY_POWERS + + movups (MSG), %xmm0 + pxor SUM, %xmm0 + movaps (KEY_POWERS), %xmm1 + schoolbook1_noload + dec BLOCKS_LEFT + addq $16, MSG + addq $16, KEY_POWERS + + test $4, BLOCKS_LEFT + jz .Lpartial4BlocksDone + schoolbook1 4 + addq $(4*16), MSG + addq $(4*16), KEY_POWERS +.Lpartial4BlocksDone: + test $2, BLOCKS_LEFT + jz .Lpartial2BlocksDone + schoolbook1 2 + addq $(2*16), MSG + addq $(2*16), KEY_POWERS +.Lpartial2BlocksDone: + test $1, BLOCKS_LEFT + jz .LpartialDone + schoolbook1 1 +.LpartialDone: + schoolbook2 + montgomery_reduction SUM +.endm + +/* + * Perform montgomery multiplication in GF(2^128) and store result in op1. + * + * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 + * If op1, op2 are in montgomery form, this computes the montgomery + * form of op1*op2. + * + * void clmul_polyval_mul(u8 *op1, const u8 *op2); + */ +SYM_FUNC_START(clmul_polyval_mul) + FRAME_BEGIN + vmovdqa .Lgstar(%rip), GSTAR + movups (%rdi), %xmm0 + movups (%rsi), %xmm1 + schoolbook1_noload + schoolbook2 + montgomery_reduction SUM + movups SUM, (%rdi) + FRAME_END + RET +SYM_FUNC_END(clmul_polyval_mul) + +/* + * Perform polynomial evaluation as specified by POLYVAL. This computes: + * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} + * where n=nblocks, h is the hash key, and m_i are the message blocks. + * + * rdi - pointer to precomputed key powers h^8 ... h^1 + * rsi - pointer to message blocks + * rdx - number of blocks to hash + * rcx - pointer to the accumulator + * + * void clmul_polyval_update(const struct polyval_tfm_ctx *keys, + * const u8 *in, size_t nblocks, u8 *accumulator); + */ +SYM_FUNC_START(clmul_polyval_update) + FRAME_BEGIN + vmovdqa .Lgstar(%rip), GSTAR + movups (ACCUMULATOR), SUM + subq $STRIDE_BLOCKS, BLOCKS_LEFT + js .LstrideLoopExit + full_stride 0 + subq $STRIDE_BLOCKS, BLOCKS_LEFT + js .LstrideLoopExitReduce +.LstrideLoop: + full_stride 1 + subq $STRIDE_BLOCKS, BLOCKS_LEFT + jns .LstrideLoop +.LstrideLoopExitReduce: + montgomery_reduction SUM +.LstrideLoopExit: + add $STRIDE_BLOCKS, BLOCKS_LEFT + jz .LskipPartial + partial_stride +.LskipPartial: + movups SUM, (ACCUMULATOR) + FRAME_END + RET +SYM_FUNC_END(clmul_polyval_update) diff --git a/arch/x86/crypto/polyval-clmulni_glue.c b/arch/x86/crypto/polyval-clmulni_glue.c new file mode 100644 index 0000000000000..b7664d0188510 --- /dev/null +++ b/arch/x86/crypto/polyval-clmulni_glue.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Glue code for POLYVAL using PCMULQDQ-NI + * + * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen + * Copyright (c) 2009 Intel Corp. 
+ * Author: Huang Ying + * Copyright 2021 Google LLC + */ + +/* + * Glue code based on ghash-clmulni-intel_glue.c. + * + * This implementation of POLYVAL uses montgomery multiplication + * accelerated by PCLMULQDQ-NI to implement the finite field + * operations. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NUM_KEY_POWERS 8 + +struct polyval_tfm_ctx { + /* + * These powers must be in the order h^8, ..., h^1. + */ + u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE]; +}; + +struct polyval_desc_ctx { + u8 buffer[POLYVAL_BLOCK_SIZE]; + u32 bytes; +}; + +asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys, + const u8 *in, size_t nblocks, u8 *accumulator); +asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2); + +static void internal_polyval_update(const struct polyval_tfm_ctx *keys, + const u8 *in, size_t nblocks, u8 *accumulator) +{ + if (likely(crypto_simd_usable())) { + kernel_fpu_begin(); + clmul_polyval_update(keys, in, nblocks, accumulator); + kernel_fpu_end(); + } else { + polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in, + nblocks, accumulator); + } +} + +static void internal_polyval_mul(u8 *op1, const u8 *op2) +{ + if (likely(crypto_simd_usable())) { + kernel_fpu_begin(); + clmul_polyval_mul(op1, op2); + kernel_fpu_end(); + } else { + polyval_mul_non4k(op1, op2); + } +} + +static int polyval_x86_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm); + int i; + + if (keylen != POLYVAL_BLOCK_SIZE) + return -EINVAL; + + memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE); + + for (i = NUM_KEY_POWERS-2; i >= 0; i--) { + memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE); + internal_polyval_mul(tctx->key_powers[i], + tctx->key_powers[i+1]); + } + + return 0; +} + +static int polyval_x86_init(struct shash_desc *desc) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + + memset(dctx, 0, sizeof(*dctx)); + + return 0; +} + +static int polyval_x86_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + u8 *pos; + unsigned int nblocks; + unsigned int n; + + if (dctx->bytes) { + n = min(srclen, dctx->bytes); + pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes; + + dctx->bytes -= n; + srclen -= n; + + while (n--) + *pos++ ^= *src++; + + if (!dctx->bytes) + internal_polyval_mul(dctx->buffer, + tctx->key_powers[NUM_KEY_POWERS-1]); + } + + while (srclen >= POLYVAL_BLOCK_SIZE) { + /* Allow rescheduling every 4K bytes. 
*/ + nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; + internal_polyval_update(tctx, src, nblocks, dctx->buffer); + srclen -= nblocks * POLYVAL_BLOCK_SIZE; + src += nblocks * POLYVAL_BLOCK_SIZE; + } + + if (srclen) { + dctx->bytes = POLYVAL_BLOCK_SIZE - srclen; + pos = dctx->buffer; + while (srclen--) + *pos++ ^= *src++; + } + + return 0; +} + +static int polyval_x86_final(struct shash_desc *desc, u8 *dst) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + + if (dctx->bytes) { + internal_polyval_mul(dctx->buffer, + tctx->key_powers[NUM_KEY_POWERS-1]); + } + + memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE); + + return 0; +} + +static struct shash_alg polyval_alg = { + .digestsize = POLYVAL_DIGEST_SIZE, + .init = polyval_x86_init, + .update = polyval_x86_update, + .final = polyval_x86_final, + .setkey = polyval_x86_setkey, + .descsize = sizeof(struct polyval_desc_ctx), + .base = { + .cra_name = "polyval", + .cra_driver_name = "polyval-clmulni", + .cra_priority = 200, + .cra_blocksize = POLYVAL_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct polyval_tfm_ctx), + .cra_module = THIS_MODULE, + }, +}; + +__maybe_unused static const struct x86_cpu_id pcmul_cpu_id[] = { + X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id); + +static int __init polyval_clmulni_mod_init(void) +{ + if (!x86_match_cpu(pcmul_cpu_id)) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_AVX)) + return -ENODEV; + + return crypto_register_shash(&polyval_alg); +} + +static void __exit polyval_clmulni_mod_exit(void) +{ + crypto_unregister_shash(&polyval_alg); +} + +module_init(polyval_clmulni_mod_init); +module_exit(polyval_clmulni_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("POLYVAL hash function accelerated by PCLMULQDQ-NI"); +MODULE_ALIAS_CRYPTO("polyval"); +MODULE_ALIAS_CRYPTO("polyval-clmulni"); diff --git a/crypto/Kconfig b/crypto/Kconfig index dfcc3235e918c..9b654984de799 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -792,6 +792,15 @@ config CRYPTO_POLYVAL POLYVAL is the hash function used in HCTR2. It is not a general-purpose cryptographic hash function. +config CRYPTO_POLYVAL_CLMUL_NI + tristate "POLYVAL hash function (CLMUL-NI accelerated)" + depends on X86 && 64BIT + select CRYPTO_POLYVAL + help + This is the x86_64 CLMUL-NI accelerated implementation of POLYVAL. It is + used to efficiently implement HCTR2 on x86-64 processors that support + carry-less multiplication instructions. + config CRYPTO_POLY1305 tristate "Poly1305 authenticator algorithm" select CRYPTO_HASH diff --git a/crypto/polyval-generic.c b/crypto/polyval-generic.c index bf2b03b7bfc04..16bfa6925b31e 100644 --- a/crypto/polyval-generic.c +++ b/crypto/polyval-generic.c @@ -76,6 +76,46 @@ static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE], put_unaligned(swab64(b), (u64 *)&dst[0]); } +/* + * Performs multiplication in the POLYVAL field using the GHASH field as a + * subroutine. This function is used as a fallback for hardware accelerated + * implementations when simd registers are unavailable. + * + * Note: This function is not used for polyval-generic, instead we use the 4k + * lookup table implementation for finite field multiplication. + */ +void polyval_mul_non4k(u8 *op1, const u8 *op2) +{ + be128 a, b; + + // Assume one argument is in Montgomery form and one is not. 
+ copy_and_reverse((u8 *)&a, op1); + copy_and_reverse((u8 *)&b, op2); + gf128mul_x_lle(&a, &a); + gf128mul_lle(&a, &b); + copy_and_reverse(op1, (u8 *)&a); +} +EXPORT_SYMBOL_GPL(polyval_mul_non4k); + +/* + * Perform a POLYVAL update using non4k multiplication. This function is used + * as a fallback for hardware accelerated implementations when simd registers + * are unavailable. + * + * Note: This function is not used for polyval-generic, instead we use the 4k + * lookup table implementation of finite field multiplication. + */ +void polyval_update_non4k(const u8 *key, const u8 *in, + size_t nblocks, u8 *accumulator) +{ + while (nblocks--) { + crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE); + polyval_mul_non4k(accumulator, key); + in += POLYVAL_BLOCK_SIZE; + } +} +EXPORT_SYMBOL_GPL(polyval_update_non4k); + static int polyval_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h index b14c38aa91663..1d630f371f777 100644 --- a/include/crypto/polyval.h +++ b/include/crypto/polyval.h @@ -14,4 +14,9 @@ #define POLYVAL_BLOCK_SIZE 16 #define POLYVAL_DIGEST_SIZE 16 +void polyval_mul_non4k(u8 *op1, const u8 *op2); + +void polyval_update_non4k(const u8 *key, const u8 *in, + size_t nblocks, u8 *accumulator); + #endif From 9d2c0b485c46c7c5f781067c60300def5d1365cb Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:15:00 +0000 Subject: [PATCH 12/89] crypto: arm64/polyval - Add PMULL accelerated implementation of POLYVAL Add hardware accelerated version of POLYVAL for ARM64 CPUs with Crypto Extensions support. This implementation is accelerated using PMULL instructions to perform the finite field computations. For added efficiency, 8 blocks of the message are processed simultaneously by precomputing the first 8 powers of the key. Karatsuba multiplication is used instead of Schoolbook multiplication because it was found to be slightly faster on ARM64 CPUs. Montgomery reduction must be used instead of Barrett reduction due to the difference in modulus between POLYVAL's field and other finite fields. 
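As a rough portable illustration of the structure described above (not the code
added by this patch), the per-stride computation with precomputed key powers
amounts to accumulating eight unreduced 256-bit products and then performing a
single Montgomery reduction, mirroring the reduction formula documented in the
.S file. All names below (clmul64, clmul128_acc, montgomery_reduce,
polyval_stride) are hypothetical, the code is unoptimized, and each 16-byte
block is assumed to be already loaded as two little-endian 64-bit words
(lo = first eight bytes):

  #include <stdint.h>
  #include <stddef.h>

  #define GSTAR 0xc200000000000000ULL   /* bits 64..127 of g(x) */
  #define STRIDE_BLOCKS 8

  struct gf128 { uint64_t lo, hi; };

  /* carry-less 64x64 -> 128 bit multiply */
  static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
  {
          uint64_t l = 0, h = 0;
          int i;

          for (i = 0; i < 64; i++) {
                  if ((b >> i) & 1) {
                          l ^= a << i;
                          h ^= i ? a >> (64 - i) : 0;
                  }
          }
          *lo = l;
          *hi = h;
  }

  /* schoolbook 128x128 -> 256 bit multiply, XORed into p[0..3] (p[0] lowest) */
  static void clmul128_acc(struct gf128 a, struct gf128 b, uint64_t p[4])
  {
          uint64_t l0, l1, h0, h1, m0, m1, n0, n1;

          clmul64(a.lo, b.lo, &l0, &l1);
          clmul64(a.hi, b.hi, &h0, &h1);
          clmul64(a.lo, b.hi, &m0, &m1);
          clmul64(a.hi, b.lo, &n0, &n1);
          p[0] ^= l0;
          p[1] ^= l1 ^ m0 ^ n0;
          p[2] ^= h0 ^ m1 ^ n1;
          p[3] ^= h1;
  }

  /* Montgomery reduction of p[0..3], following the comments in the .S file */
  static struct gf128 montgomery_reduce(const uint64_t p[4])
  {
          uint64_t t0, t1, v0, v1;
          struct gf128 r;

          clmul64(p[0], GSTAR, &t0, &t1);         /* T = g*(x) * P_0 */
          clmul64(p[1] ^ t0, GSTAR, &v0, &v1);    /* V = g*(x) * (P_1 + T_0) */
          r.lo = p[2] ^ p[0] ^ t1 ^ v0;
          r.hi = p[3] ^ p[1] ^ t0 ^ v1;
          return r;
  }

  /*
   * One stride: acc' = REDUCE(h^8*(acc + m_0) + h^7*m_1 + ... + h^1*m_7),
   * i.e. one field reduction per eight multiplications. key_powers[] holds
   * h^8 ... h^1 exactly as the driver's setkey computes them.
   */
  static struct gf128 polyval_stride(const struct gf128 key_powers[STRIDE_BLOCKS],
                                     const struct gf128 msg[STRIDE_BLOCKS],
                                     struct gf128 acc)
  {
          uint64_t p[4] = { 0, 0, 0, 0 };
          struct gf128 m0 = { msg[0].lo ^ acc.lo, msg[0].hi ^ acc.hi };
          size_t i;

          clmul128_acc(key_powers[0], m0, p);
          for (i = 1; i < STRIDE_BLOCKS; i++)
                  clmul128_acc(key_powers[i], msg[i], p);
          return montgomery_reduce(p);
  }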
More information on POLYVAL can be found in the HCTR2 paper: "Length-preserving encryption with HCTR2": https://eprint.iacr.org/2021/1441.pdf Signed-off-by: Nathan Huckleberry Reviewed-by: Ard Biesheuvel Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/arm64/crypto/Kconfig | 5 + arch/arm64/crypto/Makefile | 3 + arch/arm64/crypto/polyval-ce-core.S | 361 ++++++++++++++++++++++++++++ arch/arm64/crypto/polyval-ce-glue.c | 191 +++++++++++++++ 4 files changed, 560 insertions(+) create mode 100644 arch/arm64/crypto/polyval-ce-core.S create mode 100644 arch/arm64/crypto/polyval-ce-glue.c diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 74d5bed9394f5..4391a463abd77 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -72,6 +72,11 @@ config CRYPTO_GHASH_ARM64_CE select CRYPTO_GF128MUL select CRYPTO_LIB_AES +config CRYPTO_POLYVAL_ARM64_CE + tristate "POLYVAL using ARMv8 Crypto Extensions (for HCTR2)" + depends on KERNEL_MODE_NEON + select CRYPTO_POLYVAL + config CRYPTO_CRCT10DIF_ARM64_CE tristate "CRCT10DIF digest algorithm using PMULL instructions" depends on KERNEL_MODE_NEON && CRC_T10DIF diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index bea8995133b1f..24bb0c4610de2 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -32,6 +32,9 @@ sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o +obj-$(CONFIG_CRYPTO_POLYVAL_ARM64_CE) += polyval-ce.o +polyval-ce-y := polyval-ce-glue.o polyval-ce-core.o + obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o diff --git a/arch/arm64/crypto/polyval-ce-core.S b/arch/arm64/crypto/polyval-ce-core.S new file mode 100644 index 0000000000000..b5326540d2e34 --- /dev/null +++ b/arch/arm64/crypto/polyval-ce-core.S @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Implementation of POLYVAL using ARMv8 Crypto Extensions. + * + * Copyright 2021 Google LLC + */ +/* + * This is an efficient implementation of POLYVAL using ARMv8 Crypto Extensions + * It works on 8 blocks at a time, by precomputing the first 8 keys powers h^8, + * ..., h^1 in the POLYVAL finite field. This precomputation allows us to split + * finite field multiplication into two steps. + * + * In the first step, we consider h^i, m_i as normal polynomials of degree less + * than 128. We then compute p(x) = h^8m_0 + ... + h^1m_7 where multiplication + * is simply polynomial multiplication. + * + * In the second step, we compute the reduction of p(x) modulo the finite field + * modulus g(x) = x^128 + x^127 + x^126 + x^121 + 1. + * + * This two step process is equivalent to computing h^8m_0 + ... + h^1m_7 where + * multiplication is finite field multiplication. The advantage is that the + * two-step process only requires 1 finite field reduction for every 8 + * polynomial multiplications. Further parallelism is gained by interleaving the + * multiplications and polynomial reductions. 
+ */ + +#include +#define STRIDE_BLOCKS 8 + +KEY_POWERS .req x0 +MSG .req x1 +BLOCKS_LEFT .req x2 +ACCUMULATOR .req x3 +KEY_START .req x10 +EXTRA_BYTES .req x11 +TMP .req x13 + +M0 .req v0 +M1 .req v1 +M2 .req v2 +M3 .req v3 +M4 .req v4 +M5 .req v5 +M6 .req v6 +M7 .req v7 +KEY8 .req v8 +KEY7 .req v9 +KEY6 .req v10 +KEY5 .req v11 +KEY4 .req v12 +KEY3 .req v13 +KEY2 .req v14 +KEY1 .req v15 +PL .req v16 +PH .req v17 +TMP_V .req v18 +LO .req v20 +MI .req v21 +HI .req v22 +SUM .req v23 +GSTAR .req v24 + + .text + + .arch armv8-a+crypto + .align 4 + +.Lgstar: + .quad 0xc200000000000000, 0xc200000000000000 + +/* + * Computes the product of two 128-bit polynomials in X and Y and XORs the + * components of the 256-bit product into LO, MI, HI. + * + * Given: + * X = [X_1 : X_0] + * Y = [Y_1 : Y_0] + * + * We compute: + * LO += X_0 * Y_0 + * MI += (X_0 + X_1) * (Y_0 + Y_1) + * HI += X_1 * Y_1 + * + * Later, the 256-bit result can be extracted as: + * [HI_1 : HI_0 + HI_1 + MI_1 + LO_1 : LO_1 + HI_0 + MI_0 + LO_0 : LO_0] + * This step is done when computing the polynomial reduction for efficiency + * reasons. + * + * Karatsuba multiplication is used instead of Schoolbook multiplication because + * it was found to be slightly faster on ARM64 CPUs. + * + */ +.macro karatsuba1 X Y + X .req \X + Y .req \Y + ext v25.16b, X.16b, X.16b, #8 + ext v26.16b, Y.16b, Y.16b, #8 + eor v25.16b, v25.16b, X.16b + eor v26.16b, v26.16b, Y.16b + pmull2 v28.1q, X.2d, Y.2d + pmull v29.1q, X.1d, Y.1d + pmull v27.1q, v25.1d, v26.1d + eor HI.16b, HI.16b, v28.16b + eor LO.16b, LO.16b, v29.16b + eor MI.16b, MI.16b, v27.16b + .unreq X + .unreq Y +.endm + +/* + * Same as karatsuba1, except overwrites HI, LO, MI rather than XORing into + * them. + */ +.macro karatsuba1_store X Y + X .req \X + Y .req \Y + ext v25.16b, X.16b, X.16b, #8 + ext v26.16b, Y.16b, Y.16b, #8 + eor v25.16b, v25.16b, X.16b + eor v26.16b, v26.16b, Y.16b + pmull2 HI.1q, X.2d, Y.2d + pmull LO.1q, X.1d, Y.1d + pmull MI.1q, v25.1d, v26.1d + .unreq X + .unreq Y +.endm + +/* + * Computes the 256-bit polynomial represented by LO, HI, MI. Stores + * the result in PL, PH. + * [PH : PL] = + * [HI_1 : HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0 : LO_0] + */ +.macro karatsuba2 + // v4 = [HI_1 + MI_1 : HI_0 + MI_0] + eor v4.16b, HI.16b, MI.16b + // v4 = [HI_1 + MI_1 + LO_1 : HI_0 + MI_0 + LO_0] + eor v4.16b, v4.16b, LO.16b + // v5 = [HI_0 : LO_1] + ext v5.16b, LO.16b, HI.16b, #8 + // v4 = [HI_1 + HI_0 + MI_1 + LO_1 : HI_0 + MI_0 + LO_1 + LO_0] + eor v4.16b, v4.16b, v5.16b + // HI = [HI_0 : HI_1] + ext HI.16b, HI.16b, HI.16b, #8 + // LO = [LO_0 : LO_1] + ext LO.16b, LO.16b, LO.16b, #8 + // PH = [HI_1 : HI_1 + HI_0 + MI_1 + LO_1] + ext PH.16b, v4.16b, HI.16b, #8 + // PL = [HI_0 + MI_0 + LO_1 + LO_0 : LO_0] + ext PL.16b, LO.16b, v4.16b, #8 +.endm + +/* + * Computes the 128-bit reduction of PH : PL. Stores the result in dest. + * + * This macro computes p(x) mod g(x) where p(x) is in montgomery form and g(x) = + * x^128 + x^127 + x^126 + x^121 + 1. + * + * We have a 256-bit polynomial PH : PL = P_3 : P_2 : P_1 : P_0 that is the + * product of two 128-bit polynomials in Montgomery form. We need to reduce it + * mod g(x). Also, since polynomials in Montgomery form have an "extra" factor + * of x^128, this product has two extra factors of x^128. To get it back into + * Montgomery form, we need to remove one of these factors by dividing by x^128. 
+ * + * To accomplish both of these goals, we add multiples of g(x) that cancel out + * the low 128 bits P_1 : P_0, leaving just the high 128 bits. Since the low + * bits are zero, the polynomial division by x^128 can be done by right + * shifting. + * + * Since the only nonzero term in the low 64 bits of g(x) is the constant term, + * the multiple of g(x) needed to cancel out P_0 is P_0 * g(x). The CPU can + * only do 64x64 bit multiplications, so split P_0 * g(x) into x^128 * P_0 + + * x^64 * g*(x) * P_0 + P_0, where g*(x) is bits 64-127 of g(x). Adding this to + * the original polynomial gives P_3 : P_2 + P_0 + T_1 : P_1 + T_0 : 0, where T + * = T_1 : T_0 = g*(x) * P_0. Thus, bits 0-63 got "folded" into bits 64-191. + * + * Repeating this same process on the next 64 bits "folds" bits 64-127 into bits + * 128-255, giving the answer in bits 128-255. This time, we need to cancel P_1 + * + T_0 in bits 64-127. The multiple of g(x) required is (P_1 + T_0) * g(x) * + * x^64. Adding this to our previous computation gives P_3 + P_1 + T_0 + V_1 : + * P_2 + P_0 + T_1 + V_0 : 0 : 0, where V = V_1 : V_0 = g*(x) * (P_1 + T_0). + * + * So our final computation is: + * T = T_1 : T_0 = g*(x) * P_0 + * V = V_1 : V_0 = g*(x) * (P_1 + T_0) + * p(x) / x^{128} mod g(x) = P_3 + P_1 + T_0 + V_1 : P_2 + P_0 + T_1 + V_0 + * + * The implementation below saves a XOR instruction by computing P_1 + T_0 : P_0 + * + T_1 and XORing into dest, rather than separately XORing P_1 : P_0 and T_0 : + * T_1 into dest. This allows us to reuse P_1 + T_0 when computing V. + */ +.macro montgomery_reduction dest + DEST .req \dest + // TMP_V = T_1 : T_0 = P_0 * g*(x) + pmull TMP_V.1q, PL.1d, GSTAR.1d + // TMP_V = T_0 : T_1 + ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 + // TMP_V = P_1 + T_0 : P_0 + T_1 + eor TMP_V.16b, PL.16b, TMP_V.16b + // PH = P_3 + P_1 + T_0 : P_2 + P_0 + T_1 + eor PH.16b, PH.16b, TMP_V.16b + // TMP_V = V_1 : V_0 = (P_1 + T_0) * g*(x) + pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d + eor DEST.16b, PH.16b, TMP_V.16b + .unreq DEST +.endm + +/* + * Compute Polyval on 8 blocks. + * + * If reduce is set, also computes the montgomery reduction of the + * previous full_stride call and XORs with the first message block. + * (m_0 + REDUCE(PL, PH))h^8 + ... + m_7h^1. + * I.e., the first multiplication uses m_0 + REDUCE(PL, PH) instead of m_0. + * + * Sets PL, PH. + */ +.macro full_stride reduce + eor LO.16b, LO.16b, LO.16b + eor MI.16b, MI.16b, MI.16b + eor HI.16b, HI.16b, HI.16b + + ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 + ld1 {M4.16b, M5.16b, M6.16b, M7.16b}, [MSG], #64 + + karatsuba1 M7 KEY1 + .if \reduce + pmull TMP_V.1q, PL.1d, GSTAR.1d + .endif + + karatsuba1 M6 KEY2 + .if \reduce + ext TMP_V.16b, TMP_V.16b, TMP_V.16b, #8 + .endif + + karatsuba1 M5 KEY3 + .if \reduce + eor TMP_V.16b, PL.16b, TMP_V.16b + .endif + + karatsuba1 M4 KEY4 + .if \reduce + eor PH.16b, PH.16b, TMP_V.16b + .endif + + karatsuba1 M3 KEY5 + .if \reduce + pmull2 TMP_V.1q, TMP_V.2d, GSTAR.2d + .endif + + karatsuba1 M2 KEY6 + .if \reduce + eor SUM.16b, PH.16b, TMP_V.16b + .endif + + karatsuba1 M1 KEY7 + eor M0.16b, M0.16b, SUM.16b + + karatsuba1 M0 KEY8 + karatsuba2 +.endm + +/* + * Handle any extra blocks after full_stride loop. 
+ */ +.macro partial_stride + add KEY_POWERS, KEY_START, #(STRIDE_BLOCKS << 4) + sub KEY_POWERS, KEY_POWERS, BLOCKS_LEFT, lsl #4 + ld1 {KEY1.16b}, [KEY_POWERS], #16 + + ld1 {TMP_V.16b}, [MSG], #16 + eor SUM.16b, SUM.16b, TMP_V.16b + karatsuba1_store KEY1 SUM + sub BLOCKS_LEFT, BLOCKS_LEFT, #1 + + tst BLOCKS_LEFT, #4 + beq .Lpartial4BlocksDone + ld1 {M0.16b, M1.16b, M2.16b, M3.16b}, [MSG], #64 + ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 + karatsuba1 M0 KEY8 + karatsuba1 M1 KEY7 + karatsuba1 M2 KEY6 + karatsuba1 M3 KEY5 +.Lpartial4BlocksDone: + tst BLOCKS_LEFT, #2 + beq .Lpartial2BlocksDone + ld1 {M0.16b, M1.16b}, [MSG], #32 + ld1 {KEY8.16b, KEY7.16b}, [KEY_POWERS], #32 + karatsuba1 M0 KEY8 + karatsuba1 M1 KEY7 +.Lpartial2BlocksDone: + tst BLOCKS_LEFT, #1 + beq .LpartialDone + ld1 {M0.16b}, [MSG], #16 + ld1 {KEY8.16b}, [KEY_POWERS], #16 + karatsuba1 M0 KEY8 +.LpartialDone: + karatsuba2 + montgomery_reduction SUM +.endm + +/* + * Perform montgomery multiplication in GF(2^128) and store result in op1. + * + * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 + * If op1, op2 are in montgomery form, this computes the montgomery + * form of op1*op2. + * + * void pmull_polyval_mul(u8 *op1, const u8 *op2); + */ +SYM_FUNC_START(pmull_polyval_mul) + adr TMP, .Lgstar + ld1 {GSTAR.2d}, [TMP] + ld1 {v0.16b}, [x0] + ld1 {v1.16b}, [x1] + karatsuba1_store v0 v1 + karatsuba2 + montgomery_reduction SUM + st1 {SUM.16b}, [x0] + ret +SYM_FUNC_END(pmull_polyval_mul) + +/* + * Perform polynomial evaluation as specified by POLYVAL. This computes: + * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} + * where n=nblocks, h is the hash key, and m_i are the message blocks. + * + * x0 - pointer to precomputed key powers h^8 ... h^1 + * x1 - pointer to message blocks + * x2 - number of blocks to hash + * x3 - pointer to accumulator + * + * void pmull_polyval_update(const struct polyval_ctx *ctx, const u8 *in, + * size_t nblocks, u8 *accumulator); + */ +SYM_FUNC_START(pmull_polyval_update) + adr TMP, .Lgstar + mov KEY_START, KEY_POWERS + ld1 {GSTAR.2d}, [TMP] + ld1 {SUM.16b}, [ACCUMULATOR] + subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + blt .LstrideLoopExit + ld1 {KEY8.16b, KEY7.16b, KEY6.16b, KEY5.16b}, [KEY_POWERS], #64 + ld1 {KEY4.16b, KEY3.16b, KEY2.16b, KEY1.16b}, [KEY_POWERS], #64 + full_stride 0 + subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + blt .LstrideLoopExitReduce +.LstrideLoop: + full_stride 1 + subs BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + bge .LstrideLoop +.LstrideLoopExitReduce: + montgomery_reduction SUM +.LstrideLoopExit: + adds BLOCKS_LEFT, BLOCKS_LEFT, #STRIDE_BLOCKS + beq .LskipPartial + partial_stride +.LskipPartial: + st1 {SUM.16b}, [ACCUMULATOR] + ret +SYM_FUNC_END(pmull_polyval_update) diff --git a/arch/arm64/crypto/polyval-ce-glue.c b/arch/arm64/crypto/polyval-ce-glue.c new file mode 100644 index 0000000000000..0a3b5718df855 --- /dev/null +++ b/arch/arm64/crypto/polyval-ce-glue.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Glue code for POLYVAL using ARMv8 Crypto Extensions + * + * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen + * Copyright (c) 2009 Intel Corp. + * Author: Huang Ying + * Copyright 2021 Google LLC + */ + +/* + * Glue code based on ghash-clmulni-intel_glue.c. + * + * This implementation of POLYVAL uses montgomery multiplication accelerated by + * ARMv8 Crypto Extensions instructions to implement the finite field operations. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NUM_KEY_POWERS 8 + +struct polyval_tfm_ctx { + /* + * These powers must be in the order h^8, ..., h^1. + */ + u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE]; +}; + +struct polyval_desc_ctx { + u8 buffer[POLYVAL_BLOCK_SIZE]; + u32 bytes; +}; + +asmlinkage void pmull_polyval_update(const struct polyval_tfm_ctx *keys, + const u8 *in, size_t nblocks, u8 *accumulator); +asmlinkage void pmull_polyval_mul(u8 *op1, const u8 *op2); + +static void internal_polyval_update(const struct polyval_tfm_ctx *keys, + const u8 *in, size_t nblocks, u8 *accumulator) +{ + if (likely(crypto_simd_usable())) { + kernel_neon_begin(); + pmull_polyval_update(keys, in, nblocks, accumulator); + kernel_neon_end(); + } else { + polyval_update_non4k(keys->key_powers[NUM_KEY_POWERS-1], in, + nblocks, accumulator); + } +} + +static void internal_polyval_mul(u8 *op1, const u8 *op2) +{ + if (likely(crypto_simd_usable())) { + kernel_neon_begin(); + pmull_polyval_mul(op1, op2); + kernel_neon_end(); + } else { + polyval_mul_non4k(op1, op2); + } +} + +static int polyval_arm64_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen) +{ + struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm); + int i; + + if (keylen != POLYVAL_BLOCK_SIZE) + return -EINVAL; + + memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE); + + for (i = NUM_KEY_POWERS-2; i >= 0; i--) { + memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE); + internal_polyval_mul(tctx->key_powers[i], + tctx->key_powers[i+1]); + } + + return 0; +} + +static int polyval_arm64_init(struct shash_desc *desc) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + + memset(dctx, 0, sizeof(*dctx)); + + return 0; +} + +static int polyval_arm64_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + u8 *pos; + unsigned int nblocks; + unsigned int n; + + if (dctx->bytes) { + n = min(srclen, dctx->bytes); + pos = dctx->buffer + POLYVAL_BLOCK_SIZE - dctx->bytes; + + dctx->bytes -= n; + srclen -= n; + + while (n--) + *pos++ ^= *src++; + + if (!dctx->bytes) + internal_polyval_mul(dctx->buffer, + tctx->key_powers[NUM_KEY_POWERS-1]); + } + + while (srclen >= POLYVAL_BLOCK_SIZE) { + /* allow rescheduling every 4K bytes */ + nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; + internal_polyval_update(tctx, src, nblocks, dctx->buffer); + srclen -= nblocks * POLYVAL_BLOCK_SIZE; + src += nblocks * POLYVAL_BLOCK_SIZE; + } + + if (srclen) { + dctx->bytes = POLYVAL_BLOCK_SIZE - srclen; + pos = dctx->buffer; + while (srclen--) + *pos++ ^= *src++; + } + + return 0; +} + +static int polyval_arm64_final(struct shash_desc *desc, u8 *dst) +{ + struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); + const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); + + if (dctx->bytes) { + internal_polyval_mul(dctx->buffer, + tctx->key_powers[NUM_KEY_POWERS-1]); + } + + memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE); + + return 0; +} + +static struct shash_alg polyval_alg = { + .digestsize = POLYVAL_DIGEST_SIZE, + .init = polyval_arm64_init, + .update = polyval_arm64_update, + .final = polyval_arm64_final, + .setkey = polyval_arm64_setkey, + .descsize = sizeof(struct polyval_desc_ctx), + .base = { + .cra_name = "polyval", + .cra_driver_name = "polyval-ce", + .cra_priority = 200, + .cra_blocksize = 
POLYVAL_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct polyval_tfm_ctx), + .cra_module = THIS_MODULE, + }, +}; + +static int __init polyval_ce_mod_init(void) +{ + return crypto_register_shash(&polyval_alg); +} + +static void __exit polyval_ce_mod_exit(void) +{ + crypto_unregister_shash(&polyval_alg); +} + +module_cpu_feature_match(PMULL, polyval_ce_mod_init) +module_exit(polyval_ce_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("POLYVAL hash function accelerated by ARMv8 Crypto Extensions"); +MODULE_ALIAS_CRYPTO("polyval"); +MODULE_ALIAS_CRYPTO("polyval-ce"); From 6b2a51ff03bf0c54cbc699ee85a9a49eb203ebfc Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 20 May 2022 18:15:01 +0000 Subject: [PATCH 13/89] fscrypt: Add HCTR2 support for filename encryption HCTR2 is a tweakable, length-preserving encryption mode that is intended for use on CPUs with dedicated crypto instructions. HCTR2 has the property that a bitflip in the plaintext changes the entire ciphertext. This property fixes a known weakness with filename encryption: when two filenames in the same directory share a prefix of >= 16 bytes, with AES-CTS-CBC their encrypted filenames share a common substring, leaking information. HCTR2 does not have this problem. More information on HCTR2 can be found here: "Length-preserving encryption with HCTR2": https://eprint.iacr.org/2021/1441.pdf Signed-off-by: Nathan Huckleberry Reviewed-by: Ard Biesheuvel Acked-by: Eric Biggers Signed-off-by: Herbert Xu --- Documentation/filesystems/fscrypt.rst | 22 +++++++++++++++++----- fs/crypto/fscrypt_private.h | 2 +- fs/crypto/keysetup.c | 7 +++++++ fs/crypto/policy.c | 14 +++++++++++--- include/uapi/linux/fscrypt.h | 3 ++- 5 files changed, 38 insertions(+), 10 deletions(-) diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index 2e9aaa295125a..5ba5817c17c2a 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -337,6 +337,7 @@ Currently, the following pairs of encryption modes are supported: - AES-256-XTS for contents and AES-256-CTS-CBC for filenames - AES-128-CBC for contents and AES-128-CTS-CBC for filenames - Adiantum for both contents and filenames +- AES-256-XTS for contents and AES-256-HCTR2 for filenames (v2 policies only) If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair. @@ -357,6 +358,17 @@ To use Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast implementations of ChaCha and NHPoly1305 should be enabled, e.g. CONFIG_CRYPTO_CHACHA20_NEON and CONFIG_CRYPTO_NHPOLY1305_NEON for ARM. +AES-256-HCTR2 is another true wide-block encryption mode that is intended for +use on CPUs with dedicated crypto instructions. AES-256-HCTR2 has the property +that a bitflip in the plaintext changes the entire ciphertext. This property +makes it desirable for filename encryption since initialization vectors are +reused within a directory. For more details on AES-256-HCTR2, see the paper +"Length-preserving encryption with HCTR2" +(https://eprint.iacr.org/2021/1441.pdf). To use AES-256-HCTR2, +CONFIG_CRYPTO_HCTR2 must be enabled. Also, fast implementations of XCTR and +POLYVAL should be enabled, e.g. CRYPTO_POLYVAL_ARM64_CE and +CRYPTO_AES_ARM64_CE_BLK for ARM64. + New encryption modes can be added relatively easily, without changes to individual filesystems. 
However, authenticated encryption (AE) modes are not currently supported because of the difficulty of dealing @@ -404,11 +416,11 @@ alternatively has the file's nonce (for `DIRECT_KEY policies`_) or inode number (for `IV_INO_LBLK_64 policies`_) included in the IVs. Thus, IV reuse is limited to within a single directory. -With CTS-CBC, the IV reuse means that when the plaintext filenames -share a common prefix at least as long as the cipher block size (16 -bytes for AES), the corresponding encrypted filenames will also share -a common prefix. This is undesirable. Adiantum does not have this -weakness, as it is a wide-block encryption mode. +With CTS-CBC, the IV reuse means that when the plaintext filenames share a +common prefix at least as long as the cipher block size (16 bytes for AES), the +corresponding encrypted filenames will also share a common prefix. This is +undesirable. Adiantum and HCTR2 do not have this weakness, as they are +wide-block encryption modes. All supported filenames encryption modes accept any plaintext length >= 16 bytes; cipher block alignment is not required. However, diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 6b4c8094cc7b0..f5be777d82795 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -31,7 +31,7 @@ #define FSCRYPT_CONTEXT_V2 2 /* Keep this in sync with include/uapi/linux/fscrypt.h */ -#define FSCRYPT_MODE_MAX FSCRYPT_MODE_ADIANTUM +#define FSCRYPT_MODE_MAX FSCRYPT_MODE_AES_256_HCTR2 struct fscrypt_context_v1 { u8 version; /* FSCRYPT_CONTEXT_V1 */ diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index c35711896bd4f..fbc71abdabe32 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -53,6 +53,13 @@ struct fscrypt_mode fscrypt_modes[] = { .ivsize = 32, .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, + [FSCRYPT_MODE_AES_256_HCTR2] = { + .friendly_name = "AES-256-HCTR2", + .cipher_str = "hctr2(aes)", + .keysize = 32, + .security_strength = 32, + .ivsize = 32, + }, }; static DEFINE_MUTEX(fscrypt_mode_key_setup_mutex); diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 5f858cee1e3b0..8a054e6d1e687 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -61,7 +61,7 @@ fscrypt_get_dummy_policy(struct super_block *sb) return sb->s_cop->get_dummy_policy(sb); } -static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) +static bool fscrypt_valid_enc_modes_v1(u32 contents_mode, u32 filenames_mode) { if (contents_mode == FSCRYPT_MODE_AES_256_XTS && filenames_mode == FSCRYPT_MODE_AES_256_CTS) @@ -78,6 +78,14 @@ static bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) return false; } +static bool fscrypt_valid_enc_modes_v2(u32 contents_mode, u32 filenames_mode) +{ + if (contents_mode == FSCRYPT_MODE_AES_256_XTS && + filenames_mode == FSCRYPT_MODE_AES_256_HCTR2) + return true; + return fscrypt_valid_enc_modes_v1(contents_mode, filenames_mode); +} + static bool supported_direct_key_modes(const struct inode *inode, u32 contents_mode, u32 filenames_mode) { @@ -151,7 +159,7 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, const struct inode *inode) { - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + if (!fscrypt_valid_enc_modes_v1(policy->contents_encryption_mode, policy->filenames_encryption_mode)) { fscrypt_warn(inode, "Unsupported encryption modes (contents %d, filenames %d)", @@ -187,7 +195,7 @@ static bool 
fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, { int count = 0; - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + if (!fscrypt_valid_enc_modes_v2(policy->contents_encryption_mode, policy->filenames_encryption_mode)) { fscrypt_warn(inode, "Unsupported encryption modes (contents %d, filenames %d)", diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 9f4428be3e362..a756b29afcc23 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -27,7 +27,8 @@ #define FSCRYPT_MODE_AES_128_CBC 5 #define FSCRYPT_MODE_AES_128_CTS 6 #define FSCRYPT_MODE_ADIANTUM 9 -/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */ +#define FSCRYPT_MODE_AES_256_HCTR2 10 +/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */ /* * Legacy policy version; ad-hoc KDF and no key verification. From 3f3bbf22a592e8bd15d2f6d8e9a4a34e2b5028cd Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:10:45 +0200 Subject: [PATCH 14/89] crypto: hisilicon/sec - fix typos in comment Spelling mistakes (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index c2e9b01187a74..42bb486f3b6d5 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -143,10 +143,10 @@ struct sec_ctx { /* Threshold for fake busy, trigger to return -EBUSY to user */ u32 fake_req_limit; - /* Currrent cyclic index to select a queue for encipher */ + /* Current cyclic index to select a queue for encipher */ atomic_t enc_qcyclic; - /* Currrent cyclic index to select a queue for decipher */ + /* Current cyclic index to select a queue for decipher */ atomic_t dec_qcyclic; enum sec_alg_type alg_type; From cd81775a56bce2ad480ff8444e6f44f3980ceb7d Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:10:48 +0200 Subject: [PATCH 15/89] crypto: ccp - fix typo in comment Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index a5d9123a22ead..83350e2d9821e 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -366,7 +366,7 @@ struct ccp_device { /* Master lists that all cmds are queued on. Because there can be * more than one CCP command queue that can process a cmd a separate - * backlog list is neeeded so that the backlog completion call + * backlog list is needed so that the backlog completion call * completes before the cmd is available for execution. */ spinlock_t cmd_lock ____cacheline_aligned; From 4ad28689df853db3ee7b2656a225f1d722ee7639 Mon Sep 17 00:00:00 2001 From: Shijith Thotton Date: Fri, 27 May 2022 13:24:48 +0530 Subject: [PATCH 16/89] crypto: octeontx2 - add firmware version in devlink info Added running firmware version information of AE, SE and IE components in devlink info. 
Signed-off-by: Shijith Thotton Signed-off-by: Herbert Xu --- .../marvell/octeontx2/otx2_cpt_devlink.c | 40 ++++++++++++++++++- .../marvell/octeontx2/otx2_cptpf_ucode.c | 2 +- .../marvell/octeontx2/otx2_cptpf_ucode.h | 3 ++ 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c index bb02e0db36153..7503f6b18ac56 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c @@ -51,11 +51,47 @@ static const struct devlink_param otx2_cpt_dl_params[] = { NULL), }; -static int otx2_cpt_devlink_info_get(struct devlink *devlink, +static int otx2_cpt_dl_info_firmware_version_put(struct devlink_info_req *req, + struct otx2_cpt_eng_grp_info grp[], + const char *ver_name, int eng_type) +{ + struct otx2_cpt_engs_rsvd *eng; + int i; + + for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) { + eng = find_engines_by_type(&grp[i], eng_type); + if (eng) + return devlink_info_version_running_put(req, ver_name, + eng->ucode->ver_str); + } + + return 0; +} + +static int otx2_cpt_devlink_info_get(struct devlink *dl, struct devlink_info_req *req, struct netlink_ext_ack *extack) { - return devlink_info_driver_name_put(req, "rvu_cptpf"); + struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl); + struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf; + int err; + + err = devlink_info_driver_name_put(req, "rvu_cptpf"); + if (err) + return err; + + err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp, + "fw.ae", OTX2_CPT_AE_TYPES); + if (err) + return err; + + err = otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp, + "fw.se", OTX2_CPT_SE_TYPES); + if (err) + return err; + + return otx2_cpt_dl_info_firmware_version_put(req, cptpf->eng_grps.grp, + "fw.ie", OTX2_CPT_IE_TYPES); } static const struct devlink_ops otx2_cpt_devlink_ops = { diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 9cba2f714c7e1..46ffb7ae982c5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -476,7 +476,7 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info) return ret; } -static struct otx2_cpt_engs_rsvd *find_engines_by_type( +struct otx2_cpt_engs_rsvd *find_engines_by_type( struct otx2_cpt_eng_grp_info *eng_grp, int eng_type) { diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h index 8f4d4e5f531a6..e69320a54b5d5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h @@ -166,4 +166,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf, struct devlink_param_gset_ctx *ctx); void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf); +struct otx2_cpt_engs_rsvd *find_engines_by_type( + struct otx2_cpt_eng_grp_info *eng_grp, + int eng_type); #endif /* __OTX2_CPTPF_UCODE_H */ From 920b0442b9f884f55f4745b53430c80e71e90275 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sat, 28 May 2022 12:24:29 +0200 Subject: [PATCH 17/89] crypto: memneq - move into lib/ This is used by code that doesn't need CONFIG_CRYPTO, so move this into lib/ with a Kconfig option so that it can be selected by whatever needs it. 
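As a rough illustration (not code from this series), a LIB_MEMNEQ user outside the Cryptographic API just adds "select LIB_MEMNEQ" to its Kconfig entry and calls crypto_memneq(); the made-up foo_mac_matches() below assumes crypto_memneq() is still declared in <crypto/algapi.h>, as it is at this point in the tree:

#include <crypto/algapi.h>	/* declares crypto_memneq() for now */
#include <linux/types.h>

/*
 * Hypothetical consumer: constant-time comparison of two authentication
 * tags without depending on CONFIG_CRYPTO. crypto_memneq() returns
 * nonzero iff the buffers differ, so equality is its negation.
 */
static bool foo_mac_matches(const u8 *expected, const u8 *got, size_t len)
{
	return !crypto_memneq(expected, got, len);
}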
This fixes a linker error that Zheng pointed out when CRYPTO_MANAGER_DISABLE_TESTS!=y and CRYPTO=m: lib/crypto/curve25519-selftest.o: In function `curve25519_selftest': curve25519-selftest.c:(.init.text+0x60): undefined reference to `__crypto_memneq' curve25519-selftest.c:(.init.text+0xec): undefined reference to `__crypto_memneq' curve25519-selftest.c:(.init.text+0x114): undefined reference to `__crypto_memneq' curve25519-selftest.c:(.init.text+0x154): undefined reference to `__crypto_memneq' Reported-by: Zheng Bin Cc: Eric Biggers Cc: stable@vger.kernel.org Fixes: aa127963f1ca ("crypto: lib/curve25519 - re-add selftests") Signed-off-by: Jason A. Donenfeld Reviewed-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/Makefile | 2 +- lib/Kconfig | 3 +++ lib/Makefile | 1 + lib/crypto/Kconfig | 1 + {crypto => lib}/memneq.c | 0 6 files changed, 7 insertions(+), 1 deletion(-) rename {crypto => lib}/memneq.c (100%) diff --git a/crypto/Kconfig b/crypto/Kconfig index 9b654984de799..6e30e8138057b 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -15,6 +15,7 @@ source "crypto/async_tx/Kconfig" # menuconfig CRYPTO tristate "Cryptographic API" + select LIB_MEMNEQ help This option provides the core Cryptographic API. diff --git a/crypto/Makefile b/crypto/Makefile index 3bbc0dd491608..1f529704fe803 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o memneq.o +crypto-y := api.o cipher.o compress.o obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o obj-$(CONFIG_CRYPTO_FIPS) += fips.o diff --git a/lib/Kconfig b/lib/Kconfig index 6a843639814fb..eaaad4d85bf24 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -120,6 +120,9 @@ config INDIRECT_IOMEM_FALLBACK source "lib/crypto/Kconfig" +config LIB_MEMNEQ + bool + config CRC_CCITT tristate "CRC-CCITT functions" help diff --git a/lib/Makefile b/lib/Makefile index ea54294d73bf4..f99bf61f8bbc6 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -251,6 +251,7 @@ obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o lib-$(CONFIG_CLZ_TAB) += clz_tab.o +lib-$(CONFIG_LIB_MEMNEQ) += memneq.o obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 9856e291f4141..2082af43d51fb 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519 tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n + select LIB_MEMNEQ help Enable the Curve25519 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific diff --git a/crypto/memneq.c b/lib/memneq.c similarity index 100% rename from crypto/memneq.c rename to lib/memneq.c From 2d16803c562ecc644803d42ba98a8e0aef9c014e Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sat, 28 May 2022 21:44:07 +0200 Subject: [PATCH 18/89] crypto: blake2s - remove shash module BLAKE2s has no currently known use as an shash. Just remove all of this unnecessary plumbing. Removing this shash was something we talked about back when we were making BLAKE2s a built-in, but I simply never got around to doing it. So this completes that project. Importantly, this fixes a bug in which the lib code depends on crypto_simd_disabled_for_test, causing linker errors.
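For callers nothing changes; only the library interface in <crypto/blake2s.h> remains, and a minimal sketch of its use looks like the following (foo_digest() is a made-up wrapper, not part of this patch):

#include <crypto/blake2s.h>
#include <linux/types.h>

/* One-shot BLAKE2s over a buffer; key may be NULL with keylen == 0. */
static void foo_digest(u8 out[BLAKE2S_HASH_SIZE], const u8 *buf, size_t len,
		       const u8 *key, size_t keylen)
{
	/* Argument order is out, in, key, outlen, inlen, keylen. */
	blake2s(out, buf, key, BLAKE2S_HASH_SIZE, len, keylen);
}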
Also add more alignment tests to the selftests and compare SIMD and non-SIMD compression functions, to make up for what we lose from testmgr.c. Reported-by: gaochao Cc: Eric Biggers Cc: Ard Biesheuvel Cc: stable@vger.kernel.org Fixes: 6048fdcc5f26 ("lib/crypto: blake2s: include as built-in") Signed-off-by: Jason A. Donenfeld Signed-off-by: Herbert Xu --- arch/arm/crypto/Kconfig | 2 +- arch/arm/crypto/Makefile | 4 +- arch/arm/crypto/blake2s-shash.c | 75 ----------- arch/x86/crypto/Makefile | 4 +- arch/x86/crypto/blake2s-glue.c | 3 +- arch/x86/crypto/blake2s-shash.c | 77 ----------- crypto/Kconfig | 20 +-- crypto/Makefile | 1 - crypto/blake2s_generic.c | 75 ----------- crypto/tcrypt.c | 12 -- crypto/testmgr.c | 24 ---- crypto/testmgr.h | 217 ------------------------------ include/crypto/internal/blake2s.h | 108 --------------- lib/crypto/blake2s-selftest.c | 41 ++++++ lib/crypto/blake2s.c | 37 ++++- 15 files changed, 76 insertions(+), 624 deletions(-) delete mode 100644 arch/arm/crypto/blake2s-shash.c delete mode 100644 arch/x86/crypto/blake2s-shash.c delete mode 100644 crypto/blake2s_generic.c diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index e4dba5461cb3e..149a5bd6b88c1 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -63,7 +63,7 @@ config CRYPTO_SHA512_ARM using optimized ARM assembler and NEON, when available. config CRYPTO_BLAKE2S_ARM - tristate "BLAKE2s digest algorithm (ARM)" + bool "BLAKE2s digest algorithm (ARM)" select CRYPTO_ARCH_HAVE_LIB_BLAKE2S help BLAKE2s digest algorithm optimized with ARM scalar instructions. This diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 0274f81cc8ea0..971e74546fb1b 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -9,8 +9,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o -obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += blake2s-arm.o -obj-$(if $(CONFIG_CRYPTO_BLAKE2S_ARM),y) += libblake2s-arm.o +obj-$(CONFIG_CRYPTO_BLAKE2S_ARM) += libblake2s-arm.o obj-$(CONFIG_CRYPTO_BLAKE2B_NEON) += blake2b-neon.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o @@ -32,7 +31,6 @@ sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y) sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y) -blake2s-arm-y := blake2s-shash.o libblake2s-arm-y:= blake2s-core.o blake2s-glue.o blake2b-neon-y := blake2b-neon-core.o blake2b-neon-glue.o sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o diff --git a/arch/arm/crypto/blake2s-shash.c b/arch/arm/crypto/blake2s-shash.c deleted file mode 100644 index 763c73beea2d0..0000000000000 --- a/arch/arm/crypto/blake2s-shash.c +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * BLAKE2s digest algorithm, ARM scalar implementation - * - * Copyright 2020 Google LLC - */ - -#include -#include - -#include - -static int crypto_blake2s_update_arm(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2s_update(desc, in, inlen, false); -} - -static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out) -{ - return crypto_blake2s_final(desc, out, false); -} - -#define BLAKE2S_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name 
= driver_name, \ - .base.cra_priority = 200, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = digest_size, \ - .setkey = crypto_blake2s_setkey, \ - .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update_arm, \ - .final = crypto_blake2s_final_arm, \ - .descsize = sizeof(struct blake2s_state), \ - } - -static struct shash_alg blake2s_arm_algs[] = { - BLAKE2S_ALG("blake2s-128", "blake2s-128-arm", BLAKE2S_128_HASH_SIZE), - BLAKE2S_ALG("blake2s-160", "blake2s-160-arm", BLAKE2S_160_HASH_SIZE), - BLAKE2S_ALG("blake2s-224", "blake2s-224-arm", BLAKE2S_224_HASH_SIZE), - BLAKE2S_ALG("blake2s-256", "blake2s-256-arm", BLAKE2S_256_HASH_SIZE), -}; - -static int __init blake2s_arm_mod_init(void) -{ - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? - crypto_register_shashes(blake2s_arm_algs, - ARRAY_SIZE(blake2s_arm_algs)) : 0; -} - -static void __exit blake2s_arm_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) - crypto_unregister_shashes(blake2s_arm_algs, - ARRAY_SIZE(blake2s_arm_algs)); -} - -module_init(blake2s_arm_mod_init); -module_exit(blake2s_arm_mod_exit); - -MODULE_DESCRIPTION("BLAKE2s digest algorithm, ARM scalar implementation"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers "); -MODULE_ALIAS_CRYPTO("blake2s-128"); -MODULE_ALIAS_CRYPTO("blake2s-128-arm"); -MODULE_ALIAS_CRYPTO("blake2s-160"); -MODULE_ALIAS_CRYPTO("blake2s-160-arm"); -MODULE_ALIAS_CRYPTO("blake2s-224"); -MODULE_ALIAS_CRYPTO("blake2s-224-arm"); -MODULE_ALIAS_CRYPTO("blake2s-256"); -MODULE_ALIAS_CRYPTO("blake2s-256-arm"); diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index b9847152acd82..04d07ab744b2e 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -61,9 +61,7 @@ sha256-ssse3-$(CONFIG_AS_SHA256_NI) += sha256_ni_asm.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o -obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o -blake2s-x86_64-y := blake2s-shash.o -obj-$(if $(CONFIG_CRYPTO_BLAKE2S_X86),y) += libblake2s-x86_64.o +obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += libblake2s-x86_64.o libblake2s-x86_64-y := blake2s-core.o blake2s-glue.o obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c index 69853c13e8fb0..aaba212305288 100644 --- a/arch/x86/crypto/blake2s-glue.c +++ b/arch/x86/crypto/blake2s-glue.c @@ -4,7 +4,6 @@ */ #include -#include #include #include @@ -33,7 +32,7 @@ void blake2s_compress(struct blake2s_state *state, const u8 *block, /* SIMD disables preemption, so relax after processing each page. */ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); - if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) { + if (!static_branch_likely(&blake2s_use_ssse3) || !may_use_simd()) { blake2s_compress_generic(state, block, nblocks, inc); return; } diff --git a/arch/x86/crypto/blake2s-shash.c b/arch/x86/crypto/blake2s-shash.c deleted file mode 100644 index 59ae28abe35cc..0000000000000 --- a/arch/x86/crypto/blake2s-shash.c +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* - * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
- */ - -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -static int crypto_blake2s_update_x86(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2s_update(desc, in, inlen, false); -} - -static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out) -{ - return crypto_blake2s_final(desc, out, false); -} - -#define BLAKE2S_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name = driver_name, \ - .base.cra_priority = 200, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = digest_size, \ - .setkey = crypto_blake2s_setkey, \ - .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update_x86, \ - .final = crypto_blake2s_final_x86, \ - .descsize = sizeof(struct blake2s_state), \ - } - -static struct shash_alg blake2s_algs[] = { - BLAKE2S_ALG("blake2s-128", "blake2s-128-x86", BLAKE2S_128_HASH_SIZE), - BLAKE2S_ALG("blake2s-160", "blake2s-160-x86", BLAKE2S_160_HASH_SIZE), - BLAKE2S_ALG("blake2s-224", "blake2s-224-x86", BLAKE2S_224_HASH_SIZE), - BLAKE2S_ALG("blake2s-256", "blake2s-256-x86", BLAKE2S_256_HASH_SIZE), -}; - -static int __init blake2s_mod_init(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3)) - return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); - return 0; -} - -static void __exit blake2s_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3)) - crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -} - -module_init(blake2s_mod_init); -module_exit(blake2s_mod_exit); - -MODULE_ALIAS_CRYPTO("blake2s-128"); -MODULE_ALIAS_CRYPTO("blake2s-128-x86"); -MODULE_ALIAS_CRYPTO("blake2s-160"); -MODULE_ALIAS_CRYPTO("blake2s-160-x86"); -MODULE_ALIAS_CRYPTO("blake2s-224"); -MODULE_ALIAS_CRYPTO("blake2s-224-x86"); -MODULE_ALIAS_CRYPTO("blake2s-256"); -MODULE_ALIAS_CRYPTO("blake2s-256-x86"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/Kconfig b/crypto/Kconfig index 6e30e8138057b..59489a300cd10 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -712,26 +712,8 @@ config CRYPTO_BLAKE2B See https://blake2.net for further information. -config CRYPTO_BLAKE2S - tristate "BLAKE2s digest algorithm" - select CRYPTO_LIB_BLAKE2S_GENERIC - select CRYPTO_HASH - help - Implementation of cryptographic hash function BLAKE2s - optimized for 8-32bit platforms and can produce digests of any size - between 1 to 32. The keyed hash is also implemented. - - This module provides the following algorithms: - - - blake2s-128 - - blake2s-160 - - blake2s-224 - - blake2s-256 - - See https://blake2.net for further information. 
- config CRYPTO_BLAKE2S_X86 - tristate "BLAKE2s digest algorithm (x86 accelerated version)" + bool "BLAKE2s digest algorithm (x86 accelerated version)" depends on X86 && 64BIT select CRYPTO_LIB_BLAKE2S_GENERIC select CRYPTO_ARCH_HAVE_LIB_BLAKE2S diff --git a/crypto/Makefile b/crypto/Makefile index 1f529704fe803..a4a84860fe43d 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -84,7 +84,6 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_BLAKE2B) += blake2b_generic.o -obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c deleted file mode 100644 index 5f96a21f87883..0000000000000 --- a/crypto/blake2s_generic.c +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* - * shash interface to the generic implementation of BLAKE2s - * - * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. - */ - -#include -#include - -#include -#include -#include - -static int crypto_blake2s_update_generic(struct shash_desc *desc, - const u8 *in, unsigned int inlen) -{ - return crypto_blake2s_update(desc, in, inlen, true); -} - -static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out) -{ - return crypto_blake2s_final(desc, out, true); -} - -#define BLAKE2S_ALG(name, driver_name, digest_size) \ - { \ - .base.cra_name = name, \ - .base.cra_driver_name = driver_name, \ - .base.cra_priority = 100, \ - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ - .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, \ - .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), \ - .base.cra_module = THIS_MODULE, \ - .digestsize = digest_size, \ - .setkey = crypto_blake2s_setkey, \ - .init = crypto_blake2s_init, \ - .update = crypto_blake2s_update_generic, \ - .final = crypto_blake2s_final_generic, \ - .descsize = sizeof(struct blake2s_state), \ - } - -static struct shash_alg blake2s_algs[] = { - BLAKE2S_ALG("blake2s-128", "blake2s-128-generic", - BLAKE2S_128_HASH_SIZE), - BLAKE2S_ALG("blake2s-160", "blake2s-160-generic", - BLAKE2S_160_HASH_SIZE), - BLAKE2S_ALG("blake2s-224", "blake2s-224-generic", - BLAKE2S_224_HASH_SIZE), - BLAKE2S_ALG("blake2s-256", "blake2s-256-generic", - BLAKE2S_256_HASH_SIZE), -}; - -static int __init blake2s_mod_init(void) -{ - return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -} - -static void __exit blake2s_mod_exit(void) -{ - crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -} - -subsys_initcall(blake2s_mod_init); -module_exit(blake2s_mod_exit); - -MODULE_ALIAS_CRYPTO("blake2s-128"); -MODULE_ALIAS_CRYPTO("blake2s-128-generic"); -MODULE_ALIAS_CRYPTO("blake2s-160"); -MODULE_ALIAS_CRYPTO("blake2s-160-generic"); -MODULE_ALIAS_CRYPTO("blake2s-224"); -MODULE_ALIAS_CRYPTO("blake2s-224-generic"); -MODULE_ALIAS_CRYPTO("blake2s-256"); -MODULE_ALIAS_CRYPTO("blake2s-256-generic"); -MODULE_LICENSE("GPL v2"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 336598da8eac1..a8831060c4cee 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1670,10 +1670,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("rmd160"); break; - case 41: - ret += tcrypt_test("blake2s-256"); - break; - case 42: ret += tcrypt_test("blake2b-512"); break; @@ -2250,10 +2246,6 @@ 
static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_hash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; fallthrough; - case 316: - test_hash_speed("blake2s-256", sec, generic_hash_speed_template); - if (mode > 300 && mode < 400) break; - fallthrough; case 317: test_hash_speed("blake2b-512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; @@ -2362,10 +2354,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) test_ahash_speed("rmd160", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; fallthrough; - case 416: - test_ahash_speed("blake2s-256", sec, generic_hash_speed_template); - if (mode > 400 && mode < 500) break; - fallthrough; case 417: test_ahash_speed("blake2b-512", sec, generic_hash_speed_template); if (mode > 400 && mode < 500) break; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 4850974610685..7a8a567499603 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4375,30 +4375,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .hash = __VECS(blake2b_512_tv_template) } - }, { - .alg = "blake2s-128", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_128_tv_template) - } - }, { - .alg = "blake2s-160", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_160_tv_template) - } - }, { - .alg = "blake2s-224", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_224_tv_template) - } - }, { - .alg = "blake2s-256", - .test = alg_test_hash, - .suite = { - .hash = __VECS(blakes2s_256_tv_template) - } }, { .alg = "cbc(aes)", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 808ba07baa041..4f3955ea40bf6 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -34034,223 +34034,6 @@ static const struct hash_testvec blake2b_512_tv_template[] = {{ 0xae, 0x15, 0x81, 0x15, 0xd0, 0x88, 0xa0, 0x3c, }, }}; -static const struct hash_testvec blakes2s_128_tv_template[] = {{ - .digest = (u8[]){ 0x64, 0x55, 0x0d, 0x6f, 0xfe, 0x2c, 0x0a, 0x01, - 0xa1, 0x4a, 0xba, 0x1e, 0xad, 0xe0, 0x20, 0x0c, }, -}, { - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0xdc, 0x66, 0xca, 0x8f, 0x03, 0x86, 0x58, 0x01, - 0xb0, 0xff, 0xe0, 0x6e, 0xd8, 0xa1, 0xa9, 0x0e, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x88, 0x1e, 0x42, 0xe7, 0xbb, 0x35, 0x80, 0x82, - 0x63, 0x7c, 0x0a, 0x0f, 0xd7, 0xec, 0x6c, 0x2f, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0xcf, 0x9e, 0x07, 0x2a, 0xd5, 0x22, 0xf2, 0xcd, - 0xa2, 0xd8, 0x25, 0x21, 0x80, 0x86, 0x73, 0x1c, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0xf6, 0x33, 0x5a, 0x2c, 0x22, 0xa0, 0x64, 0xb2, - 0xb6, 0x3f, 0xeb, 0xbc, 0xd1, 0xc3, 0xe5, 0xb2, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x72, 0x66, 0x49, 0x60, 0xf9, 0x4a, 0xea, 0xbe, - 0x1f, 0xf4, 0x60, 0xce, 0xb7, 0x81, 0xcb, 0x09, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0xd5, 0xa4, 0x0e, 0xc3, 0x16, 0xc7, 0x51, 0xa6, - 0x3c, 0xd0, 0xd9, 0x11, 0x57, 0xfa, 0x1e, 0xbb, }, -}}; - -static const struct hash_testvec blakes2s_160_tv_template[] = {{ - .plaintext = 
blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0xb4, 0xf2, 0x03, 0x49, 0x37, 0xed, 0xb1, 0x3e, - 0x5b, 0x2a, 0xca, 0x64, 0x82, 0x74, 0xf6, 0x62, - 0xe3, 0xf2, 0x84, 0xff, }, -}, { - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0xaa, 0x56, 0x9b, 0xdc, 0x98, 0x17, 0x75, 0xf2, - 0xb3, 0x68, 0x83, 0xb7, 0x9b, 0x8d, 0x48, 0xb1, - 0x9b, 0x2d, 0x35, 0x05, }, -}, { - .ksize = 1, - .key = "B", - .digest = (u8[]){ 0x50, 0x16, 0xe7, 0x0c, 0x01, 0xd0, 0xd3, 0xc3, - 0xf4, 0x3e, 0xb1, 0x6e, 0x97, 0xa9, 0x4e, 0xd1, - 0x79, 0x65, 0x32, 0x93, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x1c, 0x2b, 0xcd, 0x9a, 0x68, 0xca, 0x8c, 0x71, - 0x90, 0x29, 0x6c, 0x54, 0xfa, 0x56, 0x4a, 0xef, - 0xa2, 0x3a, 0x56, 0x9c, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0x36, 0xc3, 0x5f, 0x9a, 0xdc, 0x7e, 0xbf, 0x19, - 0x68, 0xaa, 0xca, 0xd8, 0x81, 0xbf, 0x09, 0x34, - 0x83, 0x39, 0x0f, 0x30, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0x86, 0x80, 0x78, 0xa4, 0x14, 0xec, 0x03, 0xe5, - 0xb6, 0x9a, 0x52, 0x0e, 0x42, 0xee, 0x39, 0x9d, - 0xac, 0xa6, 0x81, 0x63, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x2d, 0xd8, 0xd2, 0x53, 0x66, 0xfa, 0xa9, 0x01, - 0x1c, 0x9c, 0xaf, 0xa3, 0xe2, 0x9d, 0x9b, 0x10, - 0x0a, 0xf6, 0x73, 0xe8, }, -}}; - -static const struct hash_testvec blakes2s_224_tv_template[] = {{ - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x61, 0xb9, 0x4e, 0xc9, 0x46, 0x22, 0xa3, 0x91, - 0xd2, 0xae, 0x42, 0xe6, 0x45, 0x6c, 0x90, 0x12, - 0xd5, 0x80, 0x07, 0x97, 0xb8, 0x86, 0x5a, 0xfc, - 0x48, 0x21, 0x97, 0xbb, }, -}, { - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x9e, 0xda, 0xc7, 0x20, 0x2c, 0xd8, 0x48, 0x2e, - 0x31, 0x94, 0xab, 0x46, 0x6d, 0x94, 0xd8, 0xb4, - 0x69, 0xcd, 0xae, 0x19, 0x6d, 0x9e, 0x41, 0xcc, - 0x2b, 0xa4, 0xd5, 0xf6, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .digest = (u8[]){ 0x32, 0xc0, 0xac, 0xf4, 0x3b, 0xd3, 0x07, 0x9f, - 0xbe, 0xfb, 0xfa, 0x4d, 0x6b, 0x4e, 0x56, 0xb3, - 0xaa, 0xd3, 0x27, 0xf6, 0x14, 0xbf, 0xb9, 0x32, - 0xa7, 0x19, 0xfc, 0xb8, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0x73, 0xad, 0x5e, 0x6d, 0xb9, 0x02, 0x8e, 0x76, - 0xf2, 0x66, 0x42, 0x4b, 0x4c, 0xfa, 0x1f, 0xe6, - 0x2e, 0x56, 0x40, 0xe5, 0xa2, 0xb0, 0x3c, 0xe8, - 0x7b, 0x45, 0xfe, 0x05, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0x16, 0x60, 0xfb, 0x92, 0x54, 0xb3, 0x6e, 0x36, - 0x81, 0xf4, 0x16, 0x41, 0xc3, 0x3d, 0xd3, 0x43, - 0x84, 0xed, 0x10, 0x6f, 0x65, 0x80, 0x7a, 0x3e, - 0x25, 0xab, 0xc5, 0x02, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0xca, 0xaa, 0x39, 0x67, 0x9c, 0xf7, 0x6b, 0xc7, - 0xb6, 0x82, 0xca, 0x0e, 0x65, 0x36, 0x5b, 0x7c, - 0x24, 0x00, 0xfa, 0x5f, 0xda, 0x06, 0x91, 0x93, - 0x6a, 0x31, 0x83, 0xb5, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0x90, 0x02, 0x26, 0xb5, 0x06, 0x9c, 0x36, 0x86, - 0x94, 0x91, 0x90, 0x1e, 0x7d, 0x2a, 0x71, 0xb2, - 0x48, 0xb5, 0xe8, 0x16, 
0xfd, 0x64, 0x33, 0x45, - 0xb3, 0xd7, 0xec, 0xcc, }, -}}; - -static const struct hash_testvec blakes2s_256_tv_template[] = {{ - .plaintext = blake2_ordered_sequence, - .psize = 15, - .digest = (u8[]){ 0xd9, 0x7c, 0x82, 0x8d, 0x81, 0x82, 0xa7, 0x21, - 0x80, 0xa0, 0x6a, 0x78, 0x26, 0x83, 0x30, 0x67, - 0x3f, 0x7c, 0x4e, 0x06, 0x35, 0x94, 0x7c, 0x04, - 0xc0, 0x23, 0x23, 0xfd, 0x45, 0xc0, 0xa5, 0x2d, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .digest = (u8[]){ 0x48, 0xa8, 0x99, 0x7d, 0xa4, 0x07, 0x87, 0x6b, - 0x3d, 0x79, 0xc0, 0xd9, 0x23, 0x25, 0xad, 0x3b, - 0x89, 0xcb, 0xb7, 0x54, 0xd8, 0x6a, 0xb7, 0x1a, - 0xee, 0x04, 0x7a, 0xd3, 0x45, 0xfd, 0x2c, 0x49, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 1, - .digest = (u8[]){ 0x22, 0x27, 0xae, 0xaa, 0x6e, 0x81, 0x56, 0x03, - 0xa7, 0xe3, 0xa1, 0x18, 0xa5, 0x9a, 0x2c, 0x18, - 0xf4, 0x63, 0xbc, 0x16, 0x70, 0xf1, 0xe7, 0x4b, - 0x00, 0x6d, 0x66, 0x16, 0xae, 0x9e, 0x74, 0x4e, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 7, - .digest = (u8[]){ 0x58, 0x5d, 0xa8, 0x60, 0x1c, 0xa4, 0xd8, 0x03, - 0x86, 0x86, 0x84, 0x64, 0xd7, 0xa0, 0x8e, 0x15, - 0x2f, 0x05, 0xa2, 0x1b, 0xbc, 0xef, 0x7a, 0x34, - 0xb3, 0xc5, 0xbc, 0x4b, 0xf0, 0x32, 0xeb, 0x12, }, -}, { - .ksize = 32, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 64, - .digest = (u8[]){ 0x89, 0x75, 0xb0, 0x57, 0x7f, 0xd3, 0x55, 0x66, - 0xd7, 0x50, 0xb3, 0x62, 0xb0, 0x89, 0x7a, 0x26, - 0xc3, 0x99, 0x13, 0x6d, 0xf0, 0x7b, 0xab, 0xab, - 0xbd, 0xe6, 0x20, 0x3f, 0xf2, 0x95, 0x4e, 0xd4, }, -}, { - .ksize = 1, - .key = "B", - .plaintext = blake2_ordered_sequence, - .psize = 247, - .digest = (u8[]){ 0x2e, 0x74, 0x1c, 0x1d, 0x03, 0xf4, 0x9d, 0x84, - 0x6f, 0xfc, 0x86, 0x32, 0x92, 0x49, 0x7e, 0x66, - 0xd7, 0xc3, 0x10, 0x88, 0xfe, 0x28, 0xb3, 0xe0, - 0xbf, 0x50, 0x75, 0xad, 0x8e, 0xa4, 0xe6, 0xb2, }, -}, { - .ksize = 16, - .key = blake2_ordered_sequence, - .plaintext = blake2_ordered_sequence, - .psize = 256, - .digest = (u8[]){ 0xb9, 0xd2, 0x81, 0x0e, 0x3a, 0xb1, 0x62, 0x9b, - 0xad, 0x44, 0x05, 0xf4, 0x92, 0x2e, 0x99, 0xc1, - 0x4a, 0x47, 0xbb, 0x5b, 0x6f, 0xb2, 0x96, 0xed, - 0xd5, 0x06, 0xb5, 0x3a, 0x7c, 0x7a, 0x65, 0x1d, }, -}}; - /* * Test vectors generated using https://github.com/google/hctr2 */ diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h index 52363eee2b20e..506d56530ca93 100644 --- a/include/crypto/internal/blake2s.h +++ b/include/crypto/internal/blake2s.h @@ -8,7 +8,6 @@ #define _CRYPTO_INTERNAL_BLAKE2S_H #include -#include #include void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, @@ -19,111 +18,4 @@ void blake2s_compress(struct blake2s_state *state, const u8 *block, bool blake2s_selftest(void); -static inline void blake2s_set_lastblock(struct blake2s_state *state) -{ - state->f[0] = -1; -} - -/* Helper functions for BLAKE2s shared by the library and shash APIs */ - -static __always_inline void -__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen, - bool force_generic) -{ - const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; - - if (unlikely(!inlen)) - return; - if (inlen > fill) { - memcpy(state->buf + state->buflen, in, fill); - if (force_generic) - blake2s_compress_generic(state, state->buf, 1, - BLAKE2S_BLOCK_SIZE); - else - blake2s_compress(state, state->buf, 1, - BLAKE2S_BLOCK_SIZE); - state->buflen = 0; - in += fill; - inlen -= fill; - } - if (inlen > 
BLAKE2S_BLOCK_SIZE) { - const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); - /* Hash one less (full) block than strictly possible */ - if (force_generic) - blake2s_compress_generic(state, in, nblocks - 1, - BLAKE2S_BLOCK_SIZE); - else - blake2s_compress(state, in, nblocks - 1, - BLAKE2S_BLOCK_SIZE); - in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); - inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); - } - memcpy(state->buf + state->buflen, in, inlen); - state->buflen += inlen; -} - -static __always_inline void -__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic) -{ - blake2s_set_lastblock(state); - memset(state->buf + state->buflen, 0, - BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ - if (force_generic) - blake2s_compress_generic(state, state->buf, 1, state->buflen); - else - blake2s_compress(state, state->buf, 1, state->buflen); - cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); - memcpy(out, state->h, state->outlen); -} - -/* Helper functions for shash implementations of BLAKE2s */ - -struct blake2s_tfm_ctx { - u8 key[BLAKE2S_KEY_SIZE]; - unsigned int keylen; -}; - -static inline int crypto_blake2s_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen) -{ - struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); - - if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) - return -EINVAL; - - memcpy(tctx->key, key, keylen); - tctx->keylen = keylen; - - return 0; -} - -static inline int crypto_blake2s_init(struct shash_desc *desc) -{ - const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); - struct blake2s_state *state = shash_desc_ctx(desc); - unsigned int outlen = crypto_shash_digestsize(desc->tfm); - - __blake2s_init(state, outlen, tctx->key, tctx->keylen); - return 0; -} - -static inline int crypto_blake2s_update(struct shash_desc *desc, - const u8 *in, unsigned int inlen, - bool force_generic) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - - __blake2s_update(state, in, inlen, force_generic); - return 0; -} - -static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out, - bool force_generic) -{ - struct blake2s_state *state = shash_desc_ctx(desc); - - __blake2s_final(state, out, force_generic); - return 0; -} - #endif /* _CRYPTO_INTERNAL_BLAKE2S_H */ diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c index 409e4b7287704..66f505220f43f 100644 --- a/lib/crypto/blake2s-selftest.c +++ b/lib/crypto/blake2s-selftest.c @@ -4,6 +4,8 @@ */ #include +#include +#include #include /* @@ -587,5 +589,44 @@ bool __init blake2s_selftest(void) } } + for (i = 0; i < 32; ++i) { + enum { TEST_ALIGNMENT = 16 }; + u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1] + __aligned(TEST_ALIGNMENT); + u8 blocks[BLAKE2S_BLOCK_SIZE * 3]; + struct blake2s_state state1, state2; + + get_random_bytes(blocks, sizeof(blocks)); + get_random_bytes(&state, sizeof(state)); + +#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \ + defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S) + memcpy(&state1, &state, sizeof(state1)); + memcpy(&state2, &state, sizeof(state2)); + blake2s_compress(&state1, blocks, 3, BLAKE2S_BLOCK_SIZE); + blake2s_compress_generic(&state2, blocks, 3, BLAKE2S_BLOCK_SIZE); + if (memcmp(&state1, &state2, sizeof(state1))) { + pr_err("blake2s random compress self-test %d: FAIL\n", + i + 1); + success = false; + } +#endif + + memcpy(&state1, &state, sizeof(state1)); + blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE); + for (l = 1; l < TEST_ALIGNMENT; ++l) { + memcpy(unaligned_block + l, blocks, + 
BLAKE2S_BLOCK_SIZE); + memcpy(&state2, &state, sizeof(state2)); + blake2s_compress(&state2, unaligned_block + l, 1, + BLAKE2S_BLOCK_SIZE); + if (memcmp(&state1, &state2, sizeof(state1))) { + pr_err("blake2s random compress align %d self-test %d: FAIL\n", + l, i + 1); + success = false; + } + } + } + return success; } diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index c71c09621c09c..98e688c6d8910 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -16,16 +16,44 @@ #include #include +static inline void blake2s_set_lastblock(struct blake2s_state *state) +{ + state->f[0] = -1; +} + void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) { - __blake2s_update(state, in, inlen, false); + const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; + + if (unlikely(!inlen)) + return; + if (inlen > fill) { + memcpy(state->buf + state->buflen, in, fill); + blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); + state->buflen = 0; + in += fill; + inlen -= fill; + } + if (inlen > BLAKE2S_BLOCK_SIZE) { + const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); + blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); + in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); + inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); + } + memcpy(state->buf + state->buflen, in, inlen); + state->buflen += inlen; } EXPORT_SYMBOL(blake2s_update); void blake2s_final(struct blake2s_state *state, u8 *out) { WARN_ON(IS_ENABLED(DEBUG) && !out); - __blake2s_final(state, out, false); + blake2s_set_lastblock(state); + memset(state->buf + state->buflen, 0, + BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ + blake2s_compress(state, state->buf, 1, state->buflen); + cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); + memcpy(out, state->h, state->outlen); memzero_explicit(state, sizeof(*state)); } EXPORT_SYMBOL(blake2s_final); @@ -38,12 +66,7 @@ static int __init blake2s_mod_init(void) return 0; } -static void __exit blake2s_mod_exit(void) -{ -} - module_init(blake2s_mod_init); -module_exit(blake2s_mod_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("BLAKE2s hash function"); MODULE_AUTHOR("Jason A. Donenfeld "); From b03c0dc0788abccc7a25ef7dff5818f4123bb992 Mon Sep 17 00:00:00 2001 From: Shijith Thotton Date: Wed, 1 Jun 2022 13:38:59 +0530 Subject: [PATCH 19/89] crypto: octeontx2 - fix potential null pointer access Added missing checks to avoid null pointer dereference. The patch fixes below issue reported by klocwork tool: . Pointer 'strsep( &val, ":" )' returned from call to function 'strsep' at line 1608 may be NULL and will be dereferenced at line 1608. Also there are 2 similar errors on lines 1620, 1632 in otx2_cptpf_ucode.c. 
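Reduced to a standalone sketch (parse_token() is illustrative only, not driver code), the pattern being enforced is that strsep() returns NULL when the string pointer it advances is already NULL, so the token must be checked before strim() dereferences it:

#include <linux/errno.h>
#include <linux/string.h>

/* Illustrative helper: take the next ':'-separated token and trim it. */
static int parse_token(char **val, char **token)
{
	char *tmp = strsep(val, ":");

	if (!tmp)	/* *val was NULL: nothing left to parse */
		return -EINVAL;

	*token = strim(tmp);
	return 0;
}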
Signed-off-by: Shijith Thotton Signed-off-by: Herbert Xu --- .../crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 46ffb7ae982c5..f10050fead164 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1605,7 +1605,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, if (!strncasecmp(val, "se", 2) && strchr(val, ':')) { if (has_se || ucode_idx) goto err_print; - tmp = strim(strsep(&val, ":")); + tmp = strsep(&val, ":"); + if (!tmp) + goto err_print; + tmp = strim(tmp); if (!val) goto err_print; if (strlen(tmp) != 2) @@ -1617,7 +1620,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) { if (has_ae || ucode_idx) goto err_print; - tmp = strim(strsep(&val, ":")); + tmp = strsep(&val, ":"); + if (!tmp) + goto err_print; + tmp = strim(tmp); if (!val) goto err_print; if (strlen(tmp) != 2) @@ -1629,7 +1635,10 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) { if (has_ie || ucode_idx) goto err_print; - tmp = strim(strsep(&val, ":")); + tmp = strsep(&val, ":"); + if (!tmp) + goto err_print; + tmp = strim(tmp); if (!val) goto err_print; if (strlen(tmp) != 2) From 7e8df1fc2d669d04c1f8a9e2d61d7afba1b43df4 Mon Sep 17 00:00:00 2001 From: Peng Wu Date: Thu, 2 Jun 2022 07:22:34 +0000 Subject: [PATCH 20/89] crypto: sun8i-ss - fix a NULL vs IS_ERR() check in sun8i_ss_hashkey The crypto_alloc_shash() function never returns NULL. It returns error pointers. Fixes: 801b7d572c0a ("crypto: sun8i-ss - add hmac(sha1)") Signed-off-by: Peng Wu Reported-by: Hulk Robot Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 845019bd95911..36a82b22953cd 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -30,8 +30,8 @@ static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key, int ret = 0; xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK); - if (!xtfm) - return -ENOMEM; + if (IS_ERR(xtfm)) + return PTR_ERR(xtfm); len = sizeof(*sdesc) + crypto_shash_descsize(xtfm); sdesc = kmalloc(len, GFP_KERNEL); From ce6330f74b08c8cbb2e3c04bd31cbace3de20660 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Thu, 9 Jun 2022 02:56:39 +0000 Subject: [PATCH 21/89] MAINTAINERS: update HiSilicon ZIP and QM maintainers This patch splits QM and ZIP in MAINTAINERS, then add Weili Qian for QM driver and Yang Shen for ZIP driver. This patch adds missing Kconfig and Makefile files as well. 
Signed-off-by: Zhou Wang Signed-off-by: Herbert Xu --- MAINTAINERS | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index a6d3bd9d2a8d0..b04fb33248370 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8936,16 +8936,25 @@ F: Documentation/admin-guide/perf/hisi-pcie-pmu.rst F: Documentation/admin-guide/perf/hisi-pmu.rst F: drivers/perf/hisilicon -HISILICON QM AND ZIP Controller DRIVER +HISILICON QM DRIVER +M: Weili Qian M: Zhou Wang L: linux-crypto@vger.kernel.org S: Maintained -F: Documentation/ABI/testing/debugfs-hisi-zip +F: drivers/crypto/hisilicon/Kconfig +F: drivers/crypto/hisilicon/Makefile F: drivers/crypto/hisilicon/qm.c F: drivers/crypto/hisilicon/sgl.c -F: drivers/crypto/hisilicon/zip/ F: include/linux/hisi_acc_qm.h +HISILICON ZIP Controller DRIVER +M: Yang Shen +M: Zhou Wang +L: linux-crypto@vger.kernel.org +S: Maintained +F: Documentation/ABI/testing/debugfs-hisi-zip +F: drivers/crypto/hisilicon/zip/ + HISILICON ROCE DRIVER M: Wenpeng Liang M: Weihang Li From 00856e5391fbfbe00bf641a39f1bf8d1a144367a Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Thu, 9 Jun 2022 19:18:19 +0800 Subject: [PATCH 22/89] crypto: hisilicon/trng - fix local variable type The return value of 'readl_poll_timeout' is '0' or '-ETIMEDOUT'. Therefore, change the local variable 'ret' type from 'u32' to 'int'. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/trng/trng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c index 829f2caf0f67f..97e500db0a825 100644 --- a/drivers/crypto/hisilicon/trng/trng.c +++ b/drivers/crypto/hisilicon/trng/trng.c @@ -185,7 +185,7 @@ static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) struct hisi_trng *trng; int currsize = 0; u32 val = 0; - u32 ret; + int ret; trng = container_of(rng, struct hisi_trng, rng); From bf081d6fa8e90aefe991e34a29eff7aa22deb3ff Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Thu, 9 Jun 2022 20:31:17 +0800 Subject: [PATCH 23/89] crypto: hisilicon/qm - add functions for releasing resources The resources allocated by hisi_qm_memory_init() are released by hisi_qm_uninit(). Add hisi_qm_memory_uninit() to release resources, no functional change. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b4ca2eb034d7d..903896ab5be5d 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3672,6 +3672,21 @@ static void qm_last_regs_uninit(struct hisi_qm *qm) debug->qm_last_words = NULL; } +static void hisi_qm_memory_uninit(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + + hisi_qp_memory_uninit(qm, qm->qp_num); + if (qm->qdma.va) { + hisi_qm_cache_wb(qm); + dma_free_coherent(dev, qm->qdma.size, + qm->qdma.va, qm->qdma.dma); + } + + idr_destroy(&qm->qp_idr); + kfree(qm->factor); +} + /** * hisi_qm_uninit() - Uninitialize qm. * @qm: The qm needed uninit. 
@@ -3680,13 +3695,9 @@ static void qm_last_regs_uninit(struct hisi_qm *qm) */ void hisi_qm_uninit(struct hisi_qm *qm) { - struct pci_dev *pdev = qm->pdev; - struct device *dev = &pdev->dev; - qm_last_regs_uninit(qm); qm_cmd_uninit(qm); - kfree(qm->factor); down_write(&qm->qps_lock); if (!qm_avail_state(qm, QM_CLOSE)) { @@ -3694,14 +3705,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) return; } - hisi_qp_memory_uninit(qm, qm->qp_num); - idr_destroy(&qm->qp_idr); - - if (qm->qdma.va) { - hisi_qm_cache_wb(qm); - dma_free_coherent(dev, qm->qdma.size, - qm->qdma.va, qm->qdma.dma); - } + hisi_qm_memory_uninit(qm); hisi_qm_set_state(qm, QM_NOT_READY); up_write(&qm->qps_lock); From 3099fc9c2b3aaace80947d07d13b40da2dd79fd4 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Thu, 9 Jun 2022 20:31:18 +0800 Subject: [PATCH 24/89] crypto: hisilicon/qm - move alloc qm->wq to qm.c Before stopping the function, the driver needs to flush all the remaining work about event irq. Therefore, accelerator drivers use a private workqueue(qm->wq) to handle event irq instead of the system workqueue. This patch moves alloc workqueue from sec_main.c and zip_main.c to qm.c. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 35 +++++++++++++++++------- drivers/crypto/hisilicon/sec2/sec_main.c | 24 +--------------- drivers/crypto/hisilicon/zip/zip_main.c | 17 +----------- 3 files changed, 27 insertions(+), 49 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 903896ab5be5d..f8d36b68494e7 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -959,10 +959,7 @@ static irqreturn_t do_qm_irq(int irq, void *data) struct hisi_qm *qm = (struct hisi_qm *)data; /* the workqueue created by device driver of QM */ - if (qm->wq) - queue_work(qm->wq, &qm->work); - else - schedule_work(&qm->work); + queue_work(qm->wq, &qm->work); return IRQ_HANDLED; } @@ -3134,11 +3131,8 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp) if (ret) dev_err(dev, "Failed to drain out data for stopping!\n"); - if (qp->qm->wq) - flush_workqueue(qp->qm->wq); - else - flush_work(&qp->qm->work); + flush_workqueue(qp->qm->wq); if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) qp_stop_fail_cb(qp); @@ -3672,6 +3666,11 @@ static void qm_last_regs_uninit(struct hisi_qm *qm) debug->qm_last_words = NULL; } +static void hisi_qm_unint_work(struct hisi_qm *qm) +{ + destroy_workqueue(qm->wq); +} + static void hisi_qm_memory_uninit(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -3698,6 +3697,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) qm_last_regs_uninit(qm); qm_cmd_uninit(qm); + hisi_qm_unint_work(qm); down_write(&qm->qps_lock); if (!qm_avail_state(qm, QM_CLOSE)) { @@ -6022,7 +6022,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) return ret; } -static void hisi_qm_init_work(struct hisi_qm *qm) +static int hisi_qm_init_work(struct hisi_qm *qm) { INIT_WORK(&qm->work, qm_work_process); if (qm->fun_type == QM_HW_PF) @@ -6030,6 +6030,16 @@ static void hisi_qm_init_work(struct hisi_qm *qm) if (qm->ver > QM_HW_V2) INIT_WORK(&qm->cmd_process, qm_cmd_process); + + qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_UNBOUND, num_online_cpus(), + pci_name(qm->pdev)); + if (!qm->wq) { + pci_err(qm->pdev, "failed to alloc workqueue!\n"); + return -ENOMEM; + } + + return 0; } static int hisi_qp_alloc_memory(struct hisi_qm *qm) @@ -6180,7 +6190,10 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_alloc_uacce; - 
hisi_qm_init_work(qm); + ret = hisi_qm_init_work(qm); + if (ret) + goto err_free_qm_memory; + qm_cmd_init(qm); atomic_set(&qm->status.flags, QM_INIT); @@ -6188,6 +6201,8 @@ int hisi_qm_init(struct hisi_qm *qm) return 0; +err_free_qm_memory: + hisi_qm_memory_uninit(qm); err_alloc_uacce: if (qm->use_sva) { uacce_remove(qm->uacce); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 4d85d2cbf3767..bdb690aaed12a 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1002,8 +1002,6 @@ static int sec_pf_probe_init(struct sec_dev *sec) static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - int ret; - qm->pdev = pdev; qm->ver = pdev->revision; qm->algs = "cipher\ndigest\naead"; @@ -1029,25 +1027,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } - /* - * WQ_HIGHPRI: SEC request must be low delayed, - * so need a high priority workqueue. - * WQ_UNBOUND: SEC task is likely with long - * running CPU intensive workloads. - */ - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "fail to alloc workqueue\n"); - return -ENOMEM; - } - - ret = hisi_qm_init(qm); - if (ret) - destroy_workqueue(qm->wq); - - return ret; + return hisi_qm_init(qm); } static void sec_qm_uninit(struct hisi_qm *qm) @@ -1078,8 +1058,6 @@ static int sec_probe_init(struct sec_dev *sec) static void sec_probe_uninit(struct hisi_qm *qm) { hisi_qm_dev_err_uninit(qm); - - destroy_workqueue(qm->wq); } static void sec_iommu_used_check(struct sec_dev *sec) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 9c925e9c0a2d1..c3303d99acac7 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -990,8 +990,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - int ret; - qm->pdev = pdev; qm->ver = pdev->revision; if (pdev->revision >= QM_HW_V3) @@ -1021,25 +1019,12 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; } - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | - WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "fail to alloc workqueue\n"); - return -ENOMEM; - } - - ret = hisi_qm_init(qm); - if (ret) - destroy_workqueue(qm->wq); - - return ret; + return hisi_qm_init(qm); } static void hisi_zip_qm_uninit(struct hisi_qm *qm) { hisi_qm_uninit(qm); - destroy_workqueue(qm->wq); } static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) From d64de9773c18409d2161228242968ff3ebe3707e Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Thu, 9 Jun 2022 20:31:19 +0800 Subject: [PATCH 25/89] crypto: hisilicon/qm - modify event irq processing When the driver receives an event interrupt, the driver will enable the event interrupt after handling all completed tasks on the function, tasks on the function are parsed through only one thread. If the task's user callback takes time, other tasks on the function will be blocked. Therefore, the event irq processing is modified as follows: 1. Obtain the ID of the queue that completes the task. 2. Enable event interrupt. 3. Parse the completed tasks in the queue and call the user callback. 
Enabling event interrupt in advance can quickly report pending event interrupts and process tasks in multiple threads. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 142 ++++++++++++++++++++++------------ include/linux/hisi_acc_qm.h | 8 +- 2 files changed, 99 insertions(+), 51 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f8d36b68494e7..ad83c194d6648 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -877,13 +877,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm) pm_runtime_put_autosuspend(dev); } -static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) -{ - u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; - - return &qm->qp_array[cqn]; -} - static void qm_cq_head_update(struct hisi_qp *qp) { if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { @@ -894,47 +887,37 @@ static void qm_cq_head_update(struct hisi_qp *qp) } } -static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) +static void qm_poll_req_cb(struct hisi_qp *qp) { - if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) - return; - - if (qp->event_cb) { - qp->event_cb(qp); - return; - } - - if (qp->req_cb) { - struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; - - while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { - dma_rmb(); - qp->req_cb(qp, qp->sqe + qm->sqe_size * - le16_to_cpu(cqe->sq_head)); - qm_cq_head_update(qp); - cqe = qp->cqe + qp->qp_status.cq_head; - qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, - qp->qp_status.cq_head, 0); - atomic_dec(&qp->qp_status.used); - } + struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; + struct hisi_qm *qm = qp->qm; - /* set c_flag */ + while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { + dma_rmb(); + qp->req_cb(qp, qp->sqe + qm->sqe_size * + le16_to_cpu(cqe->sq_head)); + qm_cq_head_update(qp); + cqe = qp->cqe + qp->qp_status.cq_head; qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, - qp->qp_status.cq_head, 1); + qp->qp_status.cq_head, 0); + atomic_dec(&qp->qp_status.used); } + + /* set c_flag */ + qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); } -static void qm_work_process(struct work_struct *work) +static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) { - struct hisi_qm *qm = container_of(work, struct hisi_qm, work); + struct hisi_qm *qm = poll_data->qm; struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; - struct hisi_qp *qp; int eqe_num = 0; + u16 cqn; while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; + poll_data->qp_finish_id[eqe_num] = cqn; eqe_num++; - qp = qm_to_hisi_qp(qm, eqe); - qm_poll_qp(qp, qm); if (qm->status.eq_head == QM_EQ_DEPTH - 1) { qm->status.eqc_phase = !qm->status.eqc_phase; @@ -945,34 +928,70 @@ static void qm_work_process(struct work_struct *work) qm->status.eq_head++; } - if (eqe_num == QM_EQ_DEPTH / 2 - 1) { - eqe_num = 0; - qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); - } + if (eqe_num == (QM_EQ_DEPTH >> 1) - 1) + break; } qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + + return eqe_num; } -static irqreturn_t do_qm_irq(int irq, void *data) +static void qm_work_process(struct work_struct *work) { - struct hisi_qm *qm = (struct hisi_qm *)data; + struct hisi_qm_poll_data *poll_data = + container_of(work, struct hisi_qm_poll_data, work); + struct hisi_qm *qm = poll_data->qm; + struct hisi_qp *qp; + int eqe_num, i; - /* the workqueue created by device driver of QM */ - queue_work(qm->wq, &qm->work); + /* 
Get qp id of completed tasks and re-enable the interrupt. */ + eqe_num = qm_get_complete_eqe_num(poll_data); + for (i = eqe_num - 1; i >= 0; i--) { + qp = &qm->qp_array[poll_data->qp_finish_id[i]]; + if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) + continue; - return IRQ_HANDLED; + if (qp->event_cb) { + qp->event_cb(qp); + continue; + } + + if (likely(qp->req_cb)) + qm_poll_req_cb(qp); + } +} + +static bool do_qm_irq(struct hisi_qm *qm) +{ + struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; + struct hisi_qm_poll_data *poll_data; + u16 cqn; + + if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) + return false; + + if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { + cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; + poll_data = &qm->poll_data[cqn]; + queue_work(qm->wq, &poll_data->work); + + return true; + } + + return false; } static irqreturn_t qm_irq(int irq, void *data) { struct hisi_qm *qm = data; + bool ret; - if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) - return do_qm_irq(irq, data); + ret = do_qm_irq(qm); + if (ret) + return IRQ_HANDLED; atomic64_inc(&qm->debug.dfx.err_irq_cnt); - dev_err(&qm->pdev->dev, "invalid int source\n"); qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); return IRQ_NONE; @@ -3551,8 +3570,10 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) for (i = num - 1; i >= 0; i--) { qdma = &qm->qp_array[i].qdma; dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); + kfree(qm->poll_data[i].qp_finish_id); } + kfree(qm->poll_data); kfree(qm->qp_array); } @@ -3561,12 +3582,18 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) struct device *dev = &qm->pdev->dev; size_t off = qm->sqe_size * QM_Q_DEPTH; struct hisi_qp *qp; + int ret = -ENOMEM; + + qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), + GFP_KERNEL); + if (!qm->poll_data[id].qp_finish_id) + return -ENOMEM; qp = &qm->qp_array[id]; qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, GFP_KERNEL); if (!qp->qdma.va) - return -ENOMEM; + goto err_free_qp_finish_id; qp->sqe = qp->qdma.va; qp->sqe_dma = qp->qdma.dma; @@ -3577,6 +3604,10 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) qp->qp_id = id; return 0; + +err_free_qp_finish_id: + kfree(qm->poll_data[id].qp_finish_id); + return ret; } static void hisi_qm_pre_init(struct hisi_qm *qm) @@ -6024,7 +6055,11 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) static int hisi_qm_init_work(struct hisi_qm *qm) { - INIT_WORK(&qm->work, qm_work_process); + int i; + + for (i = 0; i < qm->qp_num; i++) + INIT_WORK(&qm->poll_data[i].work, qm_work_process); + if (qm->fun_type == QM_HW_PF) INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); @@ -6052,11 +6087,18 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm) if (!qm->qp_array) return -ENOMEM; + qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); + if (!qm->poll_data) { + kfree(qm->qp_array); + return -ENOMEM; + } + /* one more page for device or qp statuses */ qp_dma_size = qm->sqe_size * QM_Q_DEPTH + sizeof(struct qm_cqe) * QM_Q_DEPTH; qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; for (i = 0; i < qm->qp_num; i++) { + qm->poll_data[i].qm = qm; ret = hisi_qp_memory_init(qm, qp_dma_size, i); if (ret) goto err_init_qp_mem; diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 6cabafffd0dda..116e8bd68c999 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -265,6 +265,12 @@ struct hisi_qm_list { void 
(*unregister_from_crypto)(struct hisi_qm *qm); }; +struct hisi_qm_poll_data { + struct hisi_qm *qm; + struct work_struct work; + u16 *qp_finish_id; +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -302,6 +308,7 @@ struct hisi_qm { struct rw_semaphore qps_lock; struct idr qp_idr; struct hisi_qp *qp_array; + struct hisi_qm_poll_data *poll_data; struct mutex mailbox_lock; @@ -312,7 +319,6 @@ struct hisi_qm { u32 error_mask; struct workqueue_struct *wq; - struct work_struct work; struct work_struct rst_work; struct work_struct cmd_process; From d61a7b3decf7f0cf4121a7204303deefd2c7151b Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Fri, 10 Jun 2022 21:27:15 +0300 Subject: [PATCH 26/89] crypto: sun8i-ss - fix infinite loop in sun8i_ss_setup_ivs() There is no i decrement in while (i >= 0) loop. Found by Linux Verification Center (linuxtesting.org) with SVACE. Signed-off-by: Alexey Khoroshilov Fixes: 359e893e8af4 ("crypto: sun8i-ss - rework handling of IV") Acked-by: Corentin Labbe Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 5bb950182026f..910d6751644cf 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -170,6 +170,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) while (i >= 0) { dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); memzero_explicit(sf->iv[i], ivsize); + i--; } return err; } From bffa1fc065893a14703545efba7d69bb4082b18a Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Sat, 11 Jun 2022 15:38:08 +0800 Subject: [PATCH 27/89] crypto: hisilicon/sec - only HW V2 needs to change the BD err detection The base register address of V2 and V3 are different. HW V3 not needs to change the BD err detection. Signed-off-by: Kai Ye Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index bdb690aaed12a..2c0be91c0b094 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -508,16 +508,17 @@ static int sec_engine_init(struct hisi_qm *qm) writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); - /* HW V2 enable sm4 extra mode, as ctr/ecb */ - if (qm->ver < QM_HW_V3) + if (qm->ver < QM_HW_V3) { + /* HW V2 enable sm4 extra mode, as ctr/ecb */ writel_relaxed(SEC_BD_ERR_CHK_EN0, qm->io_base + SEC_BD_ERR_CHK_EN_REG0); - /* Enable sm4 xts mode multiple iv */ - writel_relaxed(SEC_BD_ERR_CHK_EN1, - qm->io_base + SEC_BD_ERR_CHK_EN_REG1); - writel_relaxed(SEC_BD_ERR_CHK_EN3, - qm->io_base + SEC_BD_ERR_CHK_EN_REG3); + /* HW V2 enable sm4 xts mode multiple iv */ + writel_relaxed(SEC_BD_ERR_CHK_EN1, + qm->io_base + SEC_BD_ERR_CHK_EN_REG1); + writel_relaxed(SEC_BD_ERR_CHK_EN3, + qm->io_base + SEC_BD_ERR_CHK_EN_REG3); + } /* config endian */ sec_set_endian(qm); From 1b05ece0c931536c0a38a9385e243a7962e933f6 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 16 Jun 2022 10:26:18 -0500 Subject: [PATCH 28/89] crypto: ccp - During shutdown, check SEV data pointer before using On shutdown, each CCP device instance performs shutdown processing. 
However, __sev_platform_shutdown_locked() uses the controlling psp structure to obtain the pointer to the sev_device structure. However, during driver initialization, it is possible that an error can be received from the firmware that results in the sev_data pointer being cleared from the controlling psp structure. The __sev_platform_shutdown_locked() function does not check for this situation and will segfault. While not common, this scenario should be accounted for. Add a check for a NULL sev_device structure before attempting to use it. Fixes: 5441a07a127f ("crypto: ccp - shutdown SEV firmware on kexec") Signed-off-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 0c92d940ac4ef..9f588c9728f8b 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -503,7 +503,7 @@ static int __sev_platform_shutdown_locked(int *error) struct sev_device *sev = psp_master->sev_data; int ret; - if (sev->state == SEV_STATE_UNINIT) + if (!sev || sev->state == SEV_STATE_UNINIT) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); From f145d411a67efacc0731fc3f9c7b2d89fb62523a Mon Sep 17 00:00:00 2001 From: Ignat Korchagin Date: Fri, 17 Jun 2022 09:42:10 +0100 Subject: [PATCH 29/89] crypto: rsa - implement Chinese Remainder Theorem for faster private key operations Changes from v1: * exported mpi_sub and mpi_mul, otherwise the build fails when RSA is a module The kernel RSA ASN.1 private key parser already supports only private keys with additional values to be used with the Chinese Remainder Theorem [1], but these values are currently not used. This rudimentary CRT implementation speeds up RSA private key operations for the following Go benchmark up to ~3x. This implementation also tries to minimise the allocation of additional MPIs, so existing MPIs are reused as much as possible (hence the variable names are a bit weird). 
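To see the CRT arithmetic work end to end, here is a minimal userspace C sketch using the textbook toy key (p = 61, q = 53), with plain 64-bit integers rather than kernel MPIs — purely illustrative, not the kernel code path — showing that the four CRT steps recover the same plaintext as the straight m = c^d mod n computation:

```
#include <stdio.h>
#include <assert.h>

/* square-and-multiply: b^e mod n (small operands only, no overflow here) */
static unsigned long long powmod(unsigned long long b, unsigned long long e,
                                 unsigned long long n)
{
        unsigned long long r = 1;

        b %= n;
        while (e) {
                if (e & 1)
                        r = r * b % n;
                b = b * b % n;
                e >>= 1;
        }
        return r;
}

int main(void)
{
        /* textbook toy key: n = p*q = 3233, e*d == 1 (mod lcm(p-1, q-1)) */
        unsigned long long p = 61, q = 53, n = 3233, e = 17, d = 2753;
        unsigned long long dp = d % (p - 1);    /* dP = 53 */
        unsigned long long dq = d % (q - 1);    /* dQ = 49 */
        unsigned long long qinv = 38;           /* qInv = q^-1 mod p */
        unsigned long long m = 65, c, m1, m2, h, m_crt;

        c = powmod(m, e, n);                    /* RSAEP: c = m^e mod n */

        /* straight RSADP: m = c^d mod n */
        assert(powmod(c, d, n) == m);

        /* CRT RSADP, the same four steps listed in _rsa_dec_crt() */
        m1 = powmod(c, dp, p);                  /* m_1 = c^dP mod p */
        m2 = powmod(c, dq, q);                  /* m_2 = c^dQ mod q */
        h = (m1 + p - m2 % p) % p * qinv % p;   /* h = (m_1 - m_2) * qInv mod p */
        m_crt = m2 + q * h;                     /* m = m_2 + q * h */
        assert(m_crt == m);

        printf("m = %llu, recovered identically by both paths\n", m_crt);
        return 0;
}
```

The gain comes from replacing one full-size modular exponentiation with two half-size ones, which is where the up to ~3x improvement in the benchmark below comes from.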
The benchmark used: ``` package keyring_test import ( "crypto" "crypto/rand" "crypto/rsa" "crypto/x509" "io" "syscall" "testing" "unsafe" ) type KeySerial int32 type Keyring int32 const ( KEY_SPEC_PROCESS_KEYRING Keyring = -2 KEYCTL_PKEY_SIGN = 27 ) var ( keyTypeAsym = []byte("asymmetric\x00") sha256pkcs1 = []byte("enc=pkcs1 hash=sha256\x00") ) func (keyring Keyring) LoadAsym(desc string, payload []byte) (KeySerial, error) { cdesc := []byte(desc + "\x00") serial, _, errno := syscall.Syscall6(syscall.SYS_ADD_KEY, uintptr(unsafe.Pointer(&keyTypeAsym[0])), uintptr(unsafe.Pointer(&cdesc[0])), uintptr(unsafe.Pointer(&payload[0])), uintptr(len(payload)), uintptr(keyring), uintptr(0)) if errno == 0 { return KeySerial(serial), nil } return KeySerial(serial), errno } type pkeyParams struct { key_id KeySerial in_len uint32 out_or_in2_len uint32 __spare [7]uint32 } // the output signature buffer is an input parameter here, because we want to // avoid Go buffer allocation leaking into our benchmarks func (key KeySerial) Sign(info, digest, out []byte) error { var params pkeyParams params.key_id = key params.in_len = uint32(len(digest)) params.out_or_in2_len = uint32(len(out)) _, _, errno := syscall.Syscall6(syscall.SYS_KEYCTL, KEYCTL_PKEY_SIGN, uintptr(unsafe.Pointer(¶ms)), uintptr(unsafe.Pointer(&info[0])), uintptr(unsafe.Pointer(&digest[0])), uintptr(unsafe.Pointer(&out[0])), uintptr(0)) if errno == 0 { return nil } return errno } func BenchmarkSign(b *testing.B) { priv, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { b.Fatalf("failed to generate private key: %v", err) } pkcs8, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { b.Fatalf("failed to serialize the private key to PKCS8 blob: %v", err) } serial, err := KEY_SPEC_PROCESS_KEYRING.LoadAsym("test rsa key", pkcs8) if err != nil { b.Fatalf("failed to load the private key into the keyring: %v", err) } b.Logf("loaded test rsa key: %v", serial) digest := make([]byte, 32) _, err = io.ReadFull(rand.Reader, digest) if err != nil { b.Fatalf("failed to generate a random digest: %v", err) } sig := make([]byte, 256) for n := 0; n < b.N; n++ { err = serial.Sign(sha256pkcs1, digest, sig) if err != nil { b.Fatalf("failed to sign the digest: %v", err) } } err = rsa.VerifyPKCS1v15(&priv.PublicKey, crypto.SHA256, digest, sig) if err != nil { b.Fatalf("failed to verify the signature: %v", err) } } ``` [1]: https://en.wikipedia.org/wiki/RSA_(cryptosystem)#Using_the_Chinese_remainder_algorithm Signed-off-by: Ignat Korchagin Reported-by: kernel test robot Signed-off-by: Herbert Xu --- crypto/rsa.c | 78 ++++++++++++++++++++++++++++++++++++++++++++--- lib/mpi/mpi-add.c | 2 +- lib/mpi/mpi-mul.c | 1 + 3 files changed, 75 insertions(+), 6 deletions(-) diff --git a/crypto/rsa.c b/crypto/rsa.c index 39e04176b04b2..0e555ee4addb7 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c @@ -17,6 +17,11 @@ struct rsa_mpi_key { MPI n; MPI e; MPI d; + MPI p; + MPI q; + MPI dp; + MPI dq; + MPI qinv; }; /* @@ -35,16 +40,49 @@ static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m) /* * RSADP function [RFC3447 sec 5.1.2] - * m = c^d mod n; + * m_1 = c^dP mod p; + * m_2 = c^dQ mod q; + * h = (m_1 - m_2) * qInv mod p; + * m = m_2 + q * h; */ -static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c) +static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c) { + MPI m2, m12_or_qh; + int ret = -ENOMEM; + /* (1) Validate 0 <= c < n */ if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) return -EINVAL; - /* (2) m = c^d mod n */ - return 
mpi_powm(m, c, key->d, key->n); + m2 = mpi_alloc(0); + m12_or_qh = mpi_alloc(0); + if (!m2 || !m12_or_qh) + goto err_free_mpi; + + /* (2i) m_1 = c^dP mod p */ + ret = mpi_powm(m_or_m1_or_h, c, key->dp, key->p); + if (ret) + goto err_free_mpi; + + /* (2i) m_2 = c^dQ mod q */ + ret = mpi_powm(m2, c, key->dq, key->q); + if (ret) + goto err_free_mpi; + + /* (2iii) h = (m_1 - m_2) * qInv mod p */ + mpi_sub(m12_or_qh, m_or_m1_or_h, m2); + mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p); + + /* (2iv) m = m_2 + q * h */ + mpi_mul(m12_or_qh, key->q, m_or_m1_or_h); + mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n); + + ret = 0; + +err_free_mpi: + mpi_free(m12_or_qh); + mpi_free(m2); + return ret; } static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm) @@ -112,7 +150,7 @@ static int rsa_dec(struct akcipher_request *req) if (!c) goto err_free_m; - ret = _rsa_dec(pkey, m, c); + ret = _rsa_dec_crt(pkey, m, c); if (ret) goto err_free_c; @@ -134,9 +172,19 @@ static void rsa_free_mpi_key(struct rsa_mpi_key *key) mpi_free(key->d); mpi_free(key->e); mpi_free(key->n); + mpi_free(key->p); + mpi_free(key->q); + mpi_free(key->dp); + mpi_free(key->dq); + mpi_free(key->qinv); key->d = NULL; key->e = NULL; key->n = NULL; + key->p = NULL; + key->q = NULL; + key->dp = NULL; + key->dq = NULL; + key->qinv = NULL; } static int rsa_check_key_length(unsigned int len) @@ -217,6 +265,26 @@ static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, if (!mpi_key->n) goto err; + mpi_key->p = mpi_read_raw_data(raw_key.p, raw_key.p_sz); + if (!mpi_key->p) + goto err; + + mpi_key->q = mpi_read_raw_data(raw_key.q, raw_key.q_sz); + if (!mpi_key->q) + goto err; + + mpi_key->dp = mpi_read_raw_data(raw_key.dp, raw_key.dp_sz); + if (!mpi_key->dp) + goto err; + + mpi_key->dq = mpi_read_raw_data(raw_key.dq, raw_key.dq_sz); + if (!mpi_key->dq) + goto err; + + mpi_key->qinv = mpi_read_raw_data(raw_key.qinv, raw_key.qinv_sz); + if (!mpi_key->qinv) + goto err; + if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) { rsa_free_mpi_key(mpi_key); return -EINVAL; diff --git a/lib/mpi/mpi-add.c b/lib/mpi/mpi-add.c index 2cdae54c1bd01..9056fc5167fc8 100644 --- a/lib/mpi/mpi-add.c +++ b/lib/mpi/mpi-add.c @@ -138,7 +138,7 @@ void mpi_sub(MPI w, MPI u, MPI v) mpi_add(w, u, vv); mpi_free(vv); } - +EXPORT_SYMBOL_GPL(mpi_sub); void mpi_addm(MPI w, MPI u, MPI v, MPI m) { diff --git a/lib/mpi/mpi-mul.c b/lib/mpi/mpi-mul.c index 8f5fa200f2971..7f4eda8560dc9 100644 --- a/lib/mpi/mpi-mul.c +++ b/lib/mpi/mpi-mul.c @@ -82,6 +82,7 @@ void mpi_mul(MPI w, MPI u, MPI v) if (tmp_limb) mpi_free_limb_space(tmp_limb); } +EXPORT_SYMBOL_GPL(mpi_mul); void mpi_mulm(MPI w, MPI u, MPI v, MPI m) { From c2a1b91e47984e477298912ffd55570095d67318 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Fri, 17 Jun 2022 09:59:44 +0000 Subject: [PATCH 30/89] crypto: qat - replace get_current_node() with numa_node_id() Currently the QAT driver code uses a self-defined wrapper function called get_current_node() when it wants to learn the current NUMA node. This implementation references the topology_physical_package_id[] array, which more or less coincidentally contains the NUMA node id, at least on x86. Because this is not universal, and Linux offers a direct function to learn the NUMA node ID, replace that function with a call to numa_node_id(), which would work everywhere. This fixes the QAT driver operation on arm64 machines. 
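For context, the node id obtained this way is typically only used to prefer an accelerator instance that sits on the caller's NUMA node. A hedged sketch of that selection pattern is below (the names are made up for illustration; this is not the actual qat_crypto_get_instance_node() implementation):

```
#include <linux/list.h>
#include <linux/device.h>
#include <linux/topology.h>

struct accel_instance {
        struct list_head list;
        struct device *dev;
};

/* prefer an instance on @node, fall back to the first one found */
static struct accel_instance *pick_instance(struct list_head *instances,
                                            int node)
{
        struct accel_instance *inst, *fallback = NULL;

        list_for_each_entry(inst, instances, list) {
                if (!fallback)
                        fallback = inst;
                if (dev_to_node(inst->dev) == node)
                        return inst;
        }
        return fallback;
}

/* caller side: numa_node_id() is valid on every NUMA-aware architecture,
 * unlike the physical package id, which only happens to match the node
 * number on x86 */
static struct accel_instance *pick_local_instance(struct list_head *instances)
{
        return pick_instance(instances, numa_node_id());
}
```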
Reported-by: Yoan Picchi Signed-off-by: Andre Przywara Signed-off-by: Yoan Picchi Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_common_drv.h | 5 ----- drivers/crypto/qat/qat_common/qat_algs.c | 4 ++-- drivers/crypto/qat/qat_common/qat_asym_algs.c | 4 ++-- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 0464fa2579295..b364bc06c732a 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -49,11 +49,6 @@ struct service_hndl { struct list_head list; }; -static inline int get_current_node(void) -{ - return topology_physical_package_id(raw_smp_processor_id()); -} - int adf_service_register(struct service_hndl *service); int adf_service_unregister(struct service_hndl *service); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 148edbe379e31..fb45fa83841c5 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -605,7 +605,7 @@ static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key, { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); struct qat_crypto_instance *inst = NULL; - int node = get_current_node(); + int node = numa_node_id(); struct device *dev; int ret; @@ -1065,7 +1065,7 @@ static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx, { struct qat_crypto_instance *inst = NULL; struct device *dev; - int node = get_current_node(); + int node = numa_node_id(); int ret; inst = qat_crypto_get_instance_node(node); diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 16d97db9ea15f..095ed2a404d2f 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -489,7 +489,7 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) { struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct qat_crypto_instance *inst = - qat_crypto_get_instance_node(get_current_node()); + qat_crypto_get_instance_node(numa_node_id()); if (!inst) return -EINVAL; @@ -1225,7 +1225,7 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) { struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct qat_crypto_instance *inst = - qat_crypto_get_instance_node(get_current_node()); + qat_crypto_get_instance_node(numa_node_id()); if (!inst) return -EINVAL; From 9c846c5d2d4e63d75b2cb172625087cadadbe065 Mon Sep 17 00:00:00 2001 From: Yoan Picchi Date: Fri, 17 Jun 2022 09:59:45 +0000 Subject: [PATCH 31/89] crypto: qat - Removes the x86 dependency on the QAT drivers This dependency looks outdated. After the previous patch, we have been able to use this driver to encrypt some data and to create working VF on arm64. 
We have not tested it yet on any big endian machine, hence the new dependency Signed-off-by: Yoan Picchi Signed-off-by: Herbert Xu --- drivers/crypto/qat/Kconfig | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 4b90c0f22b03f..1220cc86f9100 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -17,7 +17,7 @@ config CRYPTO_DEV_QAT config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology @@ -28,7 +28,7 @@ config CRYPTO_DEV_QAT_DH895xCC config CRYPTO_DEV_QAT_C3XXX tristate "Support for Intel(R) C3XXX" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) C3xxx with Intel(R) QuickAssist Technology @@ -39,7 +39,7 @@ config CRYPTO_DEV_QAT_C3XXX config CRYPTO_DEV_QAT_C62X tristate "Support for Intel(R) C62X" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) C62x with Intel(R) QuickAssist Technology @@ -50,7 +50,7 @@ config CRYPTO_DEV_QAT_C62X config CRYPTO_DEV_QAT_4XXX tristate "Support for Intel(R) QAT_4XXX" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select CRYPTO_DEV_QAT help Support for Intel(R) QuickAssist Technology QAT_4xxx @@ -61,7 +61,7 @@ config CRYPTO_DEV_QAT_4XXX config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select PCI_IOV select CRYPTO_DEV_QAT @@ -74,7 +74,7 @@ config CRYPTO_DEV_QAT_DH895xCCVF config CRYPTO_DEV_QAT_C3XXXVF tristate "Support for Intel(R) C3XXX Virtual Function" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select PCI_IOV select CRYPTO_DEV_QAT help @@ -86,7 +86,7 @@ config CRYPTO_DEV_QAT_C3XXXVF config CRYPTO_DEV_QAT_C62XVF tristate "Support for Intel(R) C62X Virtual Function" - depends on X86 && PCI + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) select PCI_IOV select CRYPTO_DEV_QAT help From d6c14da474bf260d73953fbf7992c98d9112aec7 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Mon, 20 Jun 2022 09:52:43 +0200 Subject: [PATCH 32/89] crypto: lib/blake2s - reduce stack frame usage in self test Using 3 blocks here doesn't give us much more than using 2, and it causes a stack frame size warning on certain compiler/config/arch combinations: lib/crypto/blake2s-selftest.c: In function 'blake2s_selftest': >> lib/crypto/blake2s-selftest.c:632:1: warning: the frame size of 1088 bytes is larger than 1024 bytes [-Wframe-larger-than=] 632 | } | ^ So this patch just reduces the block from 3 to 2, which makes the warning go away. Reported-by: kernel test robot Link: https://lore.kernel.org/linux-crypto/202206200851.gE3MHCgd-lkp@intel.com Fixes: 2d16803c562e ("crypto: blake2s - remove shash module") Signed-off-by: Jason A. 
Donenfeld Signed-off-by: Herbert Xu --- lib/crypto/blake2s-selftest.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c index 66f505220f43f..7d77dea155873 100644 --- a/lib/crypto/blake2s-selftest.c +++ b/lib/crypto/blake2s-selftest.c @@ -593,7 +593,7 @@ bool __init blake2s_selftest(void) enum { TEST_ALIGNMENT = 16 }; u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1] __aligned(TEST_ALIGNMENT); - u8 blocks[BLAKE2S_BLOCK_SIZE * 3]; + u8 blocks[BLAKE2S_BLOCK_SIZE * 2]; struct blake2s_state state1, state2; get_random_bytes(blocks, sizeof(blocks)); @@ -603,8 +603,8 @@ bool __init blake2s_selftest(void) defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S) memcpy(&state1, &state, sizeof(state1)); memcpy(&state2, &state, sizeof(state2)); - blake2s_compress(&state1, blocks, 3, BLAKE2S_BLOCK_SIZE); - blake2s_compress_generic(&state2, blocks, 3, BLAKE2S_BLOCK_SIZE); + blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE); + blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE); if (memcmp(&state1, &state2, sizeof(state1))) { pr_err("blake2s random compress self-test %d: FAIL\n", i + 1); From 54a8b6802f03b6d0fa6ecad67f51466f8fa2ad6d Mon Sep 17 00:00:00 2001 From: Jiang Jian Date: Tue, 21 Jun 2022 18:22:54 +0800 Subject: [PATCH 33/89] crypto: nx - drop unexpected word "the" there is an unexpected word "the" in the comments that need to be dropped >- * The DDE is setup with the the DDE count, byte count, and address of >+ * The DDE is setup with the DDE count, byte count, and address of Signed-off-by: Jiang Jian Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-common-powernv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index f418817c0f43e..f34c75a862f24 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -75,7 +75,7 @@ static int (*nx842_powernv_exec)(const unsigned char *in, /** * setup_indirect_dde - Setup an indirect DDE * - * The DDE is setup with the the DDE count, byte count, and address of + * The DDE is setup with the DDE count, byte count, and address of * first direct DDE in the list. */ static void setup_indirect_dde(struct data_descriptor_entry *dde, From 882f0a59377da3fc5153c06790227930e5c00f88 Mon Sep 17 00:00:00 2001 From: Jiang Jian Date: Tue, 21 Jun 2022 18:27:33 +0800 Subject: [PATCH 34/89] crypto: ux500/hash - drop unexpected word "the" there is an unexpected word "the" in the comments that need to be dropped >- * specified in the the hw design spec. Either due to incorrect info in the >+ * specified in the hw design spec. Either due to incorrect info in the Signed-off-by: Jiang Jian Signed-off-by: Herbert Xu --- drivers/crypto/ux500/hash/hash_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 265ef3e96fdd3..f104e8a43036e 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -421,7 +421,7 @@ static int hash_get_device_data(struct hash_ctx *ctx, * @keylen: The lengt of the key. * * Note! This function DOES NOT write to the NBLW registry, even though - * specified in the the hw design spec. Either due to incorrect info in the + * specified in the hw design spec. Either due to incorrect info in the * spec or due to a bug in the hw. 
*/ static void hash_hw_write_key(struct hash_device_data *device_data, From 1b069597c254a2969cf21142c3881b2d5e80c658 Mon Sep 17 00:00:00 2001 From: Jilin Yuan Date: Wed, 22 Jun 2022 17:07:30 +0800 Subject: [PATCH 35/89] crypto: arm64/aes-neon - Fix typo in comment Delete the redundant word 'the'. Signed-off-by: Jilin Yuan Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-neon.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S index e47d3ec2cfb46..9de7fbc797af7 100644 --- a/arch/arm64/crypto/aes-neon.S +++ b/arch/arm64/crypto/aes-neon.S @@ -66,7 +66,7 @@ prepare crypto_aes_inv_sbox, .LReverse_ShiftRows, \temp .endm - /* apply SubBytes transformation using the the preloaded Sbox */ + /* apply SubBytes transformation using the preloaded Sbox */ .macro sub_bytes, in sub v9.16b, \in\().16b, v15.16b tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b From e34525c3975832971451cb0908594c310fc83cd5 Mon Sep 17 00:00:00 2001 From: Jiang Jian Date: Thu, 23 Jun 2022 00:02:22 +0800 Subject: [PATCH 36/89] crypto: cavium - fix typos in comments Replace 'is' with 'it' file: drivers/crypto/cavium/cpt/cpt_hw_types.h line: 268 * which if the line hits and is is dirty will cause the line to be changed to: * which if the line hits and it is dirty will cause the line to be Signed-off-by: Jiang Jian Signed-off-by: Herbert Xu --- drivers/crypto/cavium/cpt/cpt_hw_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h index 96bc963bb8040..8ec6edc69f3fe 100644 --- a/drivers/crypto/cavium/cpt/cpt_hw_types.h +++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h @@ -265,7 +265,7 @@ union cptx_pf_exe_bist_status { * big-endian format in memory. * iqb_ldwb:1 [7:7](R/W) Instruction load don't write back. * 0 = The hardware issues NCB transient load (LDT) towards the cache, - * which if the line hits and is is dirty will cause the line to be + * which if the line hits and it is dirty will cause the line to be * written back before being replaced. * 1 = The hardware issues NCB LDWB read-and-invalidate command towards * the cache when fetching the last word of instructions; as a result the From dac230179fdc9ffa074fcb1533a0dd56f162a00a Mon Sep 17 00:00:00 2001 From: Jiang Jian Date: Thu, 23 Jun 2022 15:11:23 +0800 Subject: [PATCH 37/89] crypto: caam - drop unexpected word 'a' in comments Drop the unexpected word 'a' in the comments that need to be dropped * This is a a cache of buffers, from which the users of CAAM QI driver --> * This is a cache of buffers, from which the users of CAAM QI driver Signed-off-by: Jiang Jian Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 2 +- drivers/crypto/caam/qi.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 6753f0e6e55d1..4b81fb33f199b 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -29,7 +29,7 @@ SHA512_DIGEST_SIZE * 2) /* - * This is a a cache of buffers, from which the users of CAAM QI driver + * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. * NOTE: A more elegant solution would be to have some headroom in the frames * being processed. This can be added by the dpaa2-eth driver. 
This would diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 8163f5df8ebf7..49439d0d1b3c2 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -75,7 +75,7 @@ bool caam_congested __read_mostly; EXPORT_SYMBOL(caam_congested); /* - * This is a a cache of buffers, from which the users of CAAM QI driver + * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than * doing malloc on the hotpath. * NOTE: A more elegant solution would be to have some headroom in the frames From a89db0595f52ca4d05c9301b4b0bae4cce2ecaf7 Mon Sep 17 00:00:00 2001 From: Jiang Jian Date: Thu, 23 Jun 2022 18:25:36 +0800 Subject: [PATCH 38/89] crypto: caam - drop unexpected word 'for' in comments there is an unexpected word 'for' in the comments that need to be dropped file - drivers/crypto/caam/caamhash_desc.c line - 25 * must be false for for ahash first and digest changed to: * must be false for ahash first and digest Signed-off-by: Jiang Jian Reviewed-by: Gaurav Jain Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash_desc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c index 78383d77da99d..6195645099363 100644 --- a/drivers/crypto/caam/caamhash_desc.c +++ b/drivers/crypto/caam/caamhash_desc.c @@ -22,7 +22,7 @@ * @ctx_len: size of Context Register * @import_ctx: true if previous Context Register needs to be restored * must be true for ahash update and final - * must be false for for ahash first and digest + * must be false for ahash first and digest * @era: SEC Era */ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, From 8e7ae8bafe98a288727d647dace03fed2bdcc5fc Mon Sep 17 00:00:00 2001 From: Jiang Jian Date: Thu, 23 Jun 2022 18:30:14 +0800 Subject: [PATCH 39/89] crypto: vmx - drop unexpected word 'for' in comments there is an unexpected word 'for' in the comments that need to be dropped file - drivers/crypto/vmx/ghashp8-ppc.pl line - 19 "# GHASH for for PowerISA v2.07." changed to: "# GHASH for PowerISA v2.07." Signed-off-by: Jiang Jian Signed-off-by: Herbert Xu --- drivers/crypto/vmx/ghashp8-ppc.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl index 09bba1852eec1..041e633c214f5 100644 --- a/drivers/crypto/vmx/ghashp8-ppc.pl +++ b/drivers/crypto/vmx/ghashp8-ppc.pl @@ -16,7 +16,7 @@ # details see https://www.openssl.org/~appro/cryptogams/. # ==================================================================== # -# GHASH for for PowerISA v2.07. +# GHASH for PowerISA v2.07. # # July 2014 # From 0bb8f125253843c445b70fc6ef4fb21aa7b25625 Mon Sep 17 00:00:00 2001 From: lei he Date: Fri, 24 Jun 2022 18:06:25 +0800 Subject: [PATCH 40/89] crypto: testmgr - fix version number of RSA tests According to PKCS#1 standard, the 'otherPrimeInfos' field contains the information for the additional primes r_3, ..., r_u, in order. It shall be omitted if the version is 0 and shall contain at least one instance of OtherPrimeInfo if the version is 1, see: https://www.rfc-editor.org/rfc/rfc3447#page-44 Replace the version number '1' with 0, otherwise, some drivers may not pass the run-time tests. 
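The version is the first INTEGER inside the RSAPrivateKey SEQUENCE, so the fix amounts to changing the octets \x02\x01\x01 to \x02\x01\x00 right after the SEQUENCE header. A small hedged C helper (illustrative only, not part of testmgr) that pulls that octet out of a DER blob:

```
#include <stddef.h>

/*
 * Return the RSAPrivateKey version octet (0 = two-prime, 1 = multi-prime,
 * i.e. otherPrimeInfos present), or -1 if the blob does not start with
 * SEQUENCE { INTEGER(single octet) ... } as the testmgr vectors do.
 */
static int rsa_priv_key_version(const unsigned char *der, size_t len)
{
        size_t i = 0;

        if (len < 5 || der[i++] != 0x30)        /* SEQUENCE tag */
                return -1;

        if (der[i] & 0x80) {                    /* long-form length */
                size_t lenlen = der[i] & 0x7f;

                if (lenlen > sizeof(size_t) || i + 1 + lenlen >= len)
                        return -1;
                i += 1 + lenlen;
        } else {
                i++;                            /* short-form length */
        }

        /* version ::= INTEGER, encoded here as a single content octet */
        if (i + 3 > len || der[i] != 0x02 || der[i + 1] != 0x01)
                return -1;

        return der[i + 2];
}
```

On the corrected vectors this returns 0, matching the absence of otherPrimeInfos.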
Signed-off-by: lei he Signed-off-by: Herbert Xu --- crypto/testmgr.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 4f3955ea40bf6..8e2dce86dd483 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -186,7 +186,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { #ifndef CONFIG_CRYPTO_FIPS .key = "\x30\x81\x9A" /* sequence of 154 bytes */ - "\x02\x01\x01" /* version - integer of 1 byte */ + "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x41" /* modulus - integer of 65 bytes */ "\x00\xAA\x36\xAB\xCE\x88\xAC\xFD\xFF\x55\x52\x3C\x7F\xC4\x52\x3F" "\x90\xEF\xA0\x0D\xF3\x77\x4A\x25\x9F\x2E\x62\xB4\xC5\xD9\x9C\xB5" @@ -216,7 +216,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { }, { .key = "\x30\x82\x01\x1D" /* sequence of 285 bytes */ - "\x02\x01\x01" /* version - integer of 1 byte */ + "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x81\x81" /* modulus - integer of 129 bytes */ "\x00\xBB\xF8\x2F\x09\x06\x82\xCE\x9C\x23\x38\xAC\x2B\x9D\xA8\x71" "\xF7\x36\x8D\x07\xEE\xD4\x10\x43\xA4\x40\xD6\xB6\xF0\x74\x54\xF5" @@ -260,7 +260,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { #endif .key = "\x30\x82\x02\x20" /* sequence of 544 bytes */ - "\x02\x01\x01" /* version - integer of 1 byte */ + "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x82\x01\x01\x00" /* modulus - integer of 256 bytes */ "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D" "\x13\xC7\x88\xDA\x70\x6B\x54\xF1\xE8\x27\xDC\xC3\x0F\x99\x6A\xFA" From fac76f2260893dde5aa05bb693b4c13e8ed0454b Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Fri, 24 Jun 2022 07:13:38 -0400 Subject: [PATCH 41/89] crypto: arm64/gcm - Select AEAD for GHASH_ARM64_CE Otherwise, we could fail to compile. ld: arch/arm64/crypto/ghash-ce-glue.o: in function 'ghash_ce_mod_exit': ghash-ce-glue.c:(.exit.text+0x24): undefined reference to 'crypto_unregister_aead' ld: arch/arm64/crypto/ghash-ce-glue.o: in function 'ghash_ce_mod_init': ghash-ce-glue.c:(.init.text+0x34): undefined reference to 'crypto_register_aead' Fixes: 537c1445ab0b ("crypto: arm64/gcm - implement native driver using v8 Crypto Extensions") Signed-off-by: Qian Cai Signed-off-by: Herbert Xu --- arch/arm64/crypto/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 4391a463abd77..60db5bb2ddda5 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -71,6 +71,7 @@ config CRYPTO_GHASH_ARM64_CE select CRYPTO_HASH select CRYPTO_GF128MUL select CRYPTO_LIB_AES + select CRYPTO_AEAD config CRYPTO_POLYVAL_ARM64_CE tristate "POLYVAL using ARMv8 Crypto Extensions (for HCTR2)" From 5ee52118ac1481dd8a8f7e6a9bfe6ee05ac6ec92 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 27 Jun 2022 09:36:49 +0100 Subject: [PATCH 42/89] crypto: qat - expose device state through sysfs for 4xxx Expose the device state through an attribute in sysfs and allow to change it. This is to stop and shutdown a QAT device in order to change its configuration. The state attribute has been added to a newly created `qat` attribute group which will contain all _QAT specific_ attributes. The logic that implements the sysfs entries is part of a new file, adf_sysfs.c. This exposes an entry point to allow the driver to create attributes. 
The function that creates the sysfs attributes is called from the probe function of the driver and not in the state machine init function to allow the change of states even if the device is in the down state. In order to restore the device configuration between a transition from down to up, the function that configures the devices has been abstracted into the HW data structure. The `state` attribute is only exposed for qat_4xxx devices. Signed-off-by: Giovanni Cabiddu Co-developed-by: Tomasz Kowallik Signed-off-by: Tomasz Kowallik Reviewed-by: Adam Guerin Reviewed-by: Fiona Trahe Reviewed-by: Wojciech Ziemba Reviewed-by: Vladis Dronov Signed-off-by: Herbert Xu --- Documentation/ABI/testing/sysfs-driver-qat | 21 ++++ .../crypto/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + .../crypto/qat/qat_4xxx/adf_4xxx_hw_data.h | 1 + drivers/crypto/qat/qat_4xxx/adf_drv.c | 6 +- drivers/crypto/qat/qat_common/Makefile | 1 + .../crypto/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/qat/qat_common/adf_common_drv.h | 2 + drivers/crypto/qat/qat_common/adf_sysfs.c | 119 ++++++++++++++++++ 8 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 Documentation/ABI/testing/sysfs-driver-qat create mode 100644 drivers/crypto/qat/qat_common/adf_sysfs.c diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat new file mode 100644 index 0000000000000..769b09cefa897 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -0,0 +1,21 @@ +What: /sys/bus/pci/devices//qat/state +Date: June 2022 +KernelVersion: 5.20 +Contact: qat-linux@intel.com +Description: Reports the current state of the QAT device and allows to + change it. + + This attribute is RW. + + Returned values: + up: the device is up and running + down: the device is down + + Allowed values: + up: initialize and start the device + down: stop the device and bring it down + + It is possible to transition the device from up to down only + if the device is up and vice versa. + + This attribute is only available for qat_4xxx devices. 
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c index fb5970a684844..fda5f699ff575 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -357,6 +357,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; + hw_data->dev_config = adf_crypto_dev_config; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h index 1034752845ca2..9d49248931f6a 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -70,5 +70,6 @@ enum icp_qat_4xxx_slice_mask { void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data); void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data); +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c index 181fa1c8b3c78..2f212561acc47 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c @@ -53,7 +53,7 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev) return 0; } -static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) +int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; int banks = GET_MAX_BANKS(accel_dev); @@ -289,6 +289,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err_disable_aer; } + ret = adf_sysfs_init(accel_dev); + if (ret) + goto out_err_disable_aer; + ret = adf_crypto_dev_config(accel_dev); if (ret) goto out_err_disable_aer; diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index 04f058acc4d37..80919cfcc29da 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -10,6 +10,7 @@ intel_qat-objs := adf_cfg.o \ adf_transport.o \ adf_admin.o \ adf_hw_arbiter.o \ + adf_sysfs.o \ adf_gen2_hw_data.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index ede6458c9dbfd..0a55a4f34dcfd 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -199,6 +199,7 @@ struct adf_hw_device_data { char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_num_objs)(void); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); + int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; const char *fw_name; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index b364bc06c732a..b841e85c8f5d6 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -127,6 +127,8 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev); int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev); +int adf_sysfs_init(struct adf_accel_dev *accel_dev); + int qat_hal_init(struct adf_accel_dev *accel_dev); void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle); int qat_hal_start(struct 
icp_qat_fw_loader_handle *handle); diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c new file mode 100644 index 0000000000000..8f47a5694dd70 --- /dev/null +++ b/drivers/crypto/qat/qat_common/adf_sysfs.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2022 Intel Corporation */ +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_common_drv.h" + +static const char * const state_operations[] = { + [DEV_DOWN] = "down", + [DEV_UP] = "up", +}; + +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + char *state; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + state = adf_dev_started(accel_dev) ? "up" : "down"; + return sysfs_emit(buf, "%s\n", state); +} + +static ssize_t state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + u32 accel_id; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + accel_id = accel_dev->accel_id; + + if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) { + dev_info(dev, "Device qat_dev%d is busy\n", accel_id); + return -EBUSY; + } + + ret = sysfs_match_string(state_operations, buf); + if (ret < 0) + return ret; + + switch (ret) { + case DEV_DOWN: + if (!adf_dev_started(accel_dev)) { + dev_info(dev, "Device qat_dev%d already down\n", + accel_id); + return -EINVAL; + } + + dev_info(dev, "Stopping device qat_dev%d\n", accel_id); + + adf_dev_stop(accel_dev); + adf_dev_shutdown(accel_dev); + + break; + case DEV_UP: + if (adf_dev_started(accel_dev)) { + dev_info(dev, "Device qat_dev%d already up\n", + accel_id); + return -EINVAL; + } + + dev_info(dev, "Starting device qat_dev%d\n", accel_id); + + ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev); + if (!ret) + ret = adf_dev_init(accel_dev); + if (!ret) + ret = adf_dev_start(accel_dev); + + if (ret < 0) { + dev_err(dev, "Failed to start device qat_dev%d\n", + accel_id); + adf_dev_stop(accel_dev); + adf_dev_shutdown(accel_dev); + return ret; + } + break; + default: + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_RW(state); + +static struct attribute *qat_attrs[] = { + &dev_attr_state.attr, + NULL, +}; + +static struct attribute_group qat_group = { + .attrs = qat_attrs, + .name = "qat", +}; + +int adf_sysfs_init(struct adf_accel_dev *accel_dev) +{ + int ret; + + ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to create qat attribute group: %d\n", ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(adf_sysfs_init); From 92bf269fbfe94018f15405e1644049de7c7b46dd Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 27 Jun 2022 09:36:50 +0100 Subject: [PATCH 43/89] crypto: qat - change behaviour of adf_cfg_add_key_value_param() The function adf_cfg_add_key_value_param() allows to insert duplicates entries in the key value store of the driver. Change the behaviour of that function to the following policy: - if the key doesn't exist, add it; - if the key already exists with a different value, then delete it and replace it with a new one containing the new value; - if the key exists with the same value, then return without doing anything. 
The behaviour of this function has been changed in order to easily update key-values in the driver database. In particular this is required to update the value of the ServiceEnables key used to change the service loaded on a device. Signed-off-by: Giovanni Cabiddu Reviewed-by: Adam Guerin Reviewed-by: Fiona Trahe Reviewed-by: Wojciech Ziemba Reviewed-by: Vladis Dronov Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_cfg.c | 41 ++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index b5b208cbe5a12..e61b3e13db3bd 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -128,6 +128,24 @@ static void adf_cfg_keyval_add(struct adf_cfg_key_val *new, list_add_tail(&new->list, &sec->param_head); } +static void adf_cfg_keyval_remove(const char *key, struct adf_cfg_section *sec) +{ + struct list_head *head = &sec->param_head; + struct list_head *list_ptr, *tmp; + + list_for_each_prev_safe(list_ptr, tmp, head) { + struct adf_cfg_key_val *ptr = + list_entry(list_ptr, struct adf_cfg_key_val, list); + + if (strncmp(ptr->key, key, sizeof(ptr->key))) + continue; + + list_del(list_ptr); + kfree(ptr); + break; + } +} + static void adf_cfg_keyval_del_all(struct list_head *head) { struct list_head *list_ptr, *tmp; @@ -208,7 +226,8 @@ static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev, * @type: Type - string, int or address * * Function adds configuration key - value entry in the appropriate section - * in the given acceleration device + * in the given acceleration device. If the key exists already, the value + * is updated. * To be used by QAT device specific drivers. * * Return: 0 on success, error code otherwise. @@ -222,6 +241,8 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, struct adf_cfg_key_val *key_val; struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev, section_name); + char temp_val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + if (!section) return -EFAULT; @@ -246,6 +267,24 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, return -EINVAL; } key_val->type = type; + + /* Add the key-value pair as below policy: + * 1. if the key doesn't exist, add it; + * 2. if the key already exists with a different value then update it + * to the new value (the key is deleted and the newly created + * key_val containing the new value is added to the database); + * 3. if the key exists with the same value, then return without doing + * anything (the newly created key_val is freed). + */ + if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) { + if (strncmp(temp_val, key_val->val, sizeof(temp_val))) { + adf_cfg_keyval_remove(key, section); + } else { + kfree(key_val); + return 0; + } + } + down_write(&cfg->lock); adf_cfg_keyval_add(key_val, section); up_write(&cfg->lock); From 16c1ed95d1c4956e428c8daa2783bcc7fa7f6fb9 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 27 Jun 2022 09:36:51 +0100 Subject: [PATCH 44/89] crypto: qat - relocate and rename adf_sriov_prepare_restart() The function adf_sriov_prepare_restart() is used in adf_sriov.c to stop and shutdown a device preserving its configuration. 
Since this function will be re-used by the logic that allows to reconfigure the device through sysfs, move it to adf_init.c and rename it as adf_dev_shutdown_cache_cfg(); Signed-off-by: Giovanni Cabiddu Reviewed-by: Adam Guerin Reviewed-by: Fiona Trahe Reviewed-by: Wojciech Ziemba Reviewed-by: Vladis Dronov Signed-off-by: Herbert Xu --- .../crypto/qat/qat_common/adf_common_drv.h | 1 + drivers/crypto/qat/qat_common/adf_init.c | 26 +++++++++++++++++ drivers/crypto/qat/qat_common/adf_sriov.c | 28 +------------------ 3 files changed, 28 insertions(+), 27 deletions(-) diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index b841e85c8f5d6..7bb477c3ce25f 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -56,6 +56,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev); int adf_dev_start(struct adf_accel_dev *accel_dev); void adf_dev_stop(struct adf_accel_dev *accel_dev); void adf_dev_shutdown(struct adf_accel_dev *accel_dev); +int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index c2c718f1b4895..33a9a46d69494 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -363,3 +363,29 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) } return 0; } + +int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + + adf_dev_stop(accel_dev); + adf_dev_shutdown(accel_dev); + + if (!ret) { + ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); + if (ret) + return ret; + + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, + services, ADF_STR); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index f38b2ffde146b..b2db1d70d71fb 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -120,32 +120,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_disable_sriov); -static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - - adf_dev_stop(accel_dev); - adf_dev_shutdown(accel_dev); - - if (!ret) { - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, - services, ADF_STR); - if (ret) - return ret; - } - - return 0; -} - /** * adf_sriov_configure() - Enable SRIOV for the device * @pdev: Pointer to PCI device. 
@@ -185,7 +159,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) return -EBUSY; } - ret = adf_sriov_prepare_restart(accel_dev); + ret = adf_dev_shutdown_cache_cfg(accel_dev); if (ret) return ret; } From d4cfb144f60551d80732c93c892fe76fc8df860d Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 27 Jun 2022 09:36:52 +0100 Subject: [PATCH 45/89] crypto: qat - expose device config through sysfs for 4xxx qat_4xxx devices can be configured to allow either crypto or compression operations. At the moment, devices are configured statically according to the following rule: - odd numbered devices assigned to compression services - even numbered devices assigned to crypto services Expose the sysfs attribute /sys/bus/pci/devices//qat/cfg_services to allow to detect the configuration of a device and to change it. The `cfg_service` attribute is only exposed for qat_4xxx devices and it is limited to two configurations: (1) "sym;asym" for crypto services and "dc" for compression services. Signed-off-by: Giovanni Cabiddu Co-developed-by: Tomasz Kowallik Signed-off-by: Tomasz Kowallik Reviewed-by: Adam Guerin Reviewed-by: Fiona Trahe Reviewed-by: Wojciech Ziemba Reviewed-by: Vladis Dronov Signed-off-by: Herbert Xu --- Documentation/ABI/testing/sysfs-driver-qat | 39 +++++++++++ drivers/crypto/qat/qat_common/adf_sysfs.c | 80 ++++++++++++++++++++-- 2 files changed, 115 insertions(+), 4 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index 769b09cefa897..a600531e95628 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -19,3 +19,42 @@ Description: Reports the current state of the QAT device and allows to if the device is up and vice versa. This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/cfg_services +Date: June 2022 +KernelVersion: 5.20 +Contact: qat-linux@intel.com +Description: Reports the current configuration of the QAT device and allows + to change it. + + This attribute is RW. + + Returned values: + sym;asym: the device is configured for running + crypto services + dc: the device is configured for running + compression services + + Allowed values: + sym;asym: configure the device for running + crypto services + dc: configure the device for running + compression services + + It is possible to set the configuration only if the device + is in the `down` state (see /sys/bus/pci/devices//qat/state) + + The following example shows how to change the configuration of + a device configured for running crypto services in order to + run data compression: + # cat /sys/bus/pci/devices//qat/state + up + # cat /sys/bus/pci/devices//qat/cfg_services + sym;asym + # echo down > /sys/bus/pci/devices//qat/state + # echo dc > /sys/bus/pci/devices//qat/cfg_services + # echo up > /sys/bus/pci/devices//qat/state + # cat /sys/bus/pci/devices//qat/cfg_services + dc + + This attribute is only available for qat_4xxx devices. 
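For illustration, the documented down / reconfigure / up sequence could be driven from user space with something like the sketch below. The PCI device address is a made-up example (real code would discover the device), and error handling is kept minimal:

/*
 * Reconfigure a QAT device for compression by walking the sysfs flow
 * described in the ABI documentation above: state=down, cfg_services=dc,
 * state=up. The device path is an assumption for the example only.
 */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs(val, f) == EOF) {
		perror(path);
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	const char *qat = "/sys/bus/pci/devices/0000:6b:00.0/qat"; /* example address */
	char path[256];

	snprintf(path, sizeof(path), "%s/state", qat);
	if (write_attr(path, "down"))
		return 1;

	snprintf(path, sizeof(path), "%s/cfg_services", qat);
	if (write_attr(path, "dc"))
		return 1;

	snprintf(path, sizeof(path), "%s/state", qat);
	return write_attr(path, "up") ? 1 : 0;
}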
diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c index 8f47a5694dd70..e8b078e719c20 100644 --- a/drivers/crypto/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/qat/qat_common/adf_sysfs.c @@ -58,8 +58,9 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, dev_info(dev, "Stopping device qat_dev%d\n", accel_id); - adf_dev_stop(accel_dev); - adf_dev_shutdown(accel_dev); + ret = adf_dev_shutdown_cache_cfg(accel_dev); + if (ret < 0) + return -EINVAL; break; case DEV_UP: @@ -80,8 +81,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, if (ret < 0) { dev_err(dev, "Failed to start device qat_dev%d\n", accel_id); - adf_dev_stop(accel_dev); - adf_dev_shutdown(accel_dev); + adf_dev_shutdown_cache_cfg(accel_dev); return ret; } break; @@ -92,10 +92,82 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, return count; } +static const char * const services_operations[] = { + ADF_CFG_CY, + ADF_CFG_DC, +}; + +static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + struct adf_accel_dev *accel_dev; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", services); +} + +static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev, + const char *services) +{ + return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services, + ADF_STR); +} + +static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_hw_device_data *hw_data; + struct adf_accel_dev *accel_dev; + int ret; + + ret = sysfs_match_string(services_operations, buf); + if (ret < 0) + return ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + if (adf_dev_started(accel_dev)) { + dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n", + accel_dev->accel_id); + return -EINVAL; + } + + ret = adf_sysfs_update_dev_config(accel_dev, services_operations[ret]); + if (ret < 0) + return ret; + + hw_data = GET_HW_DATA(accel_dev); + + /* Update capabilities mask after change in configuration. + * A call to this function is required as capabilities are, at the + * moment, tied to configuration + */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) + return -EINVAL; + + return count; +} + static DEVICE_ATTR_RW(state); +static DEVICE_ATTR_RW(cfg_services); static struct attribute *qat_attrs[] = { &dev_attr_state.attr, + &dev_attr_cfg_services.attr, NULL, }; From cc8166bfc829043020b5cc3b7cdba02a17d03b6d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 30 Jun 2022 16:11:49 +0800 Subject: [PATCH 46/89] crypto: vmx - Fix warning on p8_ghash_alg The compiler complains that p8_ghash_alg isn't declared which is because the header file aesp8-ppc.h isn't included in ghash.c. This patch fixes the warning. 
Signed-off-by: Herbert Xu Acked-by: Breno Leitao Signed-off-by: Herbert Xu --- drivers/crypto/vmx/ghash.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 5bc5710a6de0b..77eca20bc7ac6 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -23,6 +23,7 @@ #include #include #include +#include "aesp8-ppc.h" void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); From 9984a6447389a01cc5e8501b2ead3024b2d99c19 Mon Sep 17 00:00:00 2001 From: Liang He Date: Thu, 30 Jun 2022 16:36:56 +0800 Subject: [PATCH 47/89] crypto: amcc - Hold the reference returned by of_find_compatible_node In crypto4xx_probe(), we should hold the reference returned by of_find_compatible_node() and use it to call of_node_put to keep refcount balance. Signed-off-by: Liang He Signed-off-by: Herbert Xu --- drivers/crypto/amcc/crypto4xx_core.c | 40 +++++++++++++++++----------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 8278d98074e9a..280f4b0e71334 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -1378,6 +1378,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) struct resource res; struct device *dev = &ofdev->dev; struct crypto4xx_core_device *core_dev; + struct device_node *np; u32 pvr; bool is_revb = true; @@ -1385,29 +1386,36 @@ static int crypto4xx_probe(struct platform_device *ofdev) if (rc) return -ENODEV; - if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) { + np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto"); + if (np) { mtdcri(SDR0, PPC460EX_SDR0_SRST, mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET); mtdcri(SDR0, PPC460EX_SDR0_SRST, mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET); - } else if (of_find_compatible_node(NULL, NULL, - "amcc,ppc405ex-crypto")) { - mtdcri(SDR0, PPC405EX_SDR0_SRST, - mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); - mtdcri(SDR0, PPC405EX_SDR0_SRST, - mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); - is_revb = false; - } else if (of_find_compatible_node(NULL, NULL, - "amcc,ppc460sx-crypto")) { - mtdcri(SDR0, PPC460SX_SDR0_SRST, - mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET); - mtdcri(SDR0, PPC460SX_SDR0_SRST, - mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET); } else { - printk(KERN_ERR "Crypto Function Not supported!\n"); - return -EINVAL; + np = of_find_compatible_node(NULL, NULL, "amcc,ppc405ex-crypto"); + if (np) { + mtdcri(SDR0, PPC405EX_SDR0_SRST, + mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); + mtdcri(SDR0, PPC405EX_SDR0_SRST, + mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); + is_revb = false; + } else { + np = of_find_compatible_node(NULL, NULL, "amcc,ppc460sx-crypto"); + if (np) { + mtdcri(SDR0, PPC460SX_SDR0_SRST, + mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET); + mtdcri(SDR0, PPC460SX_SDR0_SRST, + mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET); + } else { + printk(KERN_ERR "Crypto Function Not supported!\n"); + return -EINVAL; + } + } } + of_node_put(np); + core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL); if (!core_dev) return -ENOMEM; From 8ccc9cc47c8c9e69c7bd9a7694a1d8c33108dd28 Mon Sep 17 00:00:00 2001 From: Liang He Date: Thu, 30 Jun 2022 16:36:57 +0800 Subject: [PATCH 48/89] crypto: nx - Hold the reference returned by of_find_compatible_node In nx842_pseries_init(), 
we should hold the reference returned by of_find_compatible_node() and use it to call of_node_put to keep refcount balance. Signed-off-by: Liang He Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-common-pseries.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 7584a34ba88c2..3ea334b7f820c 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -1208,10 +1208,13 @@ static struct vio_driver nx842_vio_driver = { static int __init nx842_pseries_init(void) { struct nx842_devdata *new_devdata; + struct device_node *np; int ret; - if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) + np = of_find_compatible_node(NULL, NULL, "ibm,compression"); + if (!np) return -ENODEV; + of_node_put(np); RCU_INIT_POINTER(devdata, NULL); new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); From 32c0f7d4194c4352f902f07d2ab990994800596e Mon Sep 17 00:00:00 2001 From: Jianglei Nie Date: Thu, 30 Jun 2022 22:31:32 +0800 Subject: [PATCH 49/89] crypto: hisilicon/sec - fix inconsistent argument The argument passed to sec_queue_aw_alloc() should be SEC_QUEUE_AW_FROCE_NOALLOC instead of SEC_QUEUE_AR_FROCE_NOALLOC. Signed-off-by: Jianglei Nie Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec/sec_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index c8de1b51c8430..e75851326c1e0 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -892,7 +892,7 @@ bool sec_queue_can_enqueue(struct sec_queue *queue, int num) static void sec_queue_hw_init(struct sec_queue *queue) { sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC); - sec_queue_aw_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC); + sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC); sec_queue_ar_pkgattr(queue, 1); sec_queue_aw_pkgattr(queue, 1); From 02884a4f12de11f54d4ca67a07dd1f111d96fdbd Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Fri, 1 Jul 2022 09:55:11 +0800 Subject: [PATCH 50/89] crypto: hisilicon/sec - don't sleep when in softirq When kunpeng920 encryption driver is used to deencrypt and decrypt packets during the softirq, it is not allowed to use mutex lock. 
The kernel will report the following error: BUG: scheduling while atomic: swapper/57/0/0x00000300 Call trace: dump_backtrace+0x0/0x1e4 show_stack+0x20/0x2c dump_stack+0xd8/0x140 __schedule_bug+0x68/0x80 __schedule+0x728/0x840 schedule+0x50/0xe0 schedule_preempt_disabled+0x18/0x24 __mutex_lock.constprop.0+0x594/0x5dc __mutex_lock_slowpath+0x1c/0x30 mutex_lock+0x50/0x60 sec_request_init+0x8c/0x1a0 [hisi_sec2] sec_process+0x28/0x1ac [hisi_sec2] sec_skcipher_crypto+0xf4/0x1d4 [hisi_sec2] sec_skcipher_encrypt+0x1c/0x30 [hisi_sec2] crypto_skcipher_encrypt+0x2c/0x40 crypto_authenc_encrypt+0xc8/0xfc [authenc] crypto_aead_encrypt+0x2c/0x40 echainiv_encrypt+0x144/0x1a0 [echainiv] crypto_aead_encrypt+0x2c/0x40 esp_output_tail+0x348/0x5c0 [esp4] esp_output+0x120/0x19c [esp4] xfrm_output_one+0x25c/0x4d4 xfrm_output_resume+0x6c/0x1fc xfrm_output+0xac/0x3c0 xfrm4_output+0x64/0x130 ip_build_and_send_pkt+0x158/0x20c tcp_v4_send_synack+0xdc/0x1f0 tcp_conn_request+0x7d0/0x994 tcp_v4_conn_request+0x58/0x6c tcp_v6_conn_request+0xf0/0x100 tcp_rcv_state_process+0x1cc/0xd60 tcp_v4_do_rcv+0x10c/0x250 tcp_v4_rcv+0xfc4/0x10a4 ip_protocol_deliver_rcu+0xf4/0x200 ip_local_deliver_finish+0x58/0x70 ip_local_deliver+0x68/0x120 ip_sublist_rcv_finish+0x70/0x94 ip_list_rcv_finish.constprop.0+0x17c/0x1d0 ip_sublist_rcv+0x40/0xb0 ip_list_rcv+0x140/0x1dc __netif_receive_skb_list_core+0x154/0x28c __netif_receive_skb_list+0x120/0x1a0 netif_receive_skb_list_internal+0xe4/0x1f0 napi_complete_done+0x70/0x1f0 gro_cell_poll+0x9c/0xb0 napi_poll+0xcc/0x264 net_rx_action+0xd4/0x21c __do_softirq+0x130/0x358 irq_exit+0x11c/0x13c __handle_domain_irq+0x88/0xf0 gic_handle_irq+0x78/0x2c0 el1_irq+0xb8/0x140 arch_cpu_idle+0x18/0x40 default_idle_call+0x5c/0x1c0 cpuidle_idle_call+0x174/0x1b0 do_idle+0xc8/0x160 cpu_startup_entry+0x30/0x11c secondary_start_kernel+0x158/0x1e4 softirq: huh, entered softirq 3 NET_RX 0000000093774ee4 with preempt_count 00000100, exited with fffffe00? 
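The conversion applied here follows the usual pattern for data shared between process context and a softirq completion path: a sleeping lock (mutex) is replaced by a spinlock, and the _bh variants are used so the process-context users also exclude the softirq users. A minimal pattern sketch (invented names, meant to be compiled as part of a module, not the driver code) looks like this:

/*
 * A lock that can be taken from a softirq callback must not be a mutex.
 * spin_lock_bh() keeps process-context callers from racing with the
 * softirq-context callers without ever sleeping.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_queue {
	spinlock_t lock;		/* was: struct mutex lock; */
	struct list_head backlog;
};

static void demo_queue_init(struct demo_queue *q)
{
	spin_lock_init(&q->lock);	/* was: mutex_init(&q->lock); */
	INIT_LIST_HEAD(&q->backlog);
}

/* Called from process context (e.g. the encrypt/decrypt entry point). */
static void demo_enqueue(struct demo_queue *q, struct list_head *item)
{
	spin_lock_bh(&q->lock);		/* was: mutex_lock(&q->lock); */
	list_add_tail(item, &q->backlog);
	spin_unlock_bh(&q->lock);	/* was: mutex_unlock(&q->lock); */
}

/* Called from the completion callback, which may run in softirq context. */
static struct list_head *demo_dequeue(struct demo_queue *q)
{
	struct list_head *item = NULL;

	spin_lock_bh(&q->lock);
	if (!list_empty(&q->backlog)) {
		item = q->backlog.next;
		list_del(item);
	}
	spin_unlock_bh(&q->lock);
	return item;
}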
Fixes: 416d82204df4 ("crypto: hisilicon - add HiSilicon SEC V2 driver") Signed-off-by: Zhengchao Shao Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec.h | 2 +- drivers/crypto/hisilicon/sec2/sec_crypto.c | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 42bb486f3b6d5..d2a0bc93e7525 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -119,7 +119,7 @@ struct sec_qp_ctx { struct idr req_idr; struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; - struct mutex req_lock; + spinlock_t req_lock; struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 6eebe739893c5..71dfa7db63947 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -127,11 +127,11 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) { int req_id; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; @@ -156,9 +156,9 @@ static void sec_free_req_id(struct sec_req *req) qp_ctx->req_list[req_id] = NULL; req->qp_ctx = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); idr_remove(&qp_ctx->req_idr, req_id); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); } static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) @@ -273,7 +273,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) return -EBUSY; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); if (ctx->fake_req_limit <= @@ -281,10 +281,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) list_add_tail(&req->backlog_head, &qp_ctx->backlog); atomic64_inc(&ctx->sec->debug.dfx.send_cnt); atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return -EBUSY; } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(ret == -EBUSY)) return -ENOBUFS; @@ -487,7 +487,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp->req_cb = sec_req_cb; - mutex_init(&qp_ctx->req_lock); + spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); INIT_LIST_HEAD(&qp_ctx->backlog); @@ -1382,7 +1382,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, { struct sec_req *backlog_req = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); if (ctx->fake_req_limit >= atomic_read(&qp_ctx->qp->qp_status.used) && !list_empty(&qp_ctx->backlog)) { @@ -1390,7 +1390,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, typeof(*backlog_req), backlog_head); list_del(&backlog_req->backlog_head); } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return backlog_req; } From 68740ab505431f268dc1ee26a54b871e75f0ddaa Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Fri, 1 Jul 2022 09:59:54 +0800 Subject: [PATCH 51/89] crypto: hisilicon - Kunpeng916 crypto driver don't sleep when in 
softirq When kunpeng916 encryption driver is used to deencrypt and decrypt packets during the softirq, it is not allowed to use mutex lock. Fixes: 915e4e8413da ("crypto: hisilicon - SEC security accelerator driver") Signed-off-by: Zhengchao Shao Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec/sec_algs.c | 14 +++++++------- drivers/crypto/hisilicon/sec/sec_drv.h | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 0a3c8f019b025..490e1542305e1 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -449,7 +449,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, */ } - mutex_lock(&ctx->queue->queuelock); + spin_lock_bh(&ctx->queue->queuelock); /* Put the IV in place for chained cases */ switch (ctx->cipher_alg) { case SEC_C_AES_CBC_128: @@ -509,7 +509,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, list_del(&backlog_req->backlog_head); } } - mutex_unlock(&ctx->queue->queuelock); + spin_unlock_bh(&ctx->queue->queuelock); mutex_lock(&sec_req->lock); list_del(&sec_req_el->head); @@ -798,7 +798,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, */ /* Grab a big lock for a long time to avoid concurrency issues */ - mutex_lock(&queue->queuelock); + spin_lock_bh(&queue->queuelock); /* * Can go on to queue if we have space in either: @@ -814,15 +814,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, ret = -EBUSY; if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { list_add_tail(&sec_req->backlog_head, &ctx->backlog); - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); goto out; } - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); goto err_free_elements; } ret = sec_send_request(sec_req, queue); - mutex_unlock(&queue->queuelock); + spin_unlock_bh(&queue->queuelock); if (ret) goto err_free_elements; @@ -881,7 +881,7 @@ static int sec_alg_skcipher_init(struct crypto_skcipher *tfm) if (IS_ERR(ctx->queue)) return PTR_ERR(ctx->queue); - mutex_init(&ctx->queue->queuelock); + spin_lock_init(&ctx->queue->queuelock); ctx->queue->havesoftqueue = false; return 0; diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h index 179a8250d691c..e2a50bf2234b9 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.h +++ b/drivers/crypto/hisilicon/sec/sec_drv.h @@ -347,7 +347,7 @@ struct sec_queue { DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN); DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *)); bool havesoftqueue; - struct mutex queuelock; + spinlock_t queuelock; void *shadow[SEC_QUEUE_LEN]; }; From 932be3e95b228d13cd1b8aaf5c0578c76e3f7dc6 Mon Sep 17 00:00:00 2001 From: Ofer Heifetz Date: Sat, 2 Jul 2022 10:14:26 +0300 Subject: [PATCH 52/89] crypto: inside-secure - fix packed bit-field result descriptor When mixing bit-field and none bit-filed in packed struct the none bit-field starts at a distinct memory location, thus adding an additional byte to the overall structure which is used in memory zero-ing and other configuration calculations. Fix this by removing the none bit-field that has a following bit-field. 
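The size effect being fixed can be reproduced with a stand-alone program. The field widths below mirror the result descriptor, the struct names are made up, and bit-field layout is compiler/ABI specific; with GCC on a typical Linux target the mixed layout comes out one byte larger than the all-u32 bit-field layout:

/*
 * A plain (non-bit-field) member between bit-fields must start on a byte
 * boundary even in a packed struct, which is what adds the extra byte.
 */
#include <stdio.h>

struct mixed {
	unsigned int   packet_length:17;
	unsigned int   error_code:15;
	unsigned char  bypass_length:4;
	unsigned char  e15:1;
	unsigned short rsvd0;		/* plain member between bit-fields */
	unsigned char  hash_bytes:1;
	unsigned char  hash_length:6;
	unsigned char  generic_bytes:1;
	unsigned char  checksum:1;
	unsigned char  next_header:1;
	unsigned char  length:1;
} __attribute__((packed));

struct all_u32 {
	unsigned int packet_length:17;
	unsigned int error_code:15;
	unsigned int bypass_length:4;
	unsigned int e15:1;
	unsigned int rsvd0:16;
	unsigned int hash_bytes:1;
	unsigned int hash_length:6;
	unsigned int generic_bytes:1;
	unsigned int checksum:1;
	unsigned int next_header:1;
	unsigned int length:1;
} __attribute__((packed));

int main(void)
{
	printf("mixed:   %zu bytes\n", sizeof(struct mixed));
	printf("all u32: %zu bytes\n", sizeof(struct all_u32));
	return 0;
}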
Signed-off-by: Ofer Heifetz Acked-by: Antoine Tenart Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index ce1e611a163e7..797ff91512e0d 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h @@ -497,15 +497,15 @@ struct result_data_desc { u32 packet_length:17; u32 error_code:15; - u8 bypass_length:4; - u8 e15:1; - u16 rsvd0; - u8 hash_bytes:1; - u8 hash_length:6; - u8 generic_bytes:1; - u8 checksum:1; - u8 next_header:1; - u8 length:1; + u32 bypass_length:4; + u32 e15:1; + u32 rsvd0:16; + u32 hash_bytes:1; + u32 hash_length:6; + u32 generic_bytes:1; + u32 checksum:1; + u32 next_header:1; + u32 length:1; u16 application_id; u16 rsvd1; From 79e6e2f3f3ff345947075341781e900e4f70db81 Mon Sep 17 00:00:00 2001 From: Ignat Korchagin Date: Mon, 4 Jul 2022 11:38:40 +0100 Subject: [PATCH 53/89] crypto: testmgr - populate RSA CRT parameters in RSA test vectors Changes from v1: * replace some accidental spaces with tabs In commit f145d411a67e ("crypto: rsa - implement Chinese Remainder Theorem for faster private key operations") we have started to use the additional primes and coefficients for RSA private key operations. However, these additional parameters are not present (defined as 0 integers) in the RSA test vectors. Some parameters were borrowed from OpenSSL, so I was able to find the source. I could not find the public source for 1 vector though, so had to recover the parameters by implementing Appendix C from [1]. [1]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Br1.pdf Fixes: f145d411a67e ("crypto: rsa - implement Chinese Remainder Theorem for faster private key operations") Reported-by: Tasmiya Nalatwad Signed-off-by: Ignat Korchagin Signed-off-by: Herbert Xu --- crypto/testmgr.h | 121 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 100 insertions(+), 21 deletions(-) diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 8e2dce86dd483..f1dffdace219e 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -185,7 +185,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { { #ifndef CONFIG_CRYPTO_FIPS .key = - "\x30\x81\x9A" /* sequence of 154 bytes */ + "\x30\x82\x01\x38" /* sequence of 312 bytes */ "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x41" /* modulus - integer of 65 bytes */ "\x00\xAA\x36\xAB\xCE\x88\xAC\xFD\xFF\x55\x52\x3C\x7F\xC4\x52\x3F" @@ -199,23 +199,36 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\xC2\xCD\x2D\xFF\x43\x40\x98\xCD\x20\xD8\xA1\x38\xD0\x90\xBF\x64" "\x79\x7C\x3F\xA7\xA2\xCD\xCB\x3C\xD1\xE0\xBD\xBA\x26\x54\xB4\xF9" "\xDF\x8E\x8A\xE5\x9D\x73\x3D\x9F\x33\xB3\x01\x62\x4A\xFD\x1D\x51" - "\x02\x01\x00" /* prime1 - integer of 1 byte */ - "\x02\x01\x00" /* prime2 - integer of 1 byte */ - "\x02\x01\x00" /* exponent1 - integer of 1 byte */ - "\x02\x01\x00" /* exponent2 - integer of 1 byte */ - "\x02\x01\x00", /* coefficient - integer of 1 byte */ + "\x02\x21" /* prime1 - integer of 33 bytes */ + "\x00\xD8\x40\xB4\x16\x66\xB4\x2E\x92\xEA\x0D\xA3\xB4\x32\x04\xB5" + "\xCF\xCE\x33\x52\x52\x4D\x04\x16\xA5\xA4\x41\xE7\x00\xAF\x46\x12" + "\x0D" + "\x02\x21" /* prime2 - integer of 33 bytes */ + "\x00\xC9\x7F\xB1\xF0\x27\xF4\x53\xF6\x34\x12\x33\xEA\xAA\xD1\xD9" + "\x35\x3F\x6C\x42\xD0\x88\x66\xB1\xD0\x5A\x0F\x20\x35\x02\x8B\x9D" + "\x89" + "\x02\x20" /* 
exponent1 - integer of 32 bytes */ + "\x59\x0B\x95\x72\xA2\xC2\xA9\xC4\x06\x05\x9D\xC2\xAB\x2F\x1D\xAF" + "\xEB\x7E\x8B\x4F\x10\xA7\x54\x9E\x8E\xED\xF5\xB4\xFC\xE0\x9E\x05" + "\x02\x21" /* exponent2 - integer of 33 bytes */ + "\x00\x8E\x3C\x05\x21\xFE\x15\xE0\xEA\x06\xA3\x6F\xF0\xF1\x0C\x99" + "\x52\xC3\x5B\x7A\x75\x14\xFD\x32\x38\xB8\x0A\xAD\x52\x98\x62\x8D" + "\x51" + "\x02\x20" /* coefficient - integer of 32 bytes */ + "\x36\x3F\xF7\x18\x9D\xA8\xE9\x0B\x1D\x34\x1F\x71\xD0\x9B\x76\xA8" + "\xA9\x43\xE1\x1D\x10\xB2\x4D\x24\x9F\x2D\xEA\xFE\xF8\x0C\x18\x26", .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", .c = "\x63\x1c\xcd\x7b\xe1\x7e\xe4\xde\xc9\xa8\x89\xa1\x74\xcb\x3c\x63" "\x7d\x24\xec\x83\xc3\x15\xe4\x7f\x73\x05\x34\xd1\xec\x22\xbb\x8a" "\x5e\x32\x39\x6d\xc1\x1d\x7d\x50\x3b\x9f\x7a\xad\xf0\x2e\x25\x53" "\x9f\x6e\xbd\x4c\x55\x84\x0c\x9b\xcf\x1a\x4b\x51\x1e\x9e\x0c\x06", - .key_len = 157, + .key_len = 316, .m_size = 8, .c_size = 64, }, { .key = - "\x30\x82\x01\x1D" /* sequence of 285 bytes */ + "\x30\x82\x02\x5B" /* sequence of 603 bytes */ "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x81\x81" /* modulus - integer of 129 bytes */ "\x00\xBB\xF8\x2F\x09\x06\x82\xCE\x9C\x23\x38\xAC\x2B\x9D\xA8\x71" @@ -238,12 +251,35 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\x93\x99\x26\xED\x4F\x74\xA1\x3E\xDD\xFB\xE1\xA1\xCE\xCC\x48\x94" "\xAF\x94\x28\xC2\xB7\xB8\x88\x3F\xE4\x46\x3A\x4B\xC8\x5B\x1C\xB3" "\xC1" - "\x02\x01\x00" /* prime1 - integer of 1 byte */ - "\x02\x01\x00" /* prime2 - integer of 1 byte */ - "\x02\x01\x00" /* exponent1 - integer of 1 byte */ - "\x02\x01\x00" /* exponent2 - integer of 1 byte */ - "\x02\x01\x00", /* coefficient - integer of 1 byte */ - .key_len = 289, + "\x02\x41" /* prime1 - integer of 65 bytes */ + "\x00\xEE\xCF\xAE\x81\xB1\xB9\xB3\xC9\x08\x81\x0B\x10\xA1\xB5\x60" + "\x01\x99\xEB\x9F\x44\xAE\xF4\xFD\xA4\x93\xB8\x1A\x9E\x3D\x84\xF6" + "\x32\x12\x4E\xF0\x23\x6E\x5D\x1E\x3B\x7E\x28\xFA\xE7\xAA\x04\x0A" + "\x2D\x5B\x25\x21\x76\x45\x9D\x1F\x39\x75\x41\xBA\x2A\x58\xFB\x65" + "\x99" + "\x02\x41" /* prime2 - integer of 65 bytes */ + "\x00\xC9\x7F\xB1\xF0\x27\xF4\x53\xF6\x34\x12\x33\xEA\xAA\xD1\xD9" + "\x35\x3F\x6C\x42\xD0\x88\x66\xB1\xD0\x5A\x0F\x20\x35\x02\x8B\x9D" + "\x86\x98\x40\xB4\x16\x66\xB4\x2E\x92\xEA\x0D\xA3\xB4\x32\x04\xB5" + "\xCF\xCE\x33\x52\x52\x4D\x04\x16\xA5\xA4\x41\xE7\x00\xAF\x46\x15" + "\x03" + "\x02\x40" /* exponent1 - integer of 64 bytes */ + "\x54\x49\x4C\xA6\x3E\xBA\x03\x37\xE4\xE2\x40\x23\xFC\xD6\x9A\x5A" + "\xEB\x07\xDD\xDC\x01\x83\xA4\xD0\xAC\x9B\x54\xB0\x51\xF2\xB1\x3E" + "\xD9\x49\x09\x75\xEA\xB7\x74\x14\xFF\x59\xC1\xF7\x69\x2E\x9A\x2E" + "\x20\x2B\x38\xFC\x91\x0A\x47\x41\x74\xAD\xC9\x3C\x1F\x67\xC9\x81" + "\x02\x40" /* exponent2 - integer of 64 bytes */ + "\x47\x1E\x02\x90\xFF\x0A\xF0\x75\x03\x51\xB7\xF8\x78\x86\x4C\xA9" + "\x61\xAD\xBD\x3A\x8A\x7E\x99\x1C\x5C\x05\x56\xA9\x4C\x31\x46\xA7" + "\xF9\x80\x3F\x8F\x6F\x8A\xE3\x42\xE9\x31\xFD\x8A\xE4\x7A\x22\x0D" + "\x1B\x99\xA4\x95\x84\x98\x07\xFE\x39\xF9\x24\x5A\x98\x36\xDA\x3D" + "\x02\x41", /* coefficient - integer of 65 bytes */ + "\x00\xB0\x6C\x4F\xDA\xBB\x63\x01\x19\x8D\x26\x5B\xDB\xAE\x94\x23" + "\xB3\x80\xF2\x71\xF7\x34\x53\x88\x50\x93\x07\x7F\xCD\x39\xE2\x11" + "\x9F\xC9\x86\x32\x15\x4F\x58\x83\xB1\x67\xA9\x67\xBF\x40\x2B\x4E" + "\x9E\x2E\x0F\x96\x56\xE6\x98\xEA\x36\x66\xED\xFB\x25\x79\x80\x39" + "\xF7", + .key_len = 607, .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", .c = "\x74\x1b\x55\xac\x47\xb5\x08\x0a\x6e\x2b\x2d\xf7\x94\xb8\x8a\x95" @@ -259,7 +295,7 @@ static const struct 
akcipher_testvec rsa_tv_template[] = { }, { #endif .key = - "\x30\x82\x02\x20" /* sequence of 544 bytes */ + "\x30\x82\x04\xA3" /* sequence of 1187 bytes */ "\x02\x01\x00" /* version - integer of 1 byte */ "\x02\x82\x01\x01\x00" /* modulus - integer of 256 bytes */ "\xDB\x10\x1A\xC2\xA3\xF1\xDC\xFF\x13\x6B\xED\x44\xDF\xF0\x02\x6D" @@ -296,12 +332,55 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\x62\xFF\xE9\x46\xB8\xD8\x44\xDB\xA5\xCC\x31\x54\x34\xCE\x3E\x82" "\xD6\xBF\x7A\x0B\x64\x21\x6D\x88\x7E\x5B\x45\x12\x1E\x63\x8D\x49" "\xA7\x1D\xD9\x1E\x06\xCD\xE8\xBA\x2C\x8C\x69\x32\xEA\xBE\x60\x71" - "\x02\x01\x00" /* prime1 - integer of 1 byte */ - "\x02\x01\x00" /* prime2 - integer of 1 byte */ - "\x02\x01\x00" /* exponent1 - integer of 1 byte */ - "\x02\x01\x00" /* exponent2 - integer of 1 byte */ - "\x02\x01\x00", /* coefficient - integer of 1 byte */ - .key_len = 548, + "\x02\x81\x81" /* prime1 - integer of 129 bytes */ + "\x00\xFA\xAC\xE1\x37\x5E\x32\x11\x34\xC6\x72\x58\x2D\x91\x06\x3E" + "\x77\xE7\x11\x21\xCD\x4A\xF8\xA4\x3F\x0F\xEF\x31\xE3\xF3\x55\xA0" + "\xB9\xAC\xB6\xCB\xBB\x41\xD0\x32\x81\x9A\x8F\x7A\x99\x30\x77\x6C" + "\x68\x27\xE2\x96\xB5\x72\xC9\xC3\xD4\x42\xAA\xAA\xCA\x95\x8F\xFF" + "\xC9\x9B\x52\x34\x30\x1D\xCF\xFE\xCF\x3C\x56\x68\x6E\xEF\xE7\x6C" + "\xD7\xFB\x99\xF5\x4A\xA5\x21\x1F\x2B\xEA\x93\xE8\x98\x26\xC4\x6E" + "\x42\x21\x5E\xA0\xA1\x2A\x58\x35\xBB\x10\xE7\xBA\x27\x0A\x3B\xB3" + "\xAF\xE2\x75\x36\x04\xAC\x56\xA0\xAB\x52\xDE\xCE\xDD\x2C\x28\x77" + "\x03" + "\x02\x81\x81" /* prime2 - integer of 129 bytes */ + "\x00\xDF\xB7\x52\xB6\xD7\xC0\xE2\x96\xE7\xC9\xFE\x5D\x71\x5A\xC4" + "\x40\x96\x2F\xE5\x87\xEA\xF3\xA5\x77\x11\x67\x3C\x8D\x56\x08\xA7" + "\xB5\x67\xFA\x37\xA8\xB8\xCF\x61\xE8\x63\xD8\x38\x06\x21\x2B\x92" + "\x09\xA6\x39\x3A\xEA\xA8\xB4\x45\x4B\x36\x10\x4C\xE4\x00\x66\x71" + "\x65\xF8\x0B\x94\x59\x4F\x8C\xFD\xD5\x34\xA2\xE7\x62\x84\x0A\xA7" + "\xBB\xDB\xD9\x8A\xCD\x05\xE1\xCC\x57\x7B\xF1\xF1\x1F\x11\x9D\xBA" + "\x3E\x45\x18\x99\x1B\x41\x64\x43\xEE\x97\x5D\x77\x13\x5B\x74\x69" + "\x73\x87\x95\x05\x07\xBE\x45\x07\x17\x7E\x4A\x69\x22\xF3\xDB\x05" + "\x39" + "\x02\x81\x80" /* exponent1 - integer of 128 bytes */ + "\x5E\xD8\xDC\xDA\x53\x44\xC4\x67\xE0\x92\x51\x34\xE4\x83\xA5\x4D" + "\x3E\xDB\xA7\x9B\x82\xBB\x73\x81\xFC\xE8\x77\x4B\x15\xBE\x17\x73" + "\x49\x9B\x5C\x98\xBC\xBD\x26\xEF\x0C\xE9\x2E\xED\x19\x7E\x86\x41" + "\x1E\x9E\x48\x81\xDD\x2D\xE4\x6F\xC2\xCD\xCA\x93\x9E\x65\x7E\xD5" + "\xEC\x73\xFD\x15\x1B\xA2\xA0\x7A\x0F\x0D\x6E\xB4\x53\x07\x90\x92" + "\x64\x3B\x8B\xA9\x33\xB3\xC5\x94\x9B\x4C\x5D\x9C\x7C\x46\xA4\xA5" + "\x56\xF4\xF3\xF8\x27\x0A\x7B\x42\x0D\x92\x70\x47\xE7\x42\x51\xA9" + "\xC2\x18\xB1\x58\xB1\x50\x91\xB8\x61\x41\xB6\xA9\xCE\xD4\x7C\xBB" + "\x02\x81\x80" /* exponent2 - integer of 128 bytes */ + "\x54\x09\x1F\x0F\x03\xD8\xB6\xC5\x0C\xE8\xB9\x9E\x0C\x38\x96\x43" + "\xD4\xA6\xC5\x47\xDB\x20\x0E\xE5\xBD\x29\xD4\x7B\x1A\xF8\x41\x57" + "\x49\x69\x9A\x82\xCC\x79\x4A\x43\xEB\x4D\x8B\x2D\xF2\x43\xD5\xA5" + "\xBE\x44\xFD\x36\xAC\x8C\x9B\x02\xF7\x9A\x03\xE8\x19\xA6\x61\xAE" + "\x76\x10\x93\x77\x41\x04\xAB\x4C\xED\x6A\xCC\x14\x1B\x99\x8D\x0C" + "\x6A\x37\x3B\x86\x6C\x51\x37\x5B\x1D\x79\xF2\xA3\x43\x10\xC6\xA7" + "\x21\x79\x6D\xF9\xE9\x04\x6A\xE8\x32\xFF\xAE\xFD\x1C\x7B\x8C\x29" + "\x13\xA3\x0C\xB2\xAD\xEC\x6C\x0F\x8D\x27\x12\x7B\x48\xB2\xDB\x31" + "\x02\x81\x81", /* coefficient - integer of 129 bytes */ + "\x00\x8D\x1B\x05\xCA\x24\x1F\x0C\x53\x19\x52\x74\x63\x21\xFA\x78" + "\x46\x79\xAF\x5C\xDE\x30\xA4\x6C\x20\x38\xE6\x97\x39\xB8\x7A\x70" + 
"\x0D\x8B\x6C\x6D\x13\x74\xD5\x1C\xDE\xA9\xF4\x60\x37\xFE\x68\x77" + "\x5E\x0B\x4E\x5E\x03\x31\x30\xDF\xD6\xAE\x85\xD0\x81\xBB\x61\xC7" + "\xB1\x04\x5A\xC4\x6D\x56\x1C\xD9\x64\xE7\x85\x7F\x88\x91\xC9\x60" + "\x28\x05\xE2\xC6\x24\x8F\xDD\x61\x64\xD8\x09\xDE\x7E\xD3\x4A\x61" + "\x1A\xD3\x73\x58\x4B\xD8\xA0\x54\x25\x48\x83\x6F\x82\x6C\xAF\x36" + "\x51\x2A\x5D\x14\x2F\x41\x25\x00\xDD\xF8\xF3\x95\xFE\x31\x25\x50" + "\x12", + .key_len = 1191, .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a", .c = "\xb2\x97\x76\xb4\xae\x3e\x38\x3c\x7e\x64\x1f\xcc\xa2\x7f\xf6\xbe" From e4e712bbbd6d73263d964d6cb390b373738b62ab Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 4 Jul 2022 09:42:48 +0000 Subject: [PATCH 54/89] crypto: aria - Implement ARIA symmetric cipher algorithm ARIA(RFC 5794) is a symmetric block cipher algorithm. This algorithm is being used widely in South Korea as a standard cipher algorithm. This code is written based on the ARIA implementation of OpenSSL. The OpenSSL code is based on the distributed source code[1] by KISA. ARIA has three key sizes and corresponding rounds. ARIA128: 12 rounds. ARIA192: 14 rounds. ARIA245: 16 rounds. [1] https://seed.kisa.or.kr/kisa/Board/19/detailView.do (Korean) Signed-off-by: Taehee Yoo Signed-off-by: Herbert Xu --- crypto/Kconfig | 15 ++ crypto/Makefile | 1 + crypto/aria.c | 288 ++++++++++++++++++++++++++ include/crypto/aria.h | 461 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 765 insertions(+) create mode 100644 crypto/aria.c create mode 100644 include/crypto/aria.h diff --git a/crypto/Kconfig b/crypto/Kconfig index 59489a300cd10..7d98a2b4ac9cd 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1508,6 +1508,21 @@ config CRYPTO_SEED See also: +config CRYPTO_ARIA + tristate "ARIA cipher algorithm" + select CRYPTO_ALGAPI + help + ARIA cipher algorithm (RFC5794). + + ARIA is a standard encryption algorithm of the Republic of Korea. + The ARIA specifies three key sizes and rounds. + 128-bit: 12 rounds. + 192-bit: 14 rounds. + 256-bit: 16 rounds. + + See also: + + config CRYPTO_SERPENT tristate "Serpent cipher algorithm" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index a4a84860fe43d..167c004dbf4f9 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -148,6 +148,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_SEED) += seed.o +obj-$(CONFIG_CRYPTO_ARIA) += aria.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o diff --git a/crypto/aria.c b/crypto/aria.c new file mode 100644 index 0000000000000..ac3dffac34bbc --- /dev/null +++ b/crypto/aria.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Cryptographic API. + * + * ARIA Cipher Algorithm. + * + * Documentation of ARIA can be found in RFC 5794. + * Copyright (c) 2022 Taehee Yoo + * + * Information for ARIA + * http://210.104.33.10/ARIA/index-e.html (English) + * http://seed.kisa.or.kr/ (Korean) + * + * Public domain version is distributed above. 
+ */ + +#include + +static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, + unsigned int key_len) +{ + const __be32 *key = (const __be32 *)in_key; + u32 w0[4], w1[4], w2[4], w3[4]; + u32 reg0, reg1, reg2, reg3; + const u32 *ck; + int rkidx = 0; + + ck = &key_rc[(key_len - 16) / 8][0]; + + w0[0] = be32_to_cpu(key[0]); + w0[1] = be32_to_cpu(key[1]); + w0[2] = be32_to_cpu(key[2]); + w0[3] = be32_to_cpu(key[3]); + + reg0 = w0[0] ^ ck[0]; + reg1 = w0[1] ^ ck[1]; + reg2 = w0[2] ^ ck[2]; + reg3 = w0[3] ^ ck[3]; + + aria_subst_diff_odd(®0, ®1, ®2, ®3); + + if (key_len > 16) { + w1[0] = be32_to_cpu(key[4]); + w1[1] = be32_to_cpu(key[5]); + if (key_len > 24) { + w1[2] = be32_to_cpu(key[6]); + w1[3] = be32_to_cpu(key[7]); + } else { + w1[2] = 0; + w1[3] = 0; + } + } else { + w1[0] = 0; + w1[1] = 0; + w1[2] = 0; + w1[3] = 0; + } + + w1[0] ^= reg0; + w1[1] ^= reg1; + w1[2] ^= reg2; + w1[3] ^= reg3; + + reg0 = w1[0]; + reg1 = w1[1]; + reg2 = w1[2]; + reg3 = w1[3]; + + reg0 ^= ck[4]; + reg1 ^= ck[5]; + reg2 ^= ck[6]; + reg3 ^= ck[7]; + + aria_subst_diff_even(®0, ®1, ®2, ®3); + + reg0 ^= w0[0]; + reg1 ^= w0[1]; + reg2 ^= w0[2]; + reg3 ^= w0[3]; + + w2[0] = reg0; + w2[1] = reg1; + w2[2] = reg2; + w2[3] = reg3; + + reg0 ^= ck[8]; + reg1 ^= ck[9]; + reg2 ^= ck[10]; + reg3 ^= ck[11]; + + aria_subst_diff_odd(®0, ®1, ®2, ®3); + + w3[0] = reg0 ^ w1[0]; + w3[1] = reg1 ^ w1[1]; + w3[2] = reg2 ^ w1[2]; + w3[3] = reg3 ^ w1[3]; + + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97); + if (key_len > 16) { + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97); + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97); + + if (key_len > 24) { + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97); + + rkidx++; + aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109); + } + } +} + +static void aria_set_decrypt_key(struct aria_ctx *ctx) +{ + int i; + + for (i = 0; i < 4; i++) { + ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i]; + ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i]; + } + + for (i = 1; i < ctx->rounds; i++) { + ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]); + ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]); + ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]); + ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]); + + aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); + aria_diff_byte(&ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); + aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], + &ctx->dec_key[i][2], &ctx->dec_key[i][3]); + } +} + +static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + if (key_len != 16 && key_len != 24 && key_len != 32) + return -EINVAL; + + ctx->key_length = key_len; + ctx->rounds = (key_len + 
32) / 4; + + aria_set_encrypt_key(ctx, in_key, key_len); + aria_set_decrypt_key(ctx); + + return 0; +} + +static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in, + u32 key[][ARIA_RD_KEY_WORDS]) +{ + const __be32 *src = (const __be32 *)in; + __be32 *dst = (__be32 *)out; + u32 reg0, reg1, reg2, reg3; + int rounds, rkidx = 0; + + rounds = ctx->rounds; + + reg0 = be32_to_cpu(src[0]); + reg1 = be32_to_cpu(src[1]); + reg2 = be32_to_cpu(src[2]); + reg3 = be32_to_cpu(src[3]); + + aria_add_round_key(key[rkidx], ®0, ®1, ®2, ®3); + rkidx++; + + aria_subst_diff_odd(®0, ®1, ®2, ®3); + aria_add_round_key(key[rkidx], ®0, ®1, ®2, ®3); + rkidx++; + + while ((rounds -= 2) > 0) { + aria_subst_diff_even(®0, ®1, ®2, ®3); + aria_add_round_key(key[rkidx], ®0, ®1, ®2, ®3); + rkidx++; + + aria_subst_diff_odd(®0, ®1, ®2, ®3); + aria_add_round_key(key[rkidx], ®0, ®1, ®2, ®3); + rkidx++; + } + + reg0 = key[rkidx][0] ^ make_u32((u8)(x1[get_u8(reg0, 0)]), + (u8)(x2[get_u8(reg0, 1)] >> 8), + (u8)(s1[get_u8(reg0, 2)]), + (u8)(s2[get_u8(reg0, 3)])); + reg1 = key[rkidx][1] ^ make_u32((u8)(x1[get_u8(reg1, 0)]), + (u8)(x2[get_u8(reg1, 1)] >> 8), + (u8)(s1[get_u8(reg1, 2)]), + (u8)(s2[get_u8(reg1, 3)])); + reg2 = key[rkidx][2] ^ make_u32((u8)(x1[get_u8(reg2, 0)]), + (u8)(x2[get_u8(reg2, 1)] >> 8), + (u8)(s1[get_u8(reg2, 2)]), + (u8)(s2[get_u8(reg2, 3)])); + reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]), + (u8)(x2[get_u8(reg3, 1)] >> 8), + (u8)(s1[get_u8(reg3, 2)]), + (u8)(s2[get_u8(reg3, 3)])); + + dst[0] = cpu_to_be32(reg0); + dst[1] = cpu_to_be32(reg1); + dst[2] = cpu_to_be32(reg2); + dst[3] = cpu_to_be32(reg3); +} + +static void aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + __aria_crypt(ctx, out, in, ctx->enc_key); +} + +static void aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aria_ctx *ctx = crypto_tfm_ctx(tfm); + + __aria_crypt(ctx, out, in, ctx->dec_key); +} + +static struct crypto_alg aria_alg = { + .cra_name = "aria", + .cra_driver_name = "aria-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = ARIA_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aria_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = ARIA_MIN_KEY_SIZE, + .cia_max_keysize = ARIA_MAX_KEY_SIZE, + .cia_setkey = aria_set_key, + .cia_encrypt = aria_encrypt, + .cia_decrypt = aria_decrypt + } + } +}; + +static int __init aria_init(void) +{ + return crypto_register_alg(&aria_alg); +} + +static void __exit aria_fini(void) +{ + crypto_unregister_alg(&aria_alg); +} + +subsys_initcall(aria_init); +module_exit(aria_fini); + +MODULE_DESCRIPTION("ARIA Cipher Algorithm"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Taehee Yoo "); +MODULE_ALIAS_CRYPTO("aria"); diff --git a/include/crypto/aria.h b/include/crypto/aria.h new file mode 100644 index 0000000000000..4a86661788e86 --- /dev/null +++ b/include/crypto/aria.h @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Cryptographic API. + * + * ARIA Cipher Algorithm. + * + * Documentation of ARIA can be found in RFC 5794. + * Copyright (c) 2022 Taehee Yoo + * Copyright (c) 2022 Taehee Yoo + * + * Information for ARIA + * http://210.104.33.10/ARIA/index-e.html (English) + * http://seed.kisa.or.kr/ (Korean) + * + * Public domain version is distributed above. 
+ */ + +#ifndef _CRYPTO_ARIA_H +#define _CRYPTO_ARIA_H + +#include +#include +#include +#include +#include +#include + +#define ARIA_MIN_KEY_SIZE 16 +#define ARIA_MAX_KEY_SIZE 32 +#define ARIA_BLOCK_SIZE 16 +#define ARIA_MAX_RD_KEYS 17 +#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32)) + +struct aria_ctx { + int key_length; + int rounds; + u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; + u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; +}; + +static const u32 key_rc[5][4] = { + { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, + { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 }, + { 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e }, + { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 }, + { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 } +}; + +static const u32 s1[256] = { + 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b, + 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5, + 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b, + 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676, + 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d, + 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0, + 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf, + 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0, + 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626, + 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc, + 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1, + 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515, + 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3, + 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a, + 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2, + 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575, + 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a, + 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0, + 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3, + 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484, + 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed, + 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b, + 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939, + 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf, + 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb, + 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585, + 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f, + 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8, + 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f, + 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5, + 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121, + 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2, + 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec, + 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717, + 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d, + 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373, + 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc, + 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888, + 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414, + 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb, + 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a, + 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c, + 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262, + 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979, + 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d, + 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9, + 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea, + 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808, + 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e, + 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6, + 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f, + 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a, + 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666, + 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e, + 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9, + 
0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e, + 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111, + 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494, + 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9, + 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf, + 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d, + 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868, + 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f, + 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616 +}; + +static const u32 s2[256] = { + 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc, + 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc, + 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646, + 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1, + 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb, + 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b, + 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303, + 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1, + 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b, + 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969, + 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae, + 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb, + 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb, + 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4, + 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa, + 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb, + 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929, + 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191, + 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595, + 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd, + 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838, + 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828, + 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7, + 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353, + 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131, + 0x36003636, 0x21002121, 0x58005858, 0x48004848, + 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474, + 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1, + 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7, + 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626, + 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9, + 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040, + 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd, + 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404, + 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f, + 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc, + 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757, + 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565, + 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e, + 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5, + 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717, + 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a, + 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767, + 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343, + 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5, + 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434, + 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a, + 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8, + 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b, + 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6, + 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424, + 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada, + 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef, + 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f, + 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a, + 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c, + 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333, + 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3, + 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0, + 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d, + 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5, + 0x3d003d3d, 
0x22002222, 0xb300b3b3, 0xf800f8f8, + 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a, + 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181 +}; + +static const u32 x1[256] = { + 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5, + 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038, + 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e, + 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb, + 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082, + 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087, + 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044, + 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb, + 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032, + 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d, + 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b, + 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e, + 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066, + 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2, + 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049, + 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025, + 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064, + 0x86860086, 0x68680068, 0x98980098, 0x16160016, + 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc, + 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092, + 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050, + 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da, + 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057, + 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084, + 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000, + 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a, + 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005, + 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006, + 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f, + 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002, + 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003, + 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b, + 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041, + 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea, + 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce, + 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073, + 0x96960096, 0xacac00ac, 0x74740074, 0x22220022, + 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085, + 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8, + 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e, + 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071, + 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089, + 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e, + 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b, + 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b, + 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020, + 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe, + 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4, + 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033, + 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031, + 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059, + 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f, + 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9, + 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d, + 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f, + 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef, + 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d, + 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0, + 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c, + 0x83830083, 0x53530053, 0x99990099, 0x61610061, + 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e, + 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026, + 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063, + 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d +}; + +static const u32 x2[256] = { + 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00, + 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800, + 0x50505000, 0x39393900, 0xdbdbdb00, 
0xe1e1e100, + 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00, + 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00, + 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300, + 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600, + 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00, + 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900, + 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600, + 0x57575700, 0x43434300, 0x56565600, 0x17171700, + 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00, + 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300, + 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00, + 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800, + 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00, + 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00, + 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800, + 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300, + 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00, + 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00, + 0x02020200, 0x24242400, 0x75757500, 0x93939300, + 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200, + 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00, + 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00, + 0x12121200, 0x97979700, 0x32323200, 0xababab00, + 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300, + 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900, + 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100, + 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900, + 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600, + 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100, + 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500, + 0x86868600, 0x36363600, 0xbebebe00, 0x61616100, + 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00, + 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00, + 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00, + 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500, + 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00, + 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700, + 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00, + 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000, + 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100, + 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00, + 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600, + 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000, + 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00, + 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500, + 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500, + 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00, + 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300, + 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500, + 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00, + 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300, + 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900, + 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00, + 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400, + 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00, + 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00, + 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300, + 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700, + 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00, + 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300, + 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000 +}; + +static inline u32 rotl32(u32 v, u32 r) +{ + return ((v << r) | (v >> (32 - r))); +} + +static inline u32 rotr32(u32 v, u32 r) +{ + return ((v >> r) | (v << (32 - r))); +} + +static inline u32 bswap32(u32 v) +{ + return ((v << 24) ^ + (v >> 24) ^ + ((v & 0x0000ff00) << 8) ^ + ((v & 0x00ff0000) >> 8)); +} + +static inline u8 get_u8(u32 x, u32 y) +{ + return (x >> ((3 - y) * 8)); +} + +static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3) +{ + return ((u32)v0 << 24) | ((u32)v1 << 16) 
| ((u32)v2 << 8) | ((u32)v3); +} + +static inline u32 aria_m(u32 t0) +{ + return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16); +} + +/* S-Box Layer 1 + M */ +static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 = s1[get_u8(*t0, 0)] ^ + s2[get_u8(*t0, 1)] ^ + x1[get_u8(*t0, 2)] ^ + x2[get_u8(*t0, 3)]; + *t1 = s1[get_u8(*t1, 0)] ^ + s2[get_u8(*t1, 1)] ^ + x1[get_u8(*t1, 2)] ^ + x2[get_u8(*t1, 3)]; + *t2 = s1[get_u8(*t2, 0)] ^ + s2[get_u8(*t2, 1)] ^ + x1[get_u8(*t2, 2)] ^ + x2[get_u8(*t2, 3)]; + *t3 = s1[get_u8(*t3, 0)] ^ + s2[get_u8(*t3, 1)] ^ + x1[get_u8(*t3, 2)] ^ + x2[get_u8(*t3, 3)]; +} + +/* S-Box Layer 2 + M */ +static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 = x1[get_u8(*t0, 0)] ^ + x2[get_u8(*t0, 1)] ^ + s1[get_u8(*t0, 2)] ^ + s2[get_u8(*t0, 3)]; + *t1 = x1[get_u8(*t1, 0)] ^ + x2[get_u8(*t1, 1)] ^ + s1[get_u8(*t1, 2)] ^ + s2[get_u8(*t1, 3)]; + *t2 = x1[get_u8(*t2, 0)] ^ + x2[get_u8(*t2, 1)] ^ + s1[get_u8(*t2, 2)] ^ + s2[get_u8(*t2, 3)]; + *t3 = x1[get_u8(*t3, 0)] ^ + x2[get_u8(*t3, 1)] ^ + s1[get_u8(*t3, 2)] ^ + s2[get_u8(*t3, 3)]; +} + +/* Word-level diffusion */ +static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + *t1 ^= *t2; + *t2 ^= *t3; + *t0 ^= *t1; + + *t3 ^= *t1; + *t2 ^= *t0; + *t1 ^= *t2; +} + +/* Byte-level diffusion */ +static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3) +{ + *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff); + *t2 = rotr32(*t2, 16); + *t3 = bswap32(*t3); +} + +/* Key XOR Layer */ +static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2, + u32 *t3) +{ + *t0 ^= rk[0]; + *t1 ^= rk[1]; + *t2 ^= rk[2]; + *t3 ^= rk[3]; +} +/* Odd round Substitution & Diffusion */ +static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); + aria_diff_byte(t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); +} + +/* Even round Substitution & Diffusion */ +static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3) +{ + aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3); + aria_diff_word(t0, t1, t2, t3); + aria_diff_byte(t3, t0, t1); + aria_diff_word(t0, t1, t2, t3); +} + +/* Q, R Macro expanded ARIA GSRK */ +static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n) +{ + int q = 4 - (n / 32); + int r = n % 32; + + rk[0] = (x[0]) ^ + ((y[q % 4]) >> r) ^ + ((y[(q + 3) % 4]) << (32 - r)); + rk[1] = (x[1]) ^ + ((y[(q + 1) % 4]) >> r) ^ + ((y[q % 4]) << (32 - r)); + rk[2] = (x[2]) ^ + ((y[(q + 2) % 4]) >> r) ^ + ((y[(q + 1) % 4]) << (32 - r)); + rk[3] = (x[3]) ^ + ((y[(q + 3) % 4]) >> r) ^ + ((y[(q + 2) % 4]) << (32 - r)); +} + +#endif From 01ce31de7043e17b0d7d47f5e038f067db618113 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 4 Jul 2022 09:42:49 +0000 Subject: [PATCH 55/89] crypto: testmgr - add ARIA testmgr tests It contains ARIA ecb(aria), cbc(aria), cfb(aria), ctr(aria), and gcm(aria). ecb testvector is from RFC standard. cbc, cfb, and ctr testvectors are from KISA[1], who developed ARIA algorithm. gcm(aria) is from openssl test vector. 
[1] https://seed.kisa.or.kr/kisa/kcmvp/EgovVerification.do (Korean) Signed-off-by: Taehee Yoo Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 38 +- crypto/testmgr.c | 31 + crypto/testmgr.h | 2860 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 2928 insertions(+), 1 deletion(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a8831060c4cee..f56d1a9cf0a78 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -71,7 +71,7 @@ static const char *check[] = { "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt", - "camellia", "seed", "rmd160", + "camellia", "seed", "rmd160", "aria", "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", "streebog256", "streebog512", NULL @@ -1730,6 +1730,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("polyval"); break; + case 58: + ret += tcrypt_test("gcm(aria)"); + break; + case 100: ret += tcrypt_test("hmac(md5)"); break; @@ -1866,6 +1870,12 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("cfb(sm4)"); ret += tcrypt_test("ctr(sm4)"); break; + case 192: + ret += tcrypt_test("ecb(aria)"); + ret += tcrypt_test("cbc(aria)"); + ret += tcrypt_test("cfb(aria)"); + ret += tcrypt_test("ctr(aria)"); + break; case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, speed_template_16_24_32); @@ -2192,6 +2202,32 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) 0, speed_template_32); break; + case 227: + test_cipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cbc(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cbc(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cfb(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("cfb(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0, + speed_template_16_24_32); + test_cipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0, + speed_template_16_24_32); + break; + + case 228: + test_aead_speed("gcm(aria)", ENCRYPT, sec, + NULL, 0, 16, 8, speed_template_16_24_32); + test_aead_speed("gcm(aria)", DECRYPT, sec, + NULL, 0, 16, 8, speed_template_16_24_32); + break; + case 300: if (alg) { test_hash_speed(alg, sec, generic_hash_speed_template); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 7a8a567499603..5349ffee6bbd4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4388,6 +4388,12 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(anubis_cbc_tv_template) }, + }, { + .alg = "cbc(aria)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aria_cbc_tv_template) + }, }, { .alg = "cbc(blowfish)", .test = alg_test_skcipher, @@ -4505,6 +4511,12 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(aes_cfb_tv_template) }, + }, { + .alg = "cfb(aria)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aria_cfb_tv_template) + }, }, { .alg = "cfb(sm4)", .test = alg_test_skcipher, @@ -4574,6 +4586,12 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(aes_ctr_tv_template) } + }, { + .alg = "ctr(aria)", + .test = alg_test_skcipher, + .suite = { + 
.cipher = __VECS(aria_ctr_tv_template) + } }, { .alg = "ctr(blowfish)", .test = alg_test_skcipher, @@ -4834,6 +4852,12 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(arc4_tv_template) } + }, { + .alg = "ecb(aria)", + .test = alg_test_skcipher, + .suite = { + .cipher = __VECS(aria_tv_template) + } }, { .alg = "ecb(blowfish)", .test = alg_test_skcipher, @@ -5050,6 +5074,13 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .aead = __VECS(aes_gcm_tv_template) } + }, { + .alg = "gcm(aria)", + .generic_driver = "gcm_base(ctr(aria-generic),ghash-generic)", + .test = alg_test_aead, + .suite = { + .aead = __VECS(aria_gcm_tv_template) + } }, { .alg = "gcm(sm4)", .generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index f1dffdace219e..dee88510f58d5 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -26536,6 +26536,2866 @@ static const struct cipher_testvec seed_tv_template[] = { } }; +/* + * ARIA test vectors + */ +static const struct cipher_testvec aria_tv_template[] = { + { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .klen = 16, + .ptext = "\x00\x11\x22\x33\x44\x55\x66\x77" + "\x88\x99\xaa\xbb\xcc\xdd\xee\xff", + .ctext = "\xd7\x18\xfb\xd6\xab\x64\x4c\x73" + "\x9d\xa9\x5f\x3b\xe6\x45\x17\x78", + .len = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17", + .klen = 24, + .ptext = "\x00\x11\x22\x33\x44\x55\x66\x77" + "\x88\x99\xaa\xbb\xcc\xdd\xee\xff", + .ctext = "\x26\x44\x9c\x18\x05\xdb\xe7\xaa" + "\x25\xa4\x68\xce\x26\x3a\x9e\x79", + .len = 16, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + .klen = 32, + .ptext = "\x00\x11\x22\x33\x44\x55\x66\x77" + "\x88\x99\xaa\xbb\xcc\xdd\xee\xff", + .ctext = "\xf9\x2b\xd7\xc7\x9f\xb7\x2e\x2f" + "\x2b\x8f\x80\xc1\x97\x2d\x24\xfc", + .len = 16, + } +}; + +static const struct cipher_testvec aria_cbc_tv_template[] = { + { + .key = "\x7c\x95\x0d\x07\xe6\x14\x98\x92" + "\x07\xac\x22\x41\x4d\x23\x27\x37", + .klen = 16, + .iv = "\x9d\xd5\x62\xce\x3d\x07\xd9\x89" + "\xf2\x78\x19\x4b\x65\x39\xc3\xc6", + .ptext = "\xcb\xbf\x47\x35\xc5\x37\xf0\x4e" + "\x85\x19\x21\x72\x33\x00\xde\x28", + .ctext = "\xf4\x80\x89\x89\x4a\x37\xda\x98" + "\x80\x52\x74\x75\xd9\xef\x58\xff", + .len = 16, + }, { + .key = "\x8f\xb9\x8d\xc9\xd7\x99\xfe\x7d" + "\xeb\x14\xaa\x65\xaf\x8c\x38\x1a", + .klen = 16, + .iv = "\xb1\x67\x46\x57\x0c\x64\x65\xf2" + "\x8c\x2f\x65\x11\x12\x33\xd4\x9a", + .ptext = "\x3a\xaf\xc1\xeb\x3c\x0c\xc5\xcc" + "\x10\x6e\x45\xa1\xd6\x89\xf1\xe5" + "\x74\xb6\x90\xd3\x81\x45\x00\x66" + "\x62\x15\x78\x84\xb2\x63\x11\x76", + .ctext = "\x3d\x7d\x3a\xeb\x23\x85\x3e\x72" + "\x12\x45\xbb\x5b\x42\x99\xec\xa0" + "\xa2\xbe\x75\xd6\xb1\xd8\xea\x6f" + "\x97\xfe\xfd\xcc\xfc\x08\x38\x00", + .len = 32, + }, { + .key = "\xe8\xe0\x85\x9c\x33\x06\x36\x5f" + "\xa9\xab\x72\x66\xa1\xd7\xf5\x0d", + .klen = 16, + .iv = "\x5d\xd3\xaf\x13\xed\x82\xc8\x92" + "\x4f\xf4\xe2\x35\xdb\x39\x9e\xa5", + .ptext = "\xdf\x73\x61\x44\x86\x2f\x58\x1e" + "\xfe\xf6\xb9\x1d\xd9\x1e\x4c\x7c" + "\xb4\xe6\x2b\x7d\x17\xc3\xc6\x5f" + "\x9d\xf4\x29\x8a\x55\x5c\x82\x0e" + "\x67\x91\xdd\x4b\xfb\x31\x33\xf1" + "\x56\x75\xa3\x2c\x46\x08\xff\x18", + .ctext = "\x85\x07\x8c\x88\x70\x7b\x39\xb8" + "\xfd\x1d\xa1\xd0\x89\x5f\x3f\x85" + "\x18\x5a\xde\x64\xbd\x54\xd5\x67" + 
"\xd1\x27\x4c\x98\x82\x76\xea\x22" + "\x52\x98\x79\xb4\x1d\xe8\x16\xd0" + "\xc6\xea\xf7\xbb\x38\x89\xf2\x5d", + .len = 48, + }, { + .key = "\xc1\x19\x8a\x7b\xc9\xaf\x00\xb3" + "\x92\x3c\xd7\xed\xe7\x76\xc5\x98", + .klen = 16, + .iv = "\xca\x62\x82\x1a\x5b\xb1\xcf\xc1" + "\xfb\x50\xb7\xfc\xb0\x3b\x15\xcb", + .ptext = "\xcb\x92\x56\x74\xc9\xee\x80\x78" + "\x78\xf5\x73\xc5\x5b\x2c\x70\x2d" + "\x4e\x0d\xd7\x17\x6d\x5a\x35\x74" + "\x33\xb0\x7d\xf5\xdf\x5f\x96\x7b" + "\x1c\x79\x16\xd0\xe0\x29\x4e\x94" + "\x95\x46\x86\x7a\x77\x28\x89\xb4" + "\x3d\xbb\x65\xab\xfb\xd1\x6c\xf4" + "\x47\xbd\x7e\x7f\x9b\x1d\x8b\x12", + .ctext = "\x69\xd2\x56\xdf\xa8\x1a\x97\xbd" + "\x69\xb5\xbb\x6b\x29\x1d\x5f\x0f" + "\xdf\x5f\x63\xc0\x83\x0b\xd7\xb1" + "\x31\x2d\xbf\x73\xe1\xe5\x5d\x0e" + "\x0c\x8d\xc4\x8a\xa9\xbd\x5f\xc7" + "\xb5\x61\xa0\x2b\x90\x64\x1a\xde" + "\xd2\xe1\x61\xb9\xce\xf4\x0b\x1c" + "\x9c\x43\x69\x6d\xb2\x32\x98\x44", + .len = 64, + }, { + .key = "\xfa\xf7\x53\xf6\xd6\x08\x70\xf1" + "\x32\x58\x97\x74\x04\x12\x1b\x14", + .klen = 16, + .iv = "\xdd\x93\xb2\x3e\xcb\xc1\x7c\x27" + "\x7f\x9e\x41\x03\xab\x1d\xfb\x77", + .ptext = "\xae\x34\x94\x50\x73\x32\xf0\x75" + "\x96\x53\x2e\x1a\xc9\x91\x2b\x37" + "\x77\xbe\x48\x39\xa7\xd0\x6e\xf7" + "\x22\x7c\x4f\xe7\xd8\x06\xee\x92" + "\x80\x57\x61\x45\x7f\x50\xd5\x0a" + "\x0b\x5e\xd4\xd6\x90\x4e\xc3\x04" + "\x52\x63\xaf\x02\x55\xa6\x49\x4b" + "\x7a\x7e\x2e\x95\xea\x80\x6c\x4b" + "\xb7\x88\x42\x3d\xc1\x09\x28\x97" + "\xd7\xa1\x0f\x0f\x1f\xf1\xea\x63", + .ctext = "\x6b\x83\x00\xf1\x79\xb2\x23\xbf" + "\x17\x26\x8a\xef\xd3\xe1\x0e\x82" + "\x5b\xc7\xde\x3e\x39\x72\x2d\xb0" + "\xad\x25\x3b\xe6\x3b\x9f\xe9\x4b" + "\x6e\xe8\x77\xf5\x9d\x7d\x00\xae" + "\x73\x7b\x81\xff\xe3\x55\x8e\x90" + "\xdf\xe4\xcd\xd5\xdc\x16\x8b\x7a" + "\xe5\x04\x92\x18\xff\xcc\x63\x1b" + "\x53\xf3\x26\x44\x5c\x48\x1d\xa2" + "\x1f\x3f\xe0\x8b\x8f\x6f\xc2\x38", + .len = 80, + }, { + .key = "\xb8\xab\x6d\x03\x9d\xec\x15\x0a" + "\xcd\xcd\x68\x73\xa9\x35\x7e\x8a", + .klen = 16, + .iv = "\x9d\xf1\xc0\xa0\x02\x06\xf0\x03" + "\x43\x45\x6a\x2e\x3f\x21\xa9\x3c", + .ptext = "\xef\xbe\x0c\xa3\x49\x4a\xda\x1e" + "\x64\x90\x85\xeb\xdc\xca\x2b\x37" + "\x78\xb7\x62\xd7\x0a\xee\x35\x38" + "\x97\x72\x6a\x99\xb8\x86\x07\x77" + "\x40\xc3\x14\x49\x1f\x67\xa1\x6e" + "\x87\xf0\x0b\x64\x4d\xea\x7c\x3a" + "\x91\x05\xb1\x48\xa1\x6a\x00\x1d" + "\x1b\x4f\x99\xb9\x52\xc9\x0c\xfd" + "\xf3\xe2\x0b\x5f\xe9\xec\x71\xe2" + "\x7d\x15\x84\x46\xc2\x3b\x77\x7b" + "\x30\x01\x34\x5c\x8f\x22\x58\x9a" + "\x17\x05\x7e\xf6\xd5\x92\xc0\xb4", + .ctext = "\x79\x50\x9b\x34\xd7\x22\x9a\x72" + "\x61\xd7\xd8\xa9\xdb\xcf\x2f\xb0" + "\x81\x11\xe3\xed\xa0\xe4\xbd\x8d" + "\xe6\xf2\x52\x52\x40\xec\x9f\x3b" + "\xd4\x48\xc6\xdf\xfd\x36\x90\x8a" + "\x2f\x3b\xb0\xfb\xf4\x2b\x99\xa5" + "\xb2\x39\xc7\x52\x57\x2b\xbc\xd7" + "\x3f\x06\x10\x15\x2e\xf7\xaa\x79" + "\xd6\x6a\xe5\x4e\x2d\x0f\x5f\xaf" + "\xf9\x5a\x63\x28\x33\xf0\x85\x8a" + "\x06\x45\xce\x73\xaa\x96\x1d\xcc" + "\x6e\xb9\x25\xb8\x4c\xfe\xeb\x64", + .len = 96, + }, { + .key = "\x50\x45\x7b\x4c\x6d\x80\x53\x62" + "\x90\x26\x77\xf8\x04\x65\x26\xe3", + .klen = 16, + .iv = "\x9d\xd3\x73\x7b\x9b\xbd\x45\x97" + "\xd2\xbb\xa1\xb9\x08\x88\x2c\x85", + .ptext = "\x9f\x11\xeb\x78\x74\xcc\x4e\xd6" + "\x06\x4b\x6d\xe4\xdb\x11\x91\x58" + "\x1f\xa4\xf6\x0e\x8f\xe4\xcf\xfc" + "\x95\x9a\x8b\x68\xb4\x54\x57\x58" + "\x27\x71\xe4\x4b\xc5\x78\x6a\x26" + "\x28\xae\xed\x71\x0e\xe7\xbf\xc3" + "\xff\x9c\x46\x7b\x31\x3e\xff\xb1" + "\xa8\xca\xc3\x6d\xa1\x9e\x49\x16" + "\x31\x8b\xed\x2d\x2a\x2b\xaf\x3b" + 
"\x3e\x74\x7f\x07\x67\x8e\xb8\x0d" + "\x86\xe2\xea\x2c\x4a\x74\xdc\x9f" + "\x53\x72\xd1\x2e\x97\x0d\x0b\xa5" + "\x05\x87\x8e\x86\x69\x8d\x26\xfb" + "\x90\xc8\xab\x0e\xac\xaf\x84\x1c", + .ctext = "\x3c\x91\xab\x71\xe4\x77\x3e\xb0" + "\x7f\x20\x2e\xd0\xe1\xbe\xfd\x3c" + "\x06\x6c\x36\x75\x46\x27\xfd\x2d" + "\xba\x0f\xf0\x3c\x6d\x1e\x4b\x20" + "\xe9\x5e\x30\xd8\x03\xc6\xa0\x86" + "\xa8\xc7\xa4\x7f\x0e\x1f\x35\x55" + "\x24\x53\x02\xd5\x77\x30\x73\xdc" + "\xa5\xaf\x19\x92\x5b\x36\x86\x0e" + "\xcf\xf2\x5c\x00\xde\x92\xbf\x89" + "\x76\x46\xd5\x26\xb1\x8d\xa4\xef" + "\x61\x7e\x78\xb4\x68\xf5\x5b\x1d" + "\x39\x65\x32\x3a\xad\xff\x8b\x37" + "\x60\xc2\x8a\xaf\x48\x96\x8b\x9f" + "\x12\x6c\x70\x77\x95\xf3\x58\xb0", + .len = 112, + }, { + .key = "\xf9\x9f\x6a\x87\xa1\x2d\x6e\xac" + "\xde\xbb\x3e\x15\x5e\x49\xa4\xef", + .klen = 16, + .iv = "\xeb\x8e\x4f\xbe\x4b\x47\xd6\x4f" + "\x65\xd0\xfa\xee\xa6\xf1\x2c\xda", + .ptext = "\xa3\xfa\x4f\xf6\x00\x12\xbe\xc1" + "\x90\xcc\x91\x88\xbd\xfb\x1c\xdb" + "\x2b\xc8\xb9\x3d\x98\x01\xc8\x1f" + "\x07\xb4\xf3\x10\x1d\xfd\xb7\x2e" + "\xcb\x1c\x1f\xe0\x2d\xca\xd3\xc7" + "\xb2\xce\x52\xf1\x7e\xcb\x7c\x50" + "\x0c\x5c\x53\x6b\x18\x62\x02\x54" + "\xbc\x9d\x1f\xda\xd9\x7a\x2d\xff" + "\xb8\x2c\x65\xad\xf1\xfe\xb6\xa4" + "\x8c\xe8\x0a\xb7\x67\x60\xcb\x38" + "\xd7\x72\xa5\xb1\x92\x13\x8e\xd4" + "\xcd\xb3\x04\xb5\xa1\x11\x96\x37" + "\xb3\x53\xa6\xc4\x14\x56\x6d\x42" + "\x66\x43\x40\x42\x41\x63\x11\x7a" + "\xd5\x34\x38\x75\xd0\xbc\x74\x89" + "\x82\x1d\x2c\x0a\x3e\x6a\xfb\xbd", + .ctext = "\x09\x58\xf3\x22\xe5\x10\xf6\x3d" + "\xba\xb1\xfa\x5a\x16\xfe\xc5\x32" + "\x3d\x34\x59\x2e\x81\xde\x99\x2f" + "\xeb\x6a\x97\x86\x1f\x47\x8d\xe6" + "\x87\x79\x0e\xfe\xa4\xca\x09\xdc" + "\x24\x9b\xbb\xb1\x90\x33\xce\xd7" + "\x62\xfd\xfd\xa3\x65\x50\x07\x7c" + "\x4c\xa2\x10\xc7\x32\x0a\x0d\x5e" + "\x22\x29\x40\x71\xe5\xcc\x3a\x5b" + "\x5b\x53\x51\xa5\x5b\xc1\x76\x05" + "\x84\x6e\xe3\x58\x2b\xf2\x28\x76" + "\x5c\x66\x90\xfe\x63\x30\x1c\x45" + "\x26\x34\x80\xfe\x76\x87\x5b\xb1" + "\x63\x10\x09\xf6\x9d\x35\xcb\xee" + "\x3c\x60\x9d\x77\x5b\x36\x70\x09" + "\x4b\x63\x63\x90\x97\x3a\x6c\x8a", + .len = 128, + }, { + .key = "\x04\xb9\x6c\x8f\x5e\x79\x02\x87" + "\x88\x06\x7c\xfa\xd3\x7b\x56\xfe", + .klen = 16, + .iv = "\x4b\xc8\x93\x20\x98\x04\xba\x5a" + "\x22\x04\x1f\x3f\x79\x2c\x63\x79", + .ptext = "\xf3\x85\x3e\x75\x97\x10\x7c\x5d" + "\x39\x5a\x46\x47\xe7\x51\xa3\xac" + "\x84\x56\x3f\x1b\xb3\x93\x6a\x2e" + "\xf7\x8f\x63\xbe\x18\xff\xd7\x53" + "\xc8\xe0\xa5\xde\x86\xc2\xe4\xab" + "\xc3\x67\x27\x91\x43\x8c\xff\x6c" + "\xc7\x07\xc2\xcd\xe9\x12\x8b\xef" + "\x47\xe7\x82\xed\xe3\x8d\x5e\x33" + "\xca\xf1\x28\x32\xf4\x38\x41\x59" + "\x6c\x54\xa6\x40\xb0\xd5\x73\x26" + "\x5b\x02\xa6\x9d\x01\x29\x26\x84" + "\x5b\x33\x04\x36\xa4\x7b\x00\x01" + "\x42\xe1\x4f\xda\xa9\x1a\x9b\x4e" + "\x7d\x4a\x4c\xbc\xf6\xd4\x06\xc2" + "\x89\x70\x72\xf5\xc5\x7f\x42\xd5" + "\x7b\x9c\x6f\x00\x21\x74\xc5\xa5" + "\x78\xd7\xa2\x3c\x6d\x0f\xfb\x74" + "\x3d\x70\x9f\x6d\xdd\x30\xc0\x28", + .ctext = "\xc0\x49\x98\xb9\xf6\x58\xeb\x56" + "\x36\x76\x7a\x40\x7c\x27\x80\x62" + "\xe3\xcb\x9c\x87\x2c\x03\xc2\x0c" + "\x82\x00\x50\xd2\xe4\x61\x4d\x54" + "\x88\x10\x6f\x0a\xb4\x25\x57\xba" + "\xf0\x07\xe3\x55\x06\xb3\x72\xe9" + "\x2f\x9f\x1e\x50\xa8\x15\x69\x71" + "\xe3\xe5\x50\x32\xe5\xe0\x47\x0f" + "\x3a\xaa\x7d\xc0\x09\x0e\xdb\x1a" + "\xae\xb6\xa5\x87\x63\xd6\xbe\x8b" + "\xb2\x3d\x10\x1e\xb3\x68\xcf\x8a" + "\xe5\xa8\x89\xa9\xfe\x79\x13\x77" + "\xc4\x3f\x6f\x9f\xdd\x76\x5b\xf2" + "\x05\x67\x8a\x58\xb4\x31\xac\x64" + "\x6f\xc4\xc1\x6b\x08\x79\x3f\xe5" + 
"\x1c\x9a\x66\x3f\x7d\x1f\x18\xb1" + "\x07\xa5\x7b\x4f\x2c\x43\x33\x84" + "\xab\x1b\xc0\x7d\x49\x2f\x27\x9b", + .len = 144, + }, { + .key = "\x99\x79\xaf\x3c\xfb\xbd\xe7\xca" + "\xee\x4a\x4d\xb2\x23\x1e\xb6\x07", + .klen = 16, + .iv = "\xb4\xfc\xaa\xc1\x08\xbf\x68\xb2" + "\xf6\xef\x29\xbc\x2d\x92\xa9\x40", + .ptext = "\xd3\x44\xe4\xd9\x6c\x8a\x1d\x4b" + "\xfe\x64\x25\xb6\x72\x21\xda\x10" + "\x3e\x77\xee\xd1\x41\xd3\xea\xf0" + "\xee\xee\x72\x0f\xad\xa1\xca\xf3" + "\x7e\xfa\x99\x36\xe0\x8f\xed\x40" + "\xf1\x12\x80\x73\xd6\x26\x3a\xa6" + "\x5d\x71\xf6\xd5\xe1\xf3\x89\x16" + "\x6f\x96\x00\xcf\x26\x06\x2a\x27" + "\xe4\xc2\x57\xba\x1f\x74\x5e\x91" + "\x10\x7e\xe5\x51\x17\xd5\xdc\xb2" + "\x5b\x12\x4b\x33\xb1\xc6\x4e\x0d" + "\xbf\x0e\x5d\x65\x61\x68\xd1\xc5" + "\x4b\xc5\xa4\xcd\xf0\xe0\x79\x26" + "\xa3\xcd\xdc\xb8\xfc\xd5\xca\x1d" + "\x7e\x81\x74\x55\x76\xf5\x40\xbb" + "\x26\x7f\x11\x37\x23\x70\xc8\xb6" + "\xfc\x2b\x0b\xd7\x1c\x7b\x45\xe7" + "\xf2\x2a\xed\x10\x4f\xcf\x0c\xcd" + "\x0f\xe7\xf9\xa1\xfb\x27\x67\x09" + "\xee\x11\xa2\xaf\x37\xc6\x16\xe0", + .ctext = "\x60\xce\x9a\xdb\xb2\xe8\xa2\x64" + "\x35\x9c\x5b\x97\x21\x9b\x95\x89" + "\x7b\x89\x15\x01\x97\x8b\xec\x9b" + "\xb9\xce\x7d\xb9\x9d\xcc\xd0\xa0" + "\xda\x39\x5d\xfd\xb9\x51\xe7\x2f" + "\xe7\x9b\x73\x1b\x07\xfb\xfd\xbb" + "\xce\x84\x68\x76\x12\xc9\x6c\x38" + "\xc0\xdc\x67\x96\x5e\x63\xcf\xe5" + "\x57\x84\x7a\x14\x8c\xab\x38\x94" + "\x1c\x27\xc3\xe0\x03\x58\xfe\x98" + "\x97\xfc\x96\xba\x65\x87\x1e\x44" + "\xf8\x00\x91\x6a\x14\x05\xf3\xf9" + "\x8e\x3e\x7a\x3c\x41\x96\x15\x4f" + "\xa8\xc0\x73\x1f\x1b\xeb\xaf\xec" + "\xc4\x5a\x35\xed\x42\x2f\x47\xea" + "\xfd\x2f\x29\xf6\x0f\x58\x8b\x3d" + "\x15\x81\xe3\xa4\xa6\x5f\x33\x33" + "\xe9\x0d\x06\x4f\x7f\x89\x2c\x3d" + "\x18\x45\x1f\xd1\xc5\x74\xf7\x52" + "\x2f\x9b\x72\x3d\x1f\xad\x12\x1b", + .len = 160, + }, { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1" + "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0", + .klen = 24, + .iv = "\xfd\xab\x56\xa6\x6e\xda\x7c\x57" + "\x36\x36\x89\x09\xcd\xa8\xd3\x91", + .ptext = "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0" + "\x51\xe3\x8c\xe9\x76\xcd\xff\x37", + .ctext = "\x2d\x8f\x39\x71\x0a\x2c\xc9\x93" + "\xb6\x1a\x5c\x53\x06\x4d\xaa\xcf", + .len = 16, + }, { + .key = "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe" + "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .klen = 24, + .iv = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e", + .ptext = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1" + "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .ctext = "\xc1\x53\x86\xf8\x60\x5d\x72\x59" + "\x7e\xdf\xc8\xdb\x85\xd6\x9f\x2a" + "\xa1\xda\xe5\x85\x78\x4f\x1b\x6f" + "\x58\xf3\x2b\xff\x34\xe4\x97\x4e", + .len = 32, + }, { + .key = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4" + "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0", + .klen = 24, + .iv = "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2", + .ptext = "\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e" + "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17" + "\xd9\xa0\x57\x80\xc8\x96\x70\x86", + .ctext = "\x25\x5f\x66\x15\xb5\x62\xfb\x55" + "\xb3\x77\xa1\x7d\x03\xba\x86\x0a" + "\x0d\x5b\xbb\x06\xe9\xe2\xa8\x41" + "\xa3\x58\xd6\x4b\xcb\x7f\xd0\x15" + "\x3b\x02\x74\x5d\x4c\x4c\xb0\xa5" + "\x06\xc9\x59\x53\x2a\x36\xeb\x59", + .len = 48, + }, { + .key = "\x07\x2c\xf4\x61\x79\x09\x01\x8f" + 
"\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57", + .klen = 24, + .iv = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40", + .ptext = "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa" + "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac" + "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ctext = "\x91\x02\xa9\xd3\x4b\x9a\x8f\xe6" + "\x9f\xe4\x51\x57\xc9\x42\xda\x68" + "\xca\xf6\x54\x51\x90\xec\x20\x2e" + "\xab\x25\x6c\xd9\x8b\x99\xa6\x1c" + "\x72\xc9\x01\xd6\xbc\x2b\x26\x78" + "\x42\x00\x84\x0a\xdd\xa8\xd9\xb5" + "\xc6\xc8\x30\xb6\xab\xea\x71\x84" + "\xb2\x57\x97\x32\xdb\x35\x23\xd8", + .len = 64, + }, { + .key = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d", + .klen = 24, + .iv = "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38", + .ptext = "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8" + "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3" + "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a" + "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41", + .ctext = "\x28\x23\x3a\x4a\x18\xb7\xb6\x05" + "\xd4\x1b\x6a\x9e\xa7\xf2\x38\x01" + "\x78\xd3\xb0\x1b\x95\x68\x59\xf1" + "\xc0\xed\x30\x46\x2e\xb9\xa6\xdc" + "\xde\xef\xa6\x85\x19\xfc\x4d\x36" + "\x5d\x24\x92\x62\x75\x32\x76\x6d" + "\x6d\xa9\x07\xe1\x4f\x59\x84\x1a" + "\x68\x9a\x07\x48\xd3\x86\xf6\xf1" + "\x5b\xf9\x35\xec\x7c\xaf\x47\x13" + "\x9c\xc9\x33\x12\x10\x2f\x94\x8a", + .len = 80, + }, { + .key = "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50", + .klen = 24, + .iv = "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2", + .ptext = "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda" + "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1" + "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad" + "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e", + .ctext = "\x38\x5b\x16\xef\xb8\x8c\x74\x7a" + "\x55\x17\x71\xa7\x7d\x34\xd7\x6a" + "\xc6\x31\x55\x6f\xbb\x61\xf4\x12" + "\x81\x8c\x91\x0d\x10\xdb\xd5\x22" + "\x77\x36\x32\xb6\x77\xb1\x5e\x21" + "\xb5\xec\xf9\x64\x04\x90\x6f\xc6" + "\x8a\x86\x23\xb5\xfe\xa4\xb6\x84" + "\x91\xa1\x60\xe3\xd7\xf3\xb9\xda" + "\x96\x23\x4a\xb3\xab\x75\x84\x04" + "\x15\x1a\xbb\xe8\x02\x1e\x80\x7c" + "\xc1\x93\x01\x0f\x5c\x4a\xde\x85" + "\xbb\x93\x05\x66\x53\x74\x40\x56", + .len = 96, + }, { + .key = "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb", + .klen = 24, + .iv = "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17", + .ptext = "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95" + "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15" + "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + 
"\xb1\x9b\x5d\x00\x10\xe9\x70\x12" + "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41", + .ctext = "\x4b\x56\xe0\xc2\x65\x2f\x7c\x6f" + "\xee\x22\xeb\x34\x1c\xa5\xb7\xc8" + "\x35\xd7\x51\xfd\x6a\xf4\xdd\xc3" + "\x38\xf4\xfc\x9d\x2e\xc2\x77\xb7" + "\x93\x8e\x8c\xb3\x44\x9b\xaf\xbb" + "\x99\xb9\xa8\x38\x1c\xfe\x63\xfb" + "\x1f\xa0\xaa\x35\x29\x7b\x87\x49" + "\x8e\x93\xa5\xb8\x5a\x85\x37\xa7" + "\x67\x69\x49\xbd\xc3\xfa\x89\x1c" + "\xf5\x60\x9b\xe7\x71\x96\x95\xd9" + "\x0b\x98\xe6\x74\x1d\xa3\xd9\x89" + "\x03\xe4\xf6\x66\xb3\x73\xb1\xac" + "\x9f\xee\x8f\xc2\x96\xcc\x97\x78" + "\x1b\x96\x63\x64\x00\x9c\x2d\x29", + .len = 112, + }, { + .key = "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3", + .klen = 24, + .iv = "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe", + .ptext = "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c" + "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c" + "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33" + "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a", + .ctext = "\x4d\x35\x70\xf1\x25\x02\x1d\x7f" + "\x9e\x0f\x5b\x4b\x65\xab\xcc\x6b" + "\x62\xab\x2b\xfa\xc0\x66\xee\x56" + "\xb4\x66\x95\x22\x84\x39\xd8\x3f" + "\x74\xba\x4f\x3f\xcd\xef\xcf\xf6" + "\x76\xeb\x9e\x8a\xec\x9c\x31\xa0" + "\x3e\x0c\xf9\xfa\x57\x90\xb4\x02" + "\xac\xc8\x28\xda\xa0\x05\xb7\x7e" + "\x75\x9c\x79\x36\xa9\x2f\x1a\x36" + "\x56\x77\xda\x74\xc7\xb3\xdf\xf3" + "\xb9\x83\x10\xf3\x6b\xe1\xdf\xcb" + "\x11\x70\xb1\xa0\x68\x48\x26\x95" + "\x10\x91\x94\xf3\xe9\x82\xb4\x8a" + "\xaa\xde\xf8\x9f\xce\x82\x47\x18" + "\x37\x5d\xda\x34\x74\x4d\x36\xbd" + "\xa5\x6c\xa4\xb3\x70\xad\x00\xbd", + .len = 128, + }, { + .key = "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8", + .klen = 24, + .iv = "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e", + .ptext = "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46" + "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b" + "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12" + "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c", + .ctext = "\xa1\x4a\x83\xb2\xe0\xef\x3d\x94" + "\xa4\x34\x66\x93\xb4\x89\x4e\x12" + "\xe5\x61\xc9\xea\xe0\x16\x96\x1a" + "\x3e\x94\x20\x81\xd4\x12\x7f\xf4" + "\xb8\x3f\xc9\xe2\x99\xb5\x0f\x9e" + "\x71\x86\x4f\x13\x78\x4e\xf1\x51" + "\xd4\x7d\x6e\x47\x31\x9a\xd8\xf7" + "\xb9\xb1\x17\xd0\xbd\xbf\x72\x86" + "\xb4\x58\x85\xf0\x05\x67\xc4\x00" + "\xca\xcb\xa7\x1a\x1d\x88\x29\xf4" + "\xe2\xf6\xdd\x5a\x3e\x5a\xbb\x29" + "\x48\x5a\x4a\x18\xcd\x5c\xf1\x09" + "\x5b\xbe\x1a\x43\x12\xc5\x6e\x6e" + "\x5e\x6d\x3b\x22\xf7\x58\xbd\xc8" + 
"\xb1\x04\xaf\x44\x9c\x2b\x98\x5a" + "\x14\xb7\x35\xb8\x9a\xce\x32\x28" + "\x1f\x8d\x08\x8a\xb9\x82\xf0\xa5" + "\x6a\x37\x29\xb6\x29\x3a\x53\x5e", + .len = 144, + }, { + .key = "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57", + .klen = 24, + .iv = "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97", + .ptext = "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66" + "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd" + "\x45\x87\xa4\x14\x1b\xef\xe7\x16" + "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e" + "\x01\x68\xc1\x7d\xa2\x15\x49\x74" + "\x53\x82\xc2\x10\xa8\x45\x73\x4d" + "\x41\xcc\x24\xa3\x42\xff\x30\xd1" + "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c" + "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe" + "\x52\xf1\x34\x78\x34\x53\x30\x5b" + "\x43\x43\x51\x6a\x02\x81\x64\x0c", + .ctext = "\xd9\xed\xc8\xc7\x66\xcd\x06\xc5" + "\xc1\x25\x9b\xf5\x14\x71\x1d\x69" + "\xc9\x7c\x04\x40\xab\xc0\x44\xf4" + "\xa1\xe6\x57\x8b\x35\x62\x4e\x3f" + "\xce\x4a\x99\xcd\x95\xc4\xd1\xf3" + "\xbc\x25\xa2\x18\xe6\xd1\xf7\xc0" + "\x13\x98\x60\x4c\x5c\xb1\x4f\x7a" + "\xbc\x45\x12\x52\xe8\x71\xb0\xf1" + "\x18\xef\x6f\x8a\x63\x35\x17\xae" + "\x90\x31\x41\x9d\xf4\xdc\x35\xcc" + "\x49\x72\x10\x11\x3b\xe3\x40\x7a" + "\x8e\x21\x39\xd0\x5b\x82\xb1\xe9" + "\x0c\x37\x5a\x7c\x11\xcb\x96\xd9" + "\xd4\x1c\x47\x4b\x70\xcb\xca\x08" + "\x5f\x71\xe9\x48\xf6\x29\xd8\xbb" + "\x5c\xad\x9b\x23\x9f\x62\xaf\xef" + "\x8e\xd8\x99\x1d\x60\xad\xc3\x6f" + "\xed\x06\x1a\xec\xfa\xc0\x0f\x0d" + "\xb7\x00\x02\x45\x7c\x94\x23\xb6" + "\xd7\x26\x6a\x16\x62\xc4\xd9\xee", + .len = 160, + }, { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1" + "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0" + "\xfd\xab\x56\xa6\x6e\xda\x7c\x57", + .klen = 32, + .iv = "\x36\x36\x89\x09\xcd\xa8\xd3\x91" + "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0", + .ptext = "\x51\xe3\x8c\xe9\x76\xcd\xff\x37" + "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe", + .ctext = "\x05\x31\x46\x6d\xb8\xf4\x92\x64" + "\x46\xfd\x0d\x96\x60\x01\xd7\x94", + .len = 16, + }, { + .key = "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22" + "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e", + .klen = 32, + .iv = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1", + .ptext = "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d" + "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4", + .ctext = "\x24\x36\xe4\x14\xb7\xe1\x56\x8a" + "\xf3\xc5\xaf\x0e\xa7\xeb\xbd\xcd" + "\x2d\xe9\xd7\x19\xae\x24\x5d\x3b" + "\x1d\xfb\xdc\x21\xb3\x1a\x37\x0b", + .len = 32, + }, { + .key = "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0" + "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2" + "\x39\x56\x34\x63\x2c\xc5\x51\x13", + .klen = 32, + .iv = "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e", + .ptext = "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17" + "\xd9\xa0\x57\x80\xc8\x96\x70\x86" + "\x07\x2c\xf4\x61\x79\x09\x01\x8f" + "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57", + .ctext = "\x2e\x73\x60\xec\xd3\x95\x78\xe8" + "\x0f\x98\x1a\xc2\x92\x49\x0b\x49" + "\x71\x42\xf4\xb0\xaa\x8b\xf8\x53" + 
"\x16\xab\x6d\x74\xc0\xda\xab\xcd" + "\x85\x52\x11\x20\x2c\x59\x16\x00" + "\x26\x47\x4a\xea\x08\x5f\x38\x68", + .len = 48, + }, { + .key = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40" + "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c", + .klen = 32, + .iv = "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa", + .ptext = "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac" + "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44" + "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d" + "\xb2\x92\x83\x70\x1e\xa3\x97\xa6", + .ctext = "\xfb\xd3\xc3\x8b\xf7\x89\xcc\x31" + "\xb1\x7f\xc3\x91\xdc\x04\xc6\xd7" + "\x33\xbd\xe0\xee\x0c\xd5\x70\xed" + "\x1b\x1d\xad\x49\x6f\x5c\xa1\x68" + "\xd7\x03\xc9\x65\xa7\x90\x30\x2b" + "\x26\xeb\xf4\x7a\xac\xcc\x03\xe1" + "\x6a\xe5\xdb\x23\x10\x8a\xcd\x70" + "\x39\x4d\x7a\xc9\xcd\x62\xd1\x65", + .len = 64, + }, { + .key = "\x65\x53\x39\xeb\x53\x8f\xb1\x38" + "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8", + .klen = 32, + .iv = "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8", + .ptext = "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3" + "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a" + "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41" + "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50" + "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2", + .ctext = "\xa2\x51\x28\xc2\x5e\x58\x1c\xaf" + "\x84\x92\x1c\xe1\x92\xf0\xf9\x9e" + "\xf2\xb3\xc6\x2b\x34\xd2\x8d\xa0" + "\xb3\xd7\x87\x56\xeb\xd9\x32\x6a" + "\xca\x90\x28\x26\x49\x34\xca\x41" + "\xce\xc5\x9e\xd6\xfe\x57\x71\x3c" + "\x98\xaf\xdd\xfc\x7d\xdf\x26\x7e" + "\xb7\x9c\xd5\x15\xe5\x81\x7a\x4f" + "\x4f\x4f\xe5\x77\xf2\x2e\x67\x68" + "\x52\xc1\xac\x28\x2c\x88\xf4\x38", + .len = 80, + }, { + .key = "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95", + .klen = 32, + .iv = "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda", + .ptext = "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1" + "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad" + "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e" + "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb" + "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17" + "\x58\x2b\x1d\x73\x9a\x9c\x63\x18", + .ctext = "\xd1\xce\xbe\xe0\x4a\x6e\x6d\x7f" + "\x89\x19\x28\xb1\xca\xe8\xc1\x9c" + "\x8c\x0b\x7d\x63\xfe\xff\x3d\xf4" + "\x65\x9e\xd6\xe7\x2f\x5a\xc1\x31" + "\x1e\xe7\x59\x27\x54\x92\xcc\xaa" + "\x5b\x3d\xeb\xe7\x96\xc1\x49\x54" + "\x18\xf3\x14\xaa\x56\x03\x28\x53" + "\xaa\x0a\x91\xdf\x92\x96\x9b\x06" + "\x1a\x24\x02\x09\xe7\xa6\xdc\x75" + "\xeb\x00\x1d\xf5\xf2\xa7\x4a\x9d" + "\x75\x80\xb7\x47\x63\xfc\xad\x18" + "\x85\x5f\xfc\x64\x03\x72\x38\xe7", + .len = 96, + }, { + .key = "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0", + .klen = 32, + .iv = "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95", + .ptext = "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + 
"\xa4\xbb\x0f\xbd\x67\x3c\x73\x15" + "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12" + "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41" + "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3" + "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe" + "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c", + .ctext = "\x0b\x07\xdc\x6a\x47\x45\xd2\xb0" + "\xa3\xf2\x42\x2f\xa4\x79\x6b\x4c" + "\x53\x9c\x8a\x2f\x48\x9c\xf2\x89" + "\x73\x8b\xdd\x97\xde\x41\x06\xc8" + "\x8a\x30\x7a\xa9\x90\x4a\x43\xd0" + "\xd5\xee\x16\x51\x44\xda\xe4\xb8" + "\xe8\x5f\x6f\xef\x84\xf3\x44\x43" + "\xbd\xdc\xc3\xdf\x65\x2b\xaf\xf6" + "\xfe\xd0\x4a\x5b\x30\x47\x8c\xaf" + "\x8d\xed\x2d\x91\xa1\x03\x9a\x80" + "\x58\xdd\xaa\x8f\x3b\x6b\x39\x10" + "\xe5\x92\xbc\xac\xaa\x25\xa1\x13" + "\x7e\xaa\x03\x83\x05\x83\x11\xfe" + "\x19\x5f\x04\x01\x48\x00\x3b\x58", + .len = 112, + }, { + .key = "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2", + .klen = 32, + .iv = "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c", + .ptext = "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c" + "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33" + "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a" + "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8" + "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e" + "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a", + .ctext = "\xfe\xba\x8f\x68\x47\x55\xaa\x61" + "\x48\xdd\xf3\x7c\xc4\xdc\xa6\x93" + "\x4e\x72\x3f\xc7\xd0\x2b\x9b\xac" + "\xc1\xb5\x95\xf8\x8e\x75\x62\x0c" + "\x05\x6a\x90\x76\x35\xed\x73\xf2" + "\x0f\x44\x3d\xaf\xd4\x00\xeb\x1d" + "\xad\x27\xf2\x2f\x55\x65\x91\x0f" + "\xe4\x04\x9c\xfb\x8a\x18\x22\x8e" + "\x21\xbe\x93\x09\xdd\x3e\x93\x34" + "\x60\x82\xcd\xff\x42\x10\xed\x43" + "\x3a\x4b\xb8\x5c\x6c\xa8\x9e\x1c" + "\x95\x6a\x17\xa7\xa3\xe0\x7d\xdb" + "\x6e\xca\xaf\xc1\x1f\xb2\x86\x15" + "\xf0\xc1\x55\x72\xf2\x74\x44\xeb" + "\x09\x09\x83\x8b\x2c\xc9\x63\x13" + "\x99\xe3\xe1\x4b\x5c\xf7\xb1\x04", + .len = 128, + }, { + .key = "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62", + .klen = 32, + .iv = "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46", + .ptext = "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b" + "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12" + "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c" + "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57" + "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97" + "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01", + .ctext = "\xa5\x19\x33\xad\x2d\x1a\x7b\x34" + "\xb0\x21\x68\x0e\x20\x11\x7a\x37" + "\xef\x35\x33\x64\x31\x0a\x42\x77" + 
"\x2c\x7f\x1a\x34\xd6\x93\x2d\xe9" + "\x26\xb9\x15\xec\x4f\x83\xbd\x48" + "\x5b\xe9\x63\xea\x10\x3b\xec\xfb" + "\xb0\x5e\x81\x90\xf0\x07\x43\xc4" + "\xda\x54\x69\x98\x13\x5d\x93\x16" + "\xca\x06\x81\x64\x36\xbe\x36\xa2" + "\xd4\xd8\x48\x63\xc7\x53\x39\x93" + "\x6d\x6b\xd6\x49\x00\x72\x5e\x02" + "\xc7\x88\x61\x0f\x10\x88\xd4\x9e" + "\x17\x81\xa4\xdc\x43\x4e\x83\x43" + "\xd4\xc3\xd7\x25\x9a\xd4\x76\xde" + "\x88\xe3\x98\x5a\x0e\x80\x23\xfb" + "\x49\xb3\x83\xf6\xb9\x16\x00\x06" + "\xa5\x06\x24\x17\x65\xbb\x68\xa9" + "\x56\x6d\xeb\xcd\x3c\x14\xd2\x64", + .len = 144, + }, { + .key = "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53", + .klen = 32, + .iv = "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66", + .ptext = "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd" + "\x45\x87\xa4\x14\x1b\xef\xe7\x16" + "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e" + "\x01\x68\xc1\x7d\xa2\x15\x49\x74" + "\x53\x82\xc2\x10\xa8\x45\x73\x4d" + "\x41\xcc\x24\xa3\x42\xff\x30\xd1" + "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c" + "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe" + "\x52\xf1\x34\x78\x34\x53\x30\x5b" + "\x43\x43\x51\x6a\x02\x81\x64\x0c" + "\xcd\x4b\xbf\x0f\xcb\x81\xd4\xec" + "\x1e\x07\x05\x4d\x5c\x6b\xba\xcc" + "\x43\xc7\xb1\xfe\xa8\xe9\x96\xb0" + "\xb1\xb2\xd4\x70\x44\xbc\xaa\x50" + "\xbf\x3f\x81\xe6\xea\x36\x7d\x97" + "\x2a\xbd\x52\x16\xf7\xbe\x59\x27" + "\x8f\xcc\xe3\xa9\xec\x4f\xcd\xd3" + "\xf4\xe2\x54\xbe\xf1\xf9\x2b\x23" + "\x40\xc7\xcb\x67\x4d\x5f\x0b\xd4" + "\xbf\x19\xf0\x2a\xef\x37\xc6\x56", + .ctext = "\x0a\x69\xd8\x67\x33\x2a\x2f\xa9" + "\x26\x79\x65\xd6\x75\x1e\x98\xe8" + "\x52\x56\x32\xbf\x67\x71\xf4\x01" + "\xb1\x6f\xef\xf9\xc9\xad\xb3\x49" + "\x7a\x4f\x24\x9a\xae\x06\x62\x26" + "\x3e\xe4\xa7\x6f\x5a\xbf\xe9\x52" + "\x13\x01\x74\x8b\x6e\xb1\x65\x24" + "\xaa\x8d\xbb\x54\x21\x20\x60\xa4" + "\xb7\xa5\xf9\x4e\x7b\xf5\x0b\x70" + "\xd2\xb9\xdc\x9b\xdb\x2c\xb2\x43" + "\xf7\x71\x30\xa5\x13\x6f\x16\x75" + "\xd0\xdf\x72\xae\xe4\xed\xc1\xa3" + "\x81\xe0\xd5\xc0\x0e\x62\xe8\xe5" + "\x86\x2c\x37\xde\xf8\xb0\x21\xe4" + "\xcd\xa6\x76\x9b\xa1\x56\xd3\x67" + "\x70\x69\xd6\x5d\xc7\x65\x19\x59" + "\x43\x9c\xca\x32\xe9\xd1\x48\x92" + "\x71\x79\x87\x73\x24\xcb\xc0\x0f" + "\x23\x3b\x8f\x51\x8a\xb3\x3a\x9c" + "\x74\xa4\x19\xa7\xe4\x4f\x6b\x32", + .len = 160, + } +}; + +static const struct cipher_testvec aria_ctr_tv_template[] = { + { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1", + .klen = 16, + .iv = "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0" + "\xfd\xab\x56\xa6\x6e\xda\x7c\x57", + .ptext = "\x36\x36\x89\x09\xcd\xa8\xd3\x91" + "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0", + .ctext = "\x19\x28\xb5\xf2\x1c\xbc\xf8\xaf" + "\xb9\xae\x1b\x23\x4f\xe1\x6e\x40", + .len = 16, + }, { + .key = "\x51\xe3\x8c\xe9\x76\xcd\xff\x37" + "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe", + .klen = 16, + .iv = "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .ptext = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e" + "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1", + .ctext = "\x3f\x8c\xa9\x19\xd6\xb4\xfb\xed" + "\x9c\x6d\xaa\x1b\xe1\xc1\xe6\xa8" + "\xa9\x0a\x63\xd3\xa2\x1e\x6b\xa8" + "\x52\x97\x1e\x81\x34\x6f\x98\x0e", + .len = 32, + }, { + .key = "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .klen = 16, + .iv = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4", + .ptext = "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0" + "\xef\x3e\x5f\x46\x62\x88\xd5\x26" 
+ "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2" + "\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e", + .ctext = "\x28\xd8\xa7\xf8\x74\x98\x00\xfc" + "\xd6\x48\xad\xbd\xbe\x3f\x0e\x7b" + "\x3d\x46\xfd\xde\x3e\x4f\x12\x43" + "\xac\x85\xda\xff\x70\x24\x44\x9d" + "\x1e\xf8\x9f\x30\xba\xca\xe0\x97" + "\x03\x6d\xe1\x1d\xc7\x21\x79\x37", + .len = 48, + }, { + .key = "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17", + .klen = 16, + .iv = "\xd9\xa0\x57\x80\xc8\x96\x70\x86" + "\x07\x2c\xf4\x61\x79\x09\x01\x8f", + .ptext = "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57" + "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40" + "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa", + .ctext = "\x29\x31\x55\xd2\xe5\x0b\x81\x39" + "\xf9\xbc\x63\xe2\xfa\x26\x99\xde" + "\xde\x18\x93\x68\x81\x7b\x0a\x4d" + "\xf6\x03\xe1\xee\xf9\x0e\x1f\xe8" + "\xa8\x80\x81\x46\xdc\x24\x43\x3f" + "\xff\xfe\x8c\x3e\x17\x0a\x6d\xa2" + "\x47\x55\x62\xa0\x03\x4e\x48\x67" + "\xa2\x64\xc0\x9b\x6c\xa4\xfd\x6a", + .len = 64, + }, { + .key = "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac", + .klen = 16, + .iv = "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ptext = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d" + "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38" + "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8", + .ctext = "\x38\xbc\xf5\x9d\x0e\x26\xa6\x18" + "\x95\x0b\x23\x54\x09\xa1\xf9\x46" + "\x12\xf1\x42\x57\xa1\xaa\x52\xfa" + "\x8a\xbd\xf2\x03\x63\x4e\xbc\xf7" + "\x21\xea\xed\xca\xdd\x42\x41\x94" + "\xe4\x6c\x07\x06\x19\x59\x30\xff" + "\x8c\x9d\x51\xbf\x2c\x2e\x5b\xa5" + "\x7d\x11\xec\x6b\x21\x08\x12\x18" + "\xe4\xdf\x5a\xfd\xa6\x5f\xee\x2f" + "\x5c\x24\xb7\xea\xc1\xcd\x6d\x68", + .len = 80, + }, { + .key = "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3", + .klen = 16, + .iv = "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a", + .ptext = "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41" + "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50" + "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2" + "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda", + .ctext = "\xdf\x79\x58\x30\x6f\x47\x12\x78" + "\x04\xb2\x0b\x1a\x62\x22\xe2\x9f" + "\xfe\xc2\xf5\x6d\x9e\x0e\x2e\x56" + "\x76\x01\x7f\x25\x8f\x6e\xc5\xf3" + "\x91\xff\xcd\x67\xc6\xae\x0b\x01" + "\x4d\x5f\x40\x25\x88\xc5\xe0\x3d" + "\x37\x62\x12\x58\xfe\xc5\x4a\x21" + "\x4a\x86\x8d\x94\xdd\xfd\xe6\xf6" + "\x1e\xa6\x78\x4f\x90\x66\xda\xe4" + "\x4e\x64\xa8\x05\xc6\xd8\x7d\xfb" + "\xac\xc9\x1d\x14\xb5\xb0\xfa\x9c" + "\xe8\x84\xef\x87\xbe\xb4\x2a\x87", + .len = 96, + }, { + .key = "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1", + .klen = 16, + .iv = "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad", + .ptext = "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e" + 
"\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb" + "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17" + "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95", + .ctext = "\xe4\x25\x0d\x22\xeb\xbe\x5e\x90" + "\x01\xe5\xae\xc9\x94\xbd\x93\x89" + "\x5f\x98\xf1\x46\x6a\x50\x3b\xa2" + "\x79\xd9\xe4\x9c\x9a\xde\xf2\x8c" + "\x25\x49\x4c\xda\xb4\x2c\x76\xab" + "\x0a\xa8\x51\xaf\xc0\x62\x1b\xe9" + "\xe9\x7a\x35\x6a\x4b\x1f\x48\x00" + "\xeb\x24\x1d\x5e\xdd\x06\x09\x23" + "\x2a\xfa\x8f\x3b\x3e\x9e\x14\x6f" + "\x2a\x3c\xef\x6d\x73\x67\xdd\x6c" + "\xc8\xa5\x57\xc8\x02\xb6\x9a\xe8" + "\x8d\xcf\x10\xfa\x3e\x9c\x4d\xeb" + "\x44\xd2\x05\x31\x40\x94\x77\x87" + "\xf0\x83\xb5\xd2\x2a\x9c\xbc\xe4", + .len = 112, + }, { + .key = "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15", + .klen = 16, + .iv = "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12", + .ptext = "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41" + "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3" + "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe" + "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c", + .ctext = "\xa7\x4c\x96\x55\x7c\x07\xce\xb2" + "\x6f\x63\x9f\xc6\x8b\x6f\xc6\x4a" + "\x2c\x47\x8d\x99\xdf\x65\x75\x96" + "\xb7\x1d\x50\x5b\x57\x4a\x69\xcc" + "\xc9\x3a\x18\x8a\xd1\xab\x70\x4a" + "\xa3\x13\x80\xdd\x48\xc0\x6a\x7d" + "\x21\xa8\x22\x06\x32\x47\xc0\x16" + "\x1f\x9a\xc0\x21\x33\x66\xf2\xd8" + "\x69\x79\xae\x02\x82\x3f\xaf\xa6" + "\x98\xdb\xcd\x2a\xe5\x12\x39\x80" + "\x8a\xc1\x73\x99\xe5\xe4\x17\xe3" + "\x56\xc2\x43\xa6\x41\x6b\xb2\xa4" + "\x9f\x81\xc4\xe9\xf4\x29\x65\x50" + "\x69\x81\x80\x4b\x86\xab\x5e\x30" + "\xd0\x81\x9d\x6f\x24\x59\x42\xc7" + "\x6d\x5e\x41\xb8\xf5\x99\xc2\xae", + .len = 128, + }, { + .key = "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c", + .klen = 16, + .iv = "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33", + .ptext = "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a" + "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8" + "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e" + "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46", + .ctext = "\x11\x7f\xea\x49\xaf\x24\x52\xa2" + "\xde\x60\x99\x58\x23\xf9\x9e\x91" + "\x73\xd5\x9a\xcb\xdd\x10\xcd\x68" + "\xb8\x9e\xef\xa4\xe9\x2d\xf0\x27" + "\x44\xd4\x9a\xd6\xb6\x9c\x7a\xec" + "\x17\x17\xea\xa7\x8e\xa8\x40\x6b" + "\x43\x3d\x50\x59\x0f\x74\x1b\x9e" + "\x03\xed\x4f\x2f\xb8\xda\xef\xc3" + 
"\x3f\x29\xb3\xf4\x5c\xcd\xce\x3c" + "\xba\xfb\xc6\xd1\x1d\x6f\x61\x3a" + "\x2b\xbd\xde\x30\xc5\x53\xe0\x6e" + "\xbe\xae\x2f\x81\x13\x0f\xd2\xd5" + "\x14\xda\xd3\x60\x9c\xf8\x00\x86" + "\xe9\x97\x3e\x05\xb3\x95\xb3\x21" + "\x1f\x3c\x56\xef\xcb\x32\x49\x5c" + "\x89\xf1\x34\xe4\x8d\x7f\xde\x01" + "\x1f\xd9\x25\x6d\x34\x1d\x6b\x71" + "\xc9\xa9\xd6\x14\x1a\xf1\x44\x59", + .len = 144, + }, { + .key = "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b", + .klen = 16, + .iv = "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12", + .ptext = "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c" + "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57" + "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97" + "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66", + .ctext = "\x5b\xc0\xe8\x17\xa4\xf9\xea\xce" + "\x9e\xf9\xe0\xb1\xac\x37\xe9\x41" + "\x0b\x57\xc6\x55\x54\x50\xfa\xa9" + "\x60\xaf\x7a\x4e\x98\x56\xde\x81" + "\x14\xfc\xac\x21\x81\x3e\xf4\x0f" + "\x40\x92\x30\xa8\x16\x88\x1a\xc3" + "\xf1\x39\xbd\x0a\xb9\x44\xc8\x67" + "\x8c\xaa\x2b\x45\x8b\x5b\x7b\x24" + "\xd5\xd8\x9e\xd3\x59\xa5\xd7\x69" + "\xdf\xf4\x50\xf9\x5f\x4f\x44\x1f" + "\x2c\x75\x68\x6e\x3a\xa8\xae\x4b" + "\x84\xf0\x42\x6c\xc0\x3c\x42\xaf" + "\x87\x2b\x89\xe9\x51\x69\x16\x63" + "\xc5\x62\x13\x05\x4c\xb2\xa9\x69" + "\x01\x14\x73\x88\x8e\x41\x47\xb6" + "\x68\x74\xbc\xe9\xad\xda\x94\xa1" + "\x0c\x12\x8e\xd4\x38\x15\x02\x97" + "\x27\x72\x4d\xdf\x61\xcc\x86\x3d" + "\xd6\x32\x4a\xc3\xa9\x4c\x35\x4f" + "\x5b\x91\x7d\x5c\x79\x59\xb3\xd5", + .len = 160, + }, { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1" + "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0", + .klen = 24, + .iv = "\xfd\xab\x56\xa6\x6e\xda\x7c\x57" + "\x36\x36\x89\x09\xcd\xa8\xd3\x91", + .ptext = "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0" + "\x51\xe3\x8c\xe9\x76\xcd\xff\x37", + .ctext = "\xa4\x12\x2f\xc4\xf0\x6d\xd9\x46" + "\xe4\xe6\xd1\x0b\x6d\x14\xf0\x8f", + .len = 16, + }, { + .key = "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe" + "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .klen = 24, + .iv = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e", + .ptext = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1" + "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .ctext = "\x80\x2b\xf0\x88\xb9\x4b\x8d\xf5" + "\xc3\x0e\x15\x5b\xea\x5d\x5b\xa8" + "\x07\x95\x78\x72\xc0\xb9\xbf\x25" + "\x33\x22\xd1\x05\x56\x46\x62\x25", + .len = 32, + }, { + .key = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4" + "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0", + .klen = 24, + .iv = "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2", + .ptext = "\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e" + "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17" + "\xd9\xa0\x57\x80\xc8\x96\x70\x86", + .ctext = "\x65\x01\x3c\xb0\xac\x4c\x63\xb6" + 
"\xe7\xf1\xf4\x61\x35\xf4\x36\xde" + "\x7f\x85\xba\x41\xa8\xb0\x27\x11" + "\x86\x2c\x71\x16\x05\x1d\xcf\x70" + "\x35\xef\x23\x17\xfc\xed\x3f\x1a" + "\x8e\xb3\xe5\xdb\x90\xb4\xb8\x35", + .len = 48, + }, { + .key = "\x07\x2c\xf4\x61\x79\x09\x01\x8f" + "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57", + .klen = 24, + .iv = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40", + .ptext = "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa" + "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac" + "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ctext = "\x5a\xfb\xb1\x2c\x6e\xe5\xb8\xe0" + "\x80\xb6\x77\xa8\xfe\x10\x3a\x99" + "\x00\x8e\x30\x23\x7d\x50\x87\xda" + "\xc6\x46\x73\x37\x8b\xf1\xab\x26" + "\x2d\xa8\x0c\xa8\x9e\x77\xee\xfc" + "\x78\x4f\x03\x0f\xeb\xc6\x03\x34" + "\xb9\x9c\x4f\x59\x55\xc5\x99\x47" + "\xd4\x7e\xe8\x06\x43\x5f\xa1\x6b", + .len = 64, + }, { + .key = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d", + .klen = 24, + .iv = "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38", + .ptext = "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8" + "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3" + "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a" + "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41", + .ctext = "\xc9\x5f\xe0\x60\x61\x38\x7e\x79" + "\x52\x68\x64\x8f\x55\x9b\x6b\x72" + "\xbf\x09\xef\x2f\xb2\x92\xbb\xa3" + "\xe1\x6a\xeb\xe6\x4e\x7c\x5d\xe0" + "\x6a\x4b\xd0\x57\x3b\x28\x8a\x83" + "\x75\xd4\x5a\x2e\xd1\x9a\x57\xe3" + "\xc5\x43\x36\xde\x02\xac\x2c\x75" + "\xea\x33\x3a\x7e\x5d\xb8\xf6\x12" + "\x42\xbd\x06\x8a\x09\x6b\xd6\xb6" + "\x25\x59\xcd\xbd\x17\xeb\x69\xb3", + .len = 80, + }, { + .key = "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50", + .klen = 24, + .iv = "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2", + .ptext = "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda" + "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1" + "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad" + "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e", + .ctext = "\x03\x2c\x39\x24\x99\xb5\xf6\x79" + "\x91\x89\xb7\xf8\x89\x68\x37\x9d" + "\xe7\x4d\x7d\x1c\x36\xae\x98\xd2" + "\xbf\x2a\xa4\x30\x38\x30\xe7\x5d" + "\xbb\x00\x09\x40\x34\xa4\xef\x82" + "\x23\xca\x0e\xb3\x71\x80\x29\x0a" + "\xa9\x0b\x26\x65\x9a\x12\xbf\x18" + "\xfb\xf8\xe4\xc2\x62\x57\x18\xfb" + "\x1e\x98\xea\x5b\xf6\xd6\x7c\x52" + "\x7a\xba\x0e\x6a\x54\x19\xb6\xfa" + "\xe5\xd7\x60\x40\xb0\x1a\xf1\x09" + "\x70\x96\x23\x49\x98\xfc\x79\xd2", + .len = 96, + }, { + .key = "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb", + .klen = 24, + .iv = "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17", + .ptext = "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + 
"\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95" + "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15" + "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12" + "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41", + .ctext = "\xd4\x9a\x04\x54\x05\xd2\xe6\x3f" + "\xb0\xa4\x36\x5e\x1e\x9c\x35\xb0" + "\xa6\x62\x35\x47\xf4\x4d\x08\x9e" + "\x1c\x22\x91\x8e\x7f\x00\xa6\x3e" + "\x0a\x04\x42\x0f\xc4\xa6\x5d\xe2" + "\x49\x4c\x61\x12\xea\x9d\x7d\x7c" + "\xfa\x93\x74\x6b\x79\x8c\xdb\xc6" + "\x47\xf6\xea\x84\x3e\x97\x7d\x87" + "\x40\x38\x92\xc7\x44\xef\xdf\x63" + "\x29\xe4\x5b\x3a\x87\x22\xa1\x3f" + "\x2b\x31\xb1\xa4\x0d\xea\xf3\x0b" + "\xd7\x4f\xb6\x9c\xba\x40\xa3\x2f" + "\x21\x2b\x05\xe4\xca\xef\x87\x04" + "\xe6\xd0\x29\x2c\x29\x26\x57\xcd", + .len = 112, + }, { + .key = "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3", + .klen = 24, + .iv = "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe", + .ptext = "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c" + "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c" + "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33" + "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a", + .ctext = "\xbc\x57\x2a\x88\x0a\xd0\x06\x4f" + "\xdb\x7b\x03\x9f\x97\x1a\x20\xfe" + "\xdb\xdc\x8e\x7b\x68\x13\xc8\xf5" + "\x06\xe3\xe0\x7e\xd3\x51\x21\x86" + "\x4f\x32\xdb\x78\xe3\x26\xbe\x34" + "\x52\x4c\x4e\x6b\x85\x52\x63\x8b" + "\x8c\x5c\x0e\x33\xf5\xa3\x88\x2d" + "\x04\xdc\x01\x2d\xbe\xa1\x48\x6d" + "\x50\xf4\x16\xb1\xd7\x4d\x1e\x99" + "\xa8\x1d\x54\xcb\x13\xf9\x85\x51" + "\x18\x9f\xef\x45\x62\x5d\x48\xe5" + "\x0c\x54\xf7\x7b\x33\x18\xce\xb0" + "\xd5\x82\x1b\xe2\x91\xae\xdc\x09" + "\xe2\x97\xa8\x27\x13\x78\xc6\xb8" + "\x20\x06\x1a\x71\x5a\xb3\xbc\x1b" + "\x69\x1f\xcd\x57\x70\xa7\x1e\x35", + .len = 128, + }, { + .key = "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8", + .klen = 24, + .iv = "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e", + .ptext = "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46" + "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b" + "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12" + "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c", + .ctext = "\xd7\xb4\xfc\xcc\x1f\xf7\xfc\x7d" + "\x69\xfa\xcb\x01\x60\xf3\x5a\x14" + "\x88\xf7\xea\x43\xaa\x47\xf1\x8a" + "\x4e\xd0\x3c\x50\x58\x35\x95\x21" + "\x5f\xcc\x73\x0b\x97\xa0\x2c\x6b" + "\x70\x4d\x3d\xa8\x21\xbe\xfc\xec" + "\xb6\x55\xf0\x48\x2b\x11\xcc\x4b" + 
"\xda\xf7\x09\xd9\x18\x7b\x4f\x00" + "\x76\x40\xe0\x7d\x33\xcf\x4f\x77" + "\x91\x97\x63\xfa\x72\xba\x5c\x3d" + "\xcf\x2e\xb8\x19\x56\x4a\xa5\x02" + "\xc3\xb1\x80\xa8\x57\x03\x32\x57" + "\xa8\xe1\x65\xf7\xd3\x52\xc5\xcf" + "\x55\x1e\x34\xe3\x77\xab\x83\xdb" + "\xaf\xd3\x8a\xcc\x96\x1c\xc9\x73" + "\xd9\x0b\xb6\x4c\x31\xac\x2c\x82" + "\xb8\xb4\xc8\xe1\xa5\x71\xcc\xb3" + "\x7e\x85\xb8\xfa\x6b\xef\x41\x24", + .len = 144, + }, { + .key = "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57", + .klen = 24, + .iv = "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97", + .ptext = "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66" + "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd" + "\x45\x87\xa4\x14\x1b\xef\xe7\x16" + "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e" + "\x01\x68\xc1\x7d\xa2\x15\x49\x74" + "\x53\x82\xc2\x10\xa8\x45\x73\x4d" + "\x41\xcc\x24\xa3\x42\xff\x30\xd1" + "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c" + "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe" + "\x52\xf1\x34\x78\x34\x53\x30\x5b" + "\x43\x43\x51\x6a\x02\x81\x64\x0c", + .ctext = "\x71\xf6\x96\x02\x07\x71\x1a\x08" + "\x7c\xfe\x33\xc4\xc9\xbe\xe2\xed" + "\xf8\x46\x69\xce\x1b\xdc\xd3\x05" + "\x7a\xec\x26\x4d\x27\x2a\x49\x36" + "\x85\xe1\x5d\xd3\x91\xd7\x68\xb8" + "\x55\xa5\x27\x55\x2d\xc1\x78\x27" + "\x0c\x49\x0a\x24\x3b\x76\x3f\x5f" + "\x29\x1c\x37\x2f\x30\xfc\x50\xcb" + "\xe2\x54\x26\x7d\x97\xa7\xf3\x58" + "\x15\xe1\x4c\xeb\x35\xc9\xd1\x1e" + "\x7e\x7d\xa0\xe5\x62\xa5\x2d\xf6" + "\x77\xb0\xef\x13\x55\xb4\x66\x2c" + "\x3b\x50\x1b\x4d\xc2\x64\xce\xc6" + "\xfe\xf2\xad\xfe\x26\x73\x36\x66" + "\x0c\x2f\x10\x35\x97\x3c\x9c\x98" + "\xc1\x90\xa8\x82\xd7\xc6\x31\x68" + "\xcf\x77\xa8\x5b\xdf\xf9\x5a\x8e" + "\x84\xb5\x0b\x6e\x5b\xec\x36\x89" + "\x0b\xb1\xbf\xb9\x70\x02\x5c\x22" + "\xc3\xd5\xc1\xc6\xfd\x07\xdb\x70", + .len = 160, + }, { + .key = "\x82\x8e\x9e\x06\x7b\xc2\xe9\xb3" + "\x06\xa3\xfa\x99\x42\x67\x87\xac" + "\x21\xc7\xb0\x98\x6c\xf8\x26\x57" + "\x08\xdd\x92\x02\x77\x7b\x35\xe7", + .klen = 32, + .iv = "\xa1\xad\xcb\xdd\xd5\x19\xb6\xd4" + "\x0b\x62\x58\xb0\x6c\xa0\xc1\x58", + .ptext = "\x14\x0d\x8a\x09\x16\x00\x00\xf1" + "\xc0\x20\x86\xf9\x21\xd1\x34\xe2", + .ctext = "\x05\xe3\x34\xaf\x6c\x83\x14\x8b" + "\x9d\x1c\xd6\x87\x74\x91\xdf\x17", + .len = 16, + }, { + .key = "\xc9\xf3\xc4\x93\xd0\xcc\xaf\xb1" + "\x1a\x42\x93\x71\xd8\x4e\xd8\xaa" + "\x52\xad\x93\x2f\xe5\xd9\xaa\x5b" + "\x47\x37\x3a\xed\x13\x92\x35\x16", + .klen = 32, + .iv = "\x81\xc8\x50\xd1\x74\xc3\x1c\x73" + "\xbb\xab\x72\x83\x90\x5a\x15\xcb", + .ptext = "\x65\x11\x93\xaf\xe1\x69\x6c\xbe" + "\x25\x8c\x76\x87\x53\xa4\x80\xae" + "\x51\x94\x36\x3f\xca\xe7\x45\x41" + "\x76\x05\xbf\x8f\x9c\xad\xc0\xe3", + .ctext = "\x6b\x00\x6e\x49\x7a\x6d\xe3\x04" + "\x4e\xf7\x9f\x8a\x1f\x14\xbd\xb1" + "\x51\xbf\x13\x9f\x29\x95\x51\x16" + "\xd0\x23\x9a\x1a\x45\xc2\xc3\xd1", + .len = 32, + }, { + .key = "\xd5\x9f\x52\x34\x12\x99\x8e\x42" + "\xe0\x85\x04\x6f\xeb\xf1\x5d\xd0" + "\xc1\xbf\x3f\x84\xd9\x1e\x71\x44" + "\xd4\xb9\x40\x3c\x02\x2e\x21\x19", + .klen = 32, + .iv = "\x28\xc1\x97\x64\x81\x52\x57\x0e" + "\x02\x8c\xab\x4c\xe2\x60\x14\xa5", + .ptext = "\x5a\xb1\x33\x48\xaa\x51\xe9\xa4" + "\x5c\x2d\xbe\x33\xcc\xc4\x7f\x96" + 
"\xe8\xde\x2b\xe7\x35\x7a\x11\x4b" + "\x13\x08\x32\xc6\x41\xd8\xec\x54" + "\xa3\xd3\xda\x35\x43\x69\xf6\x88" + "\x97\xca\x00\x1b\x02\x59\x24\x82", + .ctext = "\x03\xaf\x76\xbd\x5e\x5b\xca\xc0" + "\xae\x44\xa2\x2f\xc2\x76\x2f\x50" + "\xfa\x94\x94\x5a\x48\x9d\x9c\x38" + "\xc9\x75\xc9\xb2\x56\x0a\x2d\x91" + "\xb8\xe8\x4e\xaa\xcb\x51\x9b\x6a" + "\x20\x9b\x2b\xc5\xb0\x18\x9d\x01", + .len = 48, + }, { + .key = "\x9c\x5d\xd7\x66\x36\xfa\x02\x20" + "\x99\x61\x62\x86\x0f\x43\x2e\x05" + "\x25\x8b\xfb\xf1\xae\x4c\xde\x18" + "\x0b\xf8\xd0\x9d\xaa\xd4\x56\x04", + .klen = 32, + .iv = "\xcd\xa8\x61\x89\x8d\xbb\x72\xb6" + "\x1e\xfe\x03\x34\x54\x88\x23\xe2", + .ptext = "\x66\x42\x60\x24\xf3\xe4\xe9\x7e" + "\x42\x20\xf4\x61\xce\x1c\x5e\x44" + "\x02\x26\x91\xf7\x41\xa4\xab\x34" + "\x29\x49\xdd\x78\x19\x8f\x10\x10" + "\xf0\x61\xcf\x77\x18\x17\x61\xdf" + "\xc4\xa8\x35\x0e\x75\x1b\x84\x6b" + "\xc3\x3f\x31\x59\x5a\x9c\xf4\xc3" + "\x43\xa9\xb7\xf8\x65\x40\x40\xba", + .ctext = "\xb6\x41\x55\x8f\xeb\x16\x1e\x4c" + "\x81\xa0\x85\x6c\xf0\x07\xa5\x2a" + "\x19\x91\xed\x3e\xd6\x30\x8c\xca" + "\x5d\x0f\x58\xca\xd2\x8a\xac\xa2" + "\x2b\x86\x4f\xb5\x85\x4d\xac\x6d" + "\xe5\x39\x1b\x02\x23\x89\x4e\x4f" + "\x02\x00\xe8\x1b\x40\x85\x21\x2b" + "\xc6\xb1\x98\xed\x70\xb3\xf8\xc3", + .len = 64, + }, { + .key = "\x4b\x4e\x11\x91\x27\xcf\x8c\x66" + "\x17\xfa\x5b\x4c\xa8\xb8\x0f\xa1" + "\x99\x5b\x07\x56\xe1\x8d\x94\x8b" + "\xf2\x86\x5a\x5f\x40\x83\xfa\x06", + .klen = 32, + .iv = "\xfd\x73\xee\x1c\x27\xf3\xb4\x38" + "\xc5\x7c\x2e\xc5\x6e\xdb\x49\x0d", + .ptext = "\x0a\xe2\xdd\x97\xdd\x5e\xd4\xb3" + "\xc1\x49\x8f\x53\xb2\x40\x85\x1c" + "\x90\x37\x2d\xbd\x21\x6b\x1f\x80" + "\x56\x98\x76\x1e\xcf\x6c\x78\xd8" + "\xa0\x3c\x79\xc3\x56\xf7\xfc\x64" + "\x35\x58\x1c\x7c\xc4\x5f\x2a\x25" + "\x8c\x01\x98\x1e\x1c\x1f\x15\x64" + "\x50\xb5\xfa\x02\xd3\x54\xe5\x29" + "\xe3\xd2\xa3\x83\x54\x40\x54\xc5" + "\xd8\x1c\xc9\x84\x7d\xc8\x31\x49", + .ctext = "\x53\x2a\xa8\xa0\x15\xaf\x2f\xc4" + "\x7d\x31\xb4\x61\x80\x5f\xd1\xb6" + "\x7c\xca\x86\xb9\x28\x6e\xb6\x2b" + "\xe3\x4b\x7e\xea\xb3\x4f\xa2\xa2" + "\x4e\x8f\xbe\x22\x66\xb3\x92\xbc" + "\x70\x91\xaf\xa6\x09\x5d\xe2\x05" + "\x38\x62\xd3\x6e\x07\x63\x91\xad" + "\x48\x5a\x42\xe7\xdc\x0d\xb1\xe3" + "\x92\x88\x64\xee\x93\xaa\xaf\x31" + "\x68\x57\x35\x8d\x54\x2c\xfa\xb1", + .len = 80, + }, { + .key = "\x77\x3b\xf5\xe7\x20\xf7\xe0\x0c" + "\x3d\x3a\x83\x17\x83\x79\xd8\x29" + "\x5a\x0a\x25\x7f\xe0\x21\x23\xff" + "\x31\xfd\x60\x10\xe6\x63\xe2\xaf", + .klen = 32, + .iv = "\xdb\x4c\x0d\xc0\x36\xdb\xc7\xa1" + "\xa4\x91\xd9\x05\xe6\xc4\x98\x00", + .ptext = "\x8d\x4d\xc6\x5e\x01\x82\xb3\x39" + "\xc8\x64\xa7\xcb\x05\x19\x84\x80" + "\x3f\x9c\xa8\x4f\x64\xb3\x11\x4b" + "\x0e\x21\xc4\x75\x04\x1d\x6f\xd5" + "\x04\x04\x4d\xc9\xc0\x4b\x4a\x9c" + "\x26\xb7\x68\x5a\xe4\xd0\x61\xe3" + "\x2c\x93\x8e\x3f\xb4\x67\x07\x31" + "\x02\x52\x0c\x0f\xe6\x6d\xa3\xd0" + "\x48\x95\x83\x67\x23\x64\x31\x50" + "\xd2\x5f\x69\x68\x8b\x71\xbf\x01" + "\x29\x99\x86\x36\x2e\xdf\xf1\x7c" + "\x08\x8c\x78\x7a\x93\x9a\x7d\x1b", + .ctext = "\x92\x90\x48\x2f\x3a\x6b\x68\x43" + "\x28\x9b\x7d\x1e\x46\x28\xd8\x58" + "\xd9\x1e\x44\xd7\x24\x91\x65\xb1" + "\x15\xde\xc4\x63\xf1\xb1\x34\x9e" + "\xae\x8c\x51\x94\xc5\x22\x65\x8d" + "\x3d\x85\xf5\x34\x5f\x04\x68\x95" + "\xf2\x66\x62\xbb\xc8\x3f\xe4\x0a" + "\x8a\xb2\x70\xc0\x77\xd5\x96\xef" + "\x9e\x39\x3a\x3e\x0d\x2b\xf9\xfe" + "\xa9\xbc\x00\xba\xc5\x43\xd7\x70" + "\x2f\xef\x1e\x1e\x93\xc2\x5d\xf1" + "\xb5\x50\xb8\xf5\xee\xf4\x26\x6f", + .len = 96, + }, { + .key = "\xe0\x6a\x30\xe1\x35\xb5\xb0\x7c" + 
"\x54\xc5\x73\x9b\x00\xe5\xe7\x02" + "\xbe\x16\x59\xdc\xd9\x03\x17\x53" + "\xa8\x37\xd1\x5f\x13\x8e\x45\xdb", + .klen = 32, + .iv = "\x54\xe9\x1c\xde\xfb\x26\x0e\x48" + "\x35\x50\x4d\x9b\x4d\x12\x21\x0d", + .ptext = "\x73\x72\xcf\xdb\xbd\xbc\xc0\xdf" + "\x6b\xbb\xdf\x65\x6f\x2f\x43\x3b" + "\x2d\x7c\x0e\x07\x7f\xa0\x95\xdd" + "\xfc\x67\xc1\x11\x7a\xe2\xb5\x4a" + "\xd1\x15\xb0\xd8\xe2\xf0\x35\x48" + "\xd8\x81\x6a\x35\xae\x67\xbf\x61" + "\xf2\x8a\xcf\x04\xc8\x09\x8b\x63" + "\x31\x74\x95\xa5\x8d\x3c\xea\xe2" + "\x5f\x67\xc4\x7e\x51\x88\xbf\xb5" + "\x78\xef\x3a\x76\xd8\x1d\x00\x75" + "\x2b\x7b\x28\x7c\xde\x4b\x39\x01" + "\x5d\xde\x92\xfe\x90\x07\x09\xfd" + "\xa5\xd1\xd3\x72\x11\x6d\xa4\x4e" + "\xd1\x6e\x16\xd1\xf6\x39\x4f\xa0", + .ctext = "\x3b\xc5\xee\xfc\x05\xaf\xa6\xb7" + "\xfe\x12\x24\x79\x31\xad\x32\xb5" + "\xfb\x71\x9b\x02\xad\xf4\x94\x20" + "\x25\x7b\xdb\xdf\x97\x99\xca\xea" + "\xc4\xed\x32\x26\x6b\xc8\xd4\x7b" + "\x5b\x55\xfa\xf9\x5b\xab\x88\xdb" + "\x48\xfe\x67\xd5\x5a\x47\x81\x4e" + "\x3e\x1e\x83\xca\x1d\x04\xe1\xb5" + "\x6c\x1b\xbd\xf2\x2d\xf1\xae\x75" + "\x09\x6a\xf8\xb2\xc3\x27\xee\x08" + "\x66\x94\x72\xc0\x2b\x12\x47\x23" + "\x4d\xde\xb4\xca\xf7\x66\xca\x14" + "\xe7\x68\x1b\xfb\x48\x70\x3e\x4c" + "\x43\xbb\x88\x32\x25\xff\x77\x6a", + .len = 112, + }, { + .key = "\x60\xb6\xde\x17\xca\x4c\xe7\xe0" + "\x07\x0d\x80\xc5\x8a\x2d\x5a\xc2" + "\x2c\xb9\xa4\x5f\x2a\x85\x2c\x3d" + "\x6d\x67\xc8\xee\x0f\xa2\xf4\x09", + .klen = 32, + .iv = "\x1a\xa5\xbc\x7e\x93\xf6\xdd\x28" + "\xb7\x69\x27\xa1\x84\x95\x25\x5a", + .ptext = "\x7b\x88\x00\xeb\xa5\xba\xa1\xa7" + "\xd4\x40\x16\x74\x2b\x42\x37\xda" + "\xe0\xaf\x89\x59\x41\x2f\x62\x00" + "\xf5\x5a\x4e\x3b\x85\x27\xb2\xed" + "\x1b\xa7\xaf\xbe\x89\xf3\x49\xb7" + "\x8c\x63\xc9\x0c\x52\x00\x5f\x38" + "\x3b\x3c\x0c\x4f\xdd\xe1\xbf\x90" + "\x4a\x48\xbf\x3a\x95\xcb\x48\xa2" + "\x92\x7c\x79\x81\xde\x18\x6e\x92" + "\x1f\x36\xa9\x5d\x8d\xc4\xb6\x4d" + "\xb2\xb4\x0e\x09\x6d\xf3\x3d\x01" + "\x3d\x9b\x40\x47\xbc\x69\x31\xa1" + "\x6a\x71\x26\xdc\xac\x10\x56\x63" + "\x15\x23\x7d\x10\xe3\x76\x82\x41" + "\xcd\x80\x57\x2f\xfc\x4d\x22\x7b" + "\x57\xbb\x9a\x0a\x03\xe9\xb3\x13", + .ctext = "\x37\x0d\x47\x21\xbc\x28\x0b\xf7" + "\x85\x5f\x60\x57\xf2\x7f\x92\x20" + "\x5f\xa7\xf6\xf4\xa6\xf5\xdf\x1e" + "\xae\x8e\xeb\x97\xfc\xce\x6a\x25" + "\x6d\x6a\x5b\xd1\x99\xf6\x27\x77" + "\x52\x0c\xf1\xd7\x94\xa0\x67\x5d" + "\x60\x35\xb0\x6d\x01\x45\x52\xc8" + "\x05\xd8\x7f\x69\xaf\x8e\x68\x05" + "\xa8\xa5\x24\x2f\x95\xef\xf1\xd2" + "\x8c\x45\x12\xc5\x7a\xcf\xbb\x99" + "\x25\xaa\xa3\x9b\x3f\xf1\xfc\x9d" + "\xfa\x2c\x26\x9b\x92\x47\x61\x6b" + "\x63\x1e\x41\x67\xcb\xb7\x0f\x52" + "\x70\xd4\x0d\x7e\xef\x34\xa2\x75" + "\x4f\x6a\x55\x9c\x2b\x4a\x02\xdd" + "\x96\x5d\xcb\xca\x45\xa1\xec\xaa", + .len = 128, + }, { + .key = "\x2a\xed\x7d\x76\xfc\xc5\x49\x50" + "\xf4\x90\x0f\xcc\x5d\xff\x0c\x3c" + "\x14\x06\xaf\x68\x8f\xd7\xb6\x25" + "\x1e\x10\x95\x2a\x71\x33\x17\x20", + .klen = 32, + .iv = "\x5b\x58\x47\xf8\xd5\x1e\x91\x81" + "\x46\xe7\x25\x3a\x02\x45\x9c\x65", + .ptext = "\x10\xaf\xde\x5c\x30\x79\x43\x28" + "\x1c\x03\xf8\x50\x0f\x30\xa5\xef" + "\x84\x19\x4c\x09\x40\x03\x75\x1f" + "\x92\x8f\x88\x01\xda\x31\x7a\xe4" + "\x48\xe3\xab\xb4\xe6\x1b\x0f\xac" + "\xd9\xfa\x8d\x23\xe4\xc6\xa4\xa9" + "\x2d\x9a\x54\x52\x44\x5c\x3c\x52" + "\x61\xf0\x00\xca\xed\xab\xed\xe2" + "\x44\x0b\xe0\x18\xba\xa5\x63\xd8" + "\xdc\x5e\x1a\x4c\xf8\xde\x5e\x75" + "\xdf\x42\x27\x7b\xe9\x11\x2f\x41" + "\x3a\x72\x54\x3d\x44\x9c\x3e\x87" + "\x8d\x8d\x43\x2f\xb2\xff\x87\xd4" + "\xad\x98\x68\x72\x53\x61\x19\x7c" + 
"\x20\x79\x8c\x2b\x37\x0b\x96\x15" + "\xa5\x7d\x4e\x01\xe6\xea\xb6\xfa" + "\xaa\xd3\x9d\xa2\xd9\x11\xc3\xc9" + "\xd4\x0e\x3f\x3e\xfe\x35\x1e\xe5", + .ctext = "\xb0\x2b\x75\x5f\x33\x1b\x05\x49" + "\x06\xf1\x43\x91\xc2\x85\xfa\xac" + "\x3f\x47\xf3\x89\x73\xb2\x0e\xa4" + "\x30\xcb\x87\x39\x53\x5d\x36\x89" + "\x77\xd9\x17\x01\x95\xa6\xe9\x71" + "\x51\x53\xd9\x4f\xa6\xc2\x79\x3d" + "\x2e\x50\x90\x52\x0d\x27\x1a\x46" + "\xf1\xe8\x6e\x7e\x7b\x32\xe5\x22" + "\x22\x1f\xba\x5e\xcf\x25\x6b\x26" + "\x76\xf0\xca\x8e\xdd\x5b\xd3\x09" + "\x6f\x82\x08\x56\x1f\x51\x72\x57" + "\xca\xd1\x60\x07\xfb\x9f\x71\x54" + "\x0f\xf6\x48\x71\xfa\x8f\xcb\xdd" + "\xce\xd3\x16\xcd\xae\x0e\x67\x5e" + "\xea\x8d\xa2\x4a\x4f\x11\xc8\xc8" + "\x2f\x04\xfe\xa8\x2a\x07\x1c\xb1" + "\x77\x39\xda\x8b\xd9\x5c\x94\x6c" + "\x4d\x4d\x13\x51\x6f\x07\x06\x5b", + .len = 144, + }, { + .key = "\x7b\xa7\x4d\x0a\x37\x30\xb9\xf5" + "\x2a\x79\xb4\xbf\xdb\x7f\x9b\x64" + "\x23\x43\xb5\x18\x34\xc4\x5f\xdf" + "\xd9\x2a\x66\x58\x00\x44\xb5\xd9", + .klen = 32, + .iv = "\x75\x34\x30\xc1\xf0\x69\xdf\x0a" + "\x52\xce\x4f\x1e\x2c\x41\x35\xec", + .ptext = "\x81\x47\x55\x3a\xcd\xfe\xa2\x3d" + "\x45\x53\xa7\x67\x61\x74\x25\x80" + "\x98\x89\xfe\xf8\x6a\x9f\x51\x7c" + "\xa4\xe4\xe7\xc7\xe0\x1a\xce\xbb" + "\x4b\x46\x43\xb0\xab\xa8\xd6\x0c" + "\xa0\xf0\xc8\x13\x29\xaf\xb8\x01" + "\x6b\x0c\x7e\x56\xae\xb8\x58\x72" + "\xa9\x24\x44\x61\xff\xf1\xac\xf8" + "\x09\xa8\x48\x21\xd6\xab\x41\x73" + "\x70\x6b\x92\x06\x61\xdc\xb4\x85" + "\x76\x26\x7a\x84\xc3\x9e\x3a\x14" + "\xe7\xf4\x2d\x95\x92\xad\x18\xcc" + "\x44\xd4\x2c\x36\x57\xed\x2b\x9b" + "\x3f\x2b\xcd\xe5\x11\xe3\x62\x33" + "\x42\x3f\xb8\x2a\xb1\x37\x3f\x8b" + "\xe8\xbd\x6b\x0b\x9f\x38\x5a\x5f" + "\x82\x34\xb7\x96\x35\x58\xde\xab" + "\x94\x98\x41\x5b\x3f\xac\x0a\x34" + "\x56\xc0\x02\xef\x81\x6d\xb1\xff" + "\x34\xe8\xc7\x6a\x31\x79\xba\xd8", + .ctext = "\x4e\x00\x7c\x52\x45\x76\xf9\x3d" + "\x1a\xd1\x72\xbc\xb9\x0f\xa9\xfb" + "\x0e\x5b\xe2\x3c\xc7\xae\x92\xf6" + "\xb8\x0b\x0a\x95\x40\xe9\x7f\xe0" + "\x54\x10\xf9\xf6\x23\x1f\x51\xc8" + "\x16\x8b\x2e\x79\xe1\x8c\x0b\x43" + "\xe5\xeb\xb5\x9d\x1e\xc3\x28\x07" + "\x5c\x8d\xb1\xe7\x80\xd3\xce\x62" + "\x8d\xf8\x31\x1f\x29\x8b\x90\xee" + "\xe5\xc3\xfa\x16\xc4\xf0\xc3\x99" + "\xe9\x5e\x19\xba\x37\xb8\xc0\x87" + "\xb5\xc6\xc9\x31\xcb\x6e\x30\xce" + "\x03\x1d\xfe\xce\x08\x32\x00\xeb" + "\x86\xc4\xfb\x48\x01\xda\x93\x73" + "\xcc\xb7\xae\x4e\x94\x20\xeb\xc7" + "\xe3\x33\x4c\xeb\xed\xe2\xfc\x86" + "\x0e\x73\x32\xf9\x1b\xf3\x25\xf3" + "\x74\xad\xd1\xf4\x2c\x45\xa4\xfd" + "\x52\x40\xa2\x4e\xa5\x62\xf6\x02" + "\xbb\xb0\xe3\x23\x86\x67\xb8\xf6", + .len = 160, + } +}; + +static const struct cipher_testvec aria_cfb_tv_template[] = { + { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1", + .klen = 16, + .iv = "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0" + "\xfd\xab\x56\xa6\x6e\xda\x7c\x57", + .ptext = "\x36\x36\x89\x09\xcd\xa8\xd3\x91" + "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0", + .ctext = "\x19\x28\xb5\xf2\x1c\xbc\xf8\xaf" + "\xb9\xae\x1b\x23\x4f\xe1\x6e\x40", + }, { + .key = "\x51\xe3\x8c\xe9\x76\xcd\xff\x37" + "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe", + .klen = 16, + .iv = "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .ptext = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e" + "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1", + .ctext = "\x3f\x8c\xa9\x19\xd6\xb4\xfb\xed" + "\x9c\x6d\xaa\x1b\xe1\xc1\xe6\xa8" + "\x47\x35\x7d\xa3\x96\x7d\x53\x60" + "\xa9\x33\x9c\x34\xae\x7d\x7c\x74", + .len = 32, + }, { + 
.key = "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .klen = 16, + .iv = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4", + .ptext = "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0" + "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2" + "\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e", + .ctext = "\x28\xd8\xa7\xf8\x74\x98\x00\xfc" + "\xd6\x48\xad\xbd\xbe\x3f\x0e\x7b" + "\xa3\xec\x03\x6a\xfb\xc9\x01\x83" + "\xb3\x2f\xda\x5e\x66\xa0\xc3\xec" + "\xe9\xd4\x72\x2a\xa2\x90\x41\xcf" + "\xde\x30\x79\xc3\x82\x10\x51\xe1", + .len = 48, + }, { + .key = "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17", + .klen = 16, + .iv = "\xd9\xa0\x57\x80\xc8\x96\x70\x86" + "\x07\x2c\xf4\x61\x79\x09\x01\x8f", + .ptext = "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57" + "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40" + "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa", + .ctext = "\x29\x31\x55\xd2\xe5\x0b\x81\x39" + "\xf9\xbc\x63\xe2\xfa\x26\x99\xde" + "\x5c\xd3\x0a\x56\xe5\xfc\x83\xdd" + "\xab\x26\x90\x7d\xa8\x0f\x01\xa6" + "\x0e\x01\xdc\x1f\xfa\xa7\xdd\x09" + "\xf9\xbf\x12\xf4\xc6\x9f\xbd\x57" + "\x23\x68\x54\x0f\xe0\xcf\x1c\x6d" + "\xe1\x5e\x0b\x4a\x1e\x71\x1d\xaa", + .len = 64, + }, { + .key = "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac", + .klen = 16, + .iv = "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ptext = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d" + "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38" + "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8", + .ctext = "\x38\xbc\xf5\x9d\x0e\x26\xa6\x18" + "\x95\x0b\x23\x54\x09\xa1\xf9\x46" + "\x7a\x31\xa0\xd7\x4a\xec\xb3\x10" + "\x8a\x8e\x99\x78\x6c\x6e\x76\xf2" + "\x63\x8a\x3b\x90\xaa\xd5\x64\x65" + "\x5a\x52\xb0\x36\x4c\xce\xed\xc7" + "\x51\x3c\x06\xb0\xee\x54\xec\x10" + "\xc0\x5f\xfd\xa9\x44\x9a\x29\x32" + "\x19\x79\x7d\x2b\x14\x26\x96\x13" + "\x9d\xa5\x61\xbd\xb6\x72\x37\x26", + .len = 80, + }, { + .key = "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3", + .klen = 16, + .iv = "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a", + .ptext = "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41" + "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50" + "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2" + "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda", + .ctext = "\xdf\x79\x58\x30\x6f\x47\x12\x78" + "\x04\xb2\x0b\x1a\x62\x22\xe2\x9f" + "\xfe\x90\x50\x41\x1b\x6a\x6a\x9c" + "\x4e\x77\x8f\xca\xd1\x68\x31\xcd" + "\x41\x82\xa5\x5b\xc0\x08\x2b\x37" + "\x62\xec\x95\xf1\x56\x12\x38\x66" + "\x84\x82\x72\xda\x00\x21\x96\x82" + "\x33\xd4\x99\xaa\xb9\xeb\xd5\xc3" + "\x2b\xa8\xf7\xdc\x13\x0e\x21\x9f" + "\x4b\xf9\x42\x58\xa8\x39\x10\xd5" + "\x86\xa5\xc6\x78\x3b\x34\x05\x03" + "\x54\x43\x2b\x80\xa9\x53\x4d\x0e", + .len = 96, + }, { + .key = 
"\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1", + .klen = 16, + .iv = "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad", + .ptext = "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e" + "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + "\x37\x03\xb3\xd9\x74\x6e\x4d\xbb" + "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17" + "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95", + .ctext = "\xe4\x25\x0d\x22\xeb\xbe\x5e\x90" + "\x01\xe5\xae\xc9\x94\xbd\x93\x89" + "\x5e\x5a\x5a\x2f\xf6\xdf\xf8\x16" + "\xd3\xb2\xed\x29\x51\xe2\x75\xb0" + "\x1a\x48\xb5\xe6\xd3\x58\x40\xc7" + "\x6f\x6f\xcf\x57\x82\x43\x5a\x36" + "\xef\x27\xe1\x34\x85\x01\xec\x98" + "\x00\xbd\x94\x6f\x12\x39\xa8\x13" + "\xfe\x3c\x39\xc0\xc6\xe1\xcc\x05" + "\x0e\xd5\xc9\xda\xbd\xdd\xdb\xaa" + "\x5a\xaa\x8e\xe8\xa8\x0a\xc5\x18" + "\xb4\x1d\x13\x81\xc9\xc4\xaa\x61" + "\xa9\xbd\xaa\x03\x12\x93\xbb\xed" + "\x0c\x6e\xbd\x1c\x05\x16\x8a\x59", + .len = 112, + }, { + .key = "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15", + .klen = 16, + .iv = "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12", + .ptext = "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41" + "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3" + "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe" + "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c", + .ctext = "\xa7\x4c\x96\x55\x7c\x07\xce\xb2" + "\x6f\x63\x9f\xc6\x8b\x6f\xc6\x4a" + "\x85\xf2\x4b\xdf\x62\x0c\x6c\x8d" + "\x13\x5d\xd3\x40\x58\xa6\xf9\x03" + "\xd9\xf2\x48\x4e\x12\x64\x9a\x55" + "\xa2\xa3\xd0\x19\xe5\x5b\xaa\x62" + "\x7b\xe9\x2a\x23\xab\xb5\xa6\xcf" + "\x53\x59\x70\xc6\xb8\x92\x12\x3b" + "\x93\x68\x24\xba\x7d\xd6\xc0\x5b" + "\x06\x2e\x7f\x2e\x32\x5d\x42\x9c" + "\x13\x8e\x92\x3c\x99\x20\x32\x2b" + "\x4a\x41\xb2\x4a\x81\xe8\x6e\x7f" + "\x5b\x8e\xca\x4d\xd7\x29\x96\xde" + "\x30\x9c\xa6\x84\x90\xe7\xc2\xae" + "\xf4\x7e\x73\x32\x4c\x25\xec\xef" + "\x58\x69\x63\x3f\x4e\x71\x4b\x1c", + .len = 128, + }, { + .key = "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c", + .klen = 16, + .iv = "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33", + .ptext = "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a" + "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8" + "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e" + "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46", + .ctext = "\x11\x7f\xea\x49\xaf\x24\x52\xa2" + 
"\xde\x60\x99\x58\x23\xf9\x9e\x91" + "\x94\x52\x31\xa3\x28\x07\x14\xad" + "\x00\x24\x4a\x4a\xe7\x18\xd7\x24" + "\xcc\x8b\x66\x53\x82\x65\x31\xa5" + "\x54\x76\x59\x0b\x69\x6f\x90\x2c" + "\x8d\xa5\x2b\x61\x05\x80\xfb\xe0" + "\xf9\x6e\xaf\xb9\xc4\x15\x67\xcc" + "\x15\xce\xa0\xc0\xf2\xae\xa6\x15" + "\x24\x9a\xe5\xcb\x09\x42\xcf\x41" + "\x95\xa4\x8d\xbf\xe8\xb8\x40\xcd" + "\xb0\x33\x2c\xb3\xc4\xdd\xf9\x45" + "\xda\xb2\xeb\xb3\xf8\xfa\x7f\xe3" + "\xc0\x3a\x98\xe7\x17\x4a\x0c\x60" + "\xb2\x22\xba\x3b\x21\x85\x27\x56" + "\xe0\xb2\xf7\x2a\x59\xb1\x56\x20" + "\x0b\xa9\x13\x73\xe0\x6f\x61\x32" + "\xa5\x38\x14\xb3\xe3\xaa\x70\x44", + .len = 144, + }, { + .key = "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b", + .klen = 16, + .iv = "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12", + .ptext = "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c" + "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57" + "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97" + "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66", + .ctext = "\x5b\xc0\xe8\x17\xa4\xf9\xea\xce" + "\x9e\xf9\xe0\xb1\xac\x37\xe9\x41" + "\xc8\x06\xf9\x1c\x1a\xfc\xe8\x7a" + "\x38\xf2\x80\x66\xc2\x70\x59\x4e" + "\xe0\x32\x5b\x27\x39\xf5\xfb\x03" + "\xc8\xaf\xd6\x7e\x57\xc7\xc6\x71" + "\xd9\xd0\x48\x39\xb1\x0d\xa8\x1a" + "\x23\x8a\x3d\x05\xe2\x90\x7e\x18" + "\xd7\x20\x04\x3b\x82\x76\x3f\xaa" + "\xc2\x89\xb6\x9e\x14\x2f\x46\xcd" + "\x51\x9b\xa8\x7b\x62\x7b\x9c\x17" + "\xc4\xe1\x8b\x3f\xb5\x4d\xac\x66" + "\x49\xf6\xb6\x4c\x3e\x16\x46\xb0" + "\xca\x04\xef\x72\x5c\x03\x0a\xe5" + "\x2f\x4e\x36\x38\x36\x9f\xf4\xe2" + "\x81\x7a\x4c\xdf\x36\x27\xd5\x9d" + "\x03\xad\x1d\x3a\xe9\x2a\x99\xb0" + "\x2c\xba\x13\x75\xc8\x37\x97\x11" + "\xf4\x15\x0f\xb7\x75\x26\xa1\x14" + "\x79\xec\x1f\xab\xd2\x10\x8c\x5f", + .len = 160, + }, { + .key = "\x7f\x92\xd5\x06\x30\x6b\xc0\x23" + "\x87\xa8\x8e\x6d\xc7\xc5\xd7\xf1" + "\x5f\xce\x89\xb3\xd5\x7f\x7f\xf0", + .klen = 24, + .iv = "\xfd\xab\x56\xa6\x6e\xda\x7c\x57" + "\x36\x36\x89\x09\xcd\xa8\xd3\x91", + .ptext = "\x48\x3e\x3c\x11\xcf\xd0\x4f\xc0" + "\x51\xe3\x8c\xe9\x76\xcd\xff\x37", + .ctext = "\xa4\x12\x2f\xc4\xf0\x6d\xd9\x46" + "\xe4\xe6\xd1\x0b\x6d\x14\xf0\x8f", + .len = 16, + }, { + .key = "\xd6\x1a\x18\x2f\x68\x2f\xb6\xfe" + "\x3d\x2d\x85\x75\x6e\x18\x8a\x52" + "\x53\x39\xfc\xc1\xf5\xc0\x56\x22", + .klen = 24, + .iv = "\xc6\xae\xaa\x0d\x90\xf2\x38\x93" + "\xac\xd2\x3f\xc7\x74\x8d\x13\x7e", + .ptext = "\xfa\x3f\x70\x52\xfb\x04\x0e\xed" + "\x0e\x60\x75\x84\x21\xdf\x13\xa1" + "\x26\xf8\x8c\x26\x0a\x37\x51\x8f" + "\xe7\x9c\x74\x77\x7a\x3e\xbb\x5d", + .ctext = "\x80\x2b\xf0\x88\xb9\x4b\x8d\xf5" + "\xc3\x0e\x15\x5b\xea\x5d\x5b\xa8" + "\x52\xe7\x83\x3c\xa1\x51\x1c\x1f" + "\x38\xd9\x7c\x88\x3c\x3a\xcd\x3e", + .len = 32, + }, { + .key = "\xd7\x33\xf3\xa9\x5b\xb4\x86\xea" + "\xe3\x7d\x50\x62\x3b\x73\xaf\xc4" + "\xda\x89\xd9\x3c\xcc\xe4\x73\xb0", + .klen = 24, + .iv = "\xef\x3e\x5f\x46\x62\x88\xd5\x26" + "\x3b\xd3\xb5\x81\x78\x70\x1b\xd2", + .ptext = 
"\x39\x56\x34\x63\x2c\xc5\x51\x13" + "\x48\x29\x3a\x58\xbe\x41\xc5\x80" + "\x2c\x80\xa7\x3c\x14\xb4\x89\x5e" + "\x8e\xe5\x5f\xe2\x39\x80\xf5\x2b" + "\x77\xb5\xca\x90\xda\x1d\x22\x17" + "\xd9\xa0\x57\x80\xc8\x96\x70\x86", + .ctext = "\x65\x01\x3c\xb0\xac\x4c\x63\xb6" + "\xe7\xf1\xf4\x61\x35\xf4\x36\xde" + "\xeb\x0f\x8c\x34\xd1\x78\xb4\x00" + "\xb2\xc1\x7c\x28\xb2\xb7\xbb\xa3" + "\xc6\xb7\x27\xf7\x6d\x56\x79\xfa" + "\x61\x57\xba\x30\x6f\x56\xe9\x8c", + .len = 48, + }, { + .key = "\x07\x2c\xf4\x61\x79\x09\x01\x8f" + "\x37\x32\x98\xd4\x86\x2b\x3b\x80" + "\x07\x60\xba\xf0\x2e\xc3\x4a\x57", + .klen = 24, + .iv = "\xf5\xb5\xd7\xbf\xd2\x2a\x9b\x4a" + "\xe6\x08\xf0\xbe\x77\xd1\x62\x40", + .ptext = "\xa0\x82\x09\x60\x47\xbb\x16\x56" + "\x50\x1f\xab\x8b\x10\xfe\xf0\x5c" + "\x05\x32\x63\x1a\xc4\x46\x6f\x55" + "\x32\xde\x41\x5a\xf7\x52\xd7\xfa" + "\x30\x9d\x59\x8d\x64\x76\xad\x37" + "\xba\xbc\x46\x6a\x69\x17\x3c\xac" + "\x6f\xdd\xa2\x9b\x86\x32\x14\x2e" + "\x54\x74\x8f\x3d\xe2\xd6\x85\x44", + .ctext = "\x5a\xfb\xb1\x2c\x6e\xe5\xb8\xe0" + "\x80\xb6\x77\xa8\xfe\x10\x3a\x99" + "\xbf\xc0\x2a\xfe\x6f\x38\xf2\x1d" + "\x53\x6c\x05\x83\xb1\x13\x00\x87" + "\x92\x92\x42\x70\xcf\x9f\xf7\x8f" + "\x53\x55\x18\x6f\x35\x68\x35\x50" + "\x3a\xc8\x45\x3e\xa3\xf1\x33\x2e" + "\xa1\x65\x42\xe2\x6d\x31\x8c\x4b", + .len = 64, + }, { + .key = "\x4f\x4a\x31\x64\xc6\xa5\x29\xaa" + "\xad\xfd\x32\x94\x1f\x56\x57\xd1" + "\x9d\x7e\x3d\x49\x00\x36\xb1\x5d", + .klen = 24, + .iv = "\xb2\x92\x83\x70\x1e\xa3\x97\xa6" + "\x65\x53\x39\xeb\x53\x8f\xb1\x38", + .ptext = "\x91\xac\x17\x11\x1c\x03\x69\x53" + "\xf5\xdf\xdb\x2c\x1b\x9a\x6e\x6b" + "\xb6\x02\xc4\xfa\x95\x01\x33\xa8" + "\xda\x7e\x18\x2c\xf4\x7e\x6e\x67" + "\xce\x8f\x9f\xea\x46\x66\x99\xb8" + "\xe1\xc7\x25\x4d\xbd\xa5\x74\xdf" + "\xc7\x8b\xfb\xe3\x2d\x3a\x82\xd3" + "\x17\x94\x77\x2f\x92\xb8\x87\xc2" + "\xcc\x6f\x70\x26\x87\xc7\x10\x8a" + "\xc8\xfd\xc2\xb3\xcf\xa0\xeb\x41", + .ctext = "\xc9\x5f\xe0\x60\x61\x38\x7e\x79" + "\x52\x68\x64\x8f\x55\x9b\x6b\x72" + "\xa5\x17\x61\xb7\xce\x02\xa9\xa4" + "\x5c\x73\x45\x33\xd1\x07\x5e\xdc" + "\xe5\xbe\xa7\xde\x69\xa0\x97\x98" + "\x02\xef\xa4\x67\x51\x60\x69\x4f" + "\x03\xf5\xa8\x5f\x03\x69\xbc\xc2" + "\x34\x59\x7e\xd4\xd2\xb3\x32\x2f" + "\x0c\xb4\x37\xca\xc4\xc7\x93\xf4" + "\xa4\xab\x01\x3f\x91\x29\x55\x98", + .len = 80, + }, { + .key = "\x4c\xf4\xd0\x34\xd0\x95\xab\xae" + "\x82\x5c\xfd\xfa\x13\x86\x25\xce" + "\xf4\x13\x32\xcd\xc6\x6d\xf6\x50", + .klen = 24, + .iv = "\x12\x4a\x5b\x66\x3a\xd3\xfb\x1a" + "\xaf\x06\xea\xf4\x65\x59\xd6\xc2", + .ptext = "\x84\xa0\x53\x97\x61\x30\x70\x15" + "\xac\x45\x8e\xe8\xeb\xa1\x72\x93" + "\x26\x76\x98\x6f\xe4\x86\xca\xf0" + "\x57\x89\xf2\x2b\xd4\xcf\x2d\x95" + "\x86\x26\x20\x0e\x62\xfe\x8f\x1e" + "\x5d\xcb\x2b\x7e\xdd\xab\xac\xda" + "\x6e\x49\x20\xd5\xb7\x01\x83\x4e" + "\xac\x45\x8f\xe1\x05\x3f\xd5\xb1" + "\xee\xb7\x0d\x65\x00\x38\xab\x71" + "\x70\x6e\xb3\x97\x86\xd3\xcd\xad" + "\x51\x8b\x9c\xa0\x9a\x8b\x4c\xb9" + "\x16\x01\x6a\x1f\xdf\xf0\xf9\x9e", + .ctext = "\x03\x2c\x39\x24\x99\xb5\xf6\x79" + "\x91\x89\xb7\xf8\x89\x68\x37\x9d" + "\xa2\x80\x95\x74\x87\x64\xb9\xeb" + "\x85\x28\x92\x9a\x6e\xd3\x3b\x50" + "\x4c\x80\x5b\xe4\xf2\x7e\xda\x2a" + "\xd4\xf8\xcb\xe3\x6f\xdf\xae\x0e" + "\xc5\x6c\x0b\x49\x2e\x29\x1c\xf2" + "\x3f\x44\x44\x12\x67\xa6\xff\x44" + "\xe0\xec\xd8\xf7\x32\xde\x21\x15" + "\xab\x8f\x98\x4d\xed\xb0\x42\xfd" + "\x83\x94\xe2\xcc\x69\x6d\xe8\xdb" + "\x62\x93\x1f\xd0\xf4\x8c\x62\xc0", + .len = 96, + }, { + .key = "\x25\x1b\xc2\xa6\x21\x25\xeb\x97" + "\x4b\xf6\xcb\x3b\xcd\x61\xfd\x94" + 
"\x37\x03\xb3\xd9\x74\x6e\x4d\xbb", + .klen = 24, + .iv = "\xfd\x87\x2b\xec\x4c\x2c\xbf\xe2" + "\x94\x1a\xe6\xd9\xaf\x0e\x78\x17", + .ptext = "\x58\x2b\x1d\x73\x9a\x9c\x63\x18" + "\x88\x7a\x0e\x87\x2f\xf0\xb0\xdb" + "\xc9\x9d\x79\x51\x34\x39\x4f\x07" + "\xa2\x7c\x21\x04\x91\x3b\x79\x79" + "\xfe\xd5\x51\x46\xd5\xcd\x28\xc0" + "\xad\xb8\x55\xb2\xb2\x5a\x9a\xa2" + "\xe2\x0c\xfc\x55\x7d\x60\xd2\x95" + "\xb6\x08\x1d\x31\xaf\xf4\x17\x46" + "\xa4\xbb\x0f\xbd\x67\x3c\x73\x15" + "\x0c\x85\x2f\x62\xe5\xf4\x35\x96" + "\xb1\x9b\x5d\x00\x10\xe9\x70\x12" + "\x3a\x87\x7f\x67\xf1\x81\x7a\x05" + "\xb4\xa6\xfe\xdf\x36\x31\x6d\x9e" + "\x0e\xa9\x44\xa0\xb0\x05\xa9\x41", + .ctext = "\xd4\x9a\x04\x54\x05\xd2\xe6\x3f" + "\xb0\xa4\x36\x5e\x1e\x9c\x35\xb0" + "\xc0\x89\xbd\x1c\xaa\x45\xa6\xc8" + "\x16\x68\x4a\x06\x93\x67\x88\xd7" + "\x72\x6e\x48\x0a\x17\xa3\x52\x8b" + "\x96\x5f\x41\xf6\x17\x64\x55\x8b" + "\xac\xce\xf6\x8c\xce\xd2\xd4\xd4" + "\x8d\x92\x32\xe0\x0d\xb4\xf7\x4a" + "\x90\xaf\x7b\x85\x21\x46\x2e\xa6" + "\x9e\xac\x0d\x22\xf2\x26\xf6\xd3" + "\x27\xcd\x59\xa0\xe2\xbb\x22\xcd" + "\x35\xb6\x28\x45\x0a\x46\xb0\x3a" + "\xac\x3e\xd3\x5b\xc6\x54\xa2\xa3" + "\x6d\xbb\xb3\xcd\xc5\x64\x62\x92", + .len = 112, + }, { + .key = "\x9c\x14\x44\x5a\xd5\x1c\x50\x08" + "\x95\xc2\xf2\xaf\x3f\x29\xc9\x3e" + "\x95\x5e\xc6\xb4\x2b\xf4\x3e\xe3", + .klen = 24, + .iv = "\x1b\xeb\x3d\x73\xfb\xd7\x1e\x2b" + "\x0c\x3d\x58\x6c\xb4\x41\x9b\xfe", + .ptext = "\x2f\x7e\x1c\x10\x81\x36\x2d\x79" + "\xaf\xab\x10\x44\x2e\xcc\x0d\x6c" + "\x9c\x14\xc2\xe4\xae\xb0\xbb\xda" + "\x6a\xe0\x42\x3d\x96\x9f\x78\x7d" + "\x70\x86\xa5\x92\x9f\xee\xcd\x3f" + "\x6a\x55\x84\x98\x28\x03\x02\xc2" + "\xf7\xec\x7a\xfa\xb1\xd9\xa8\xd8" + "\x1c\xc3\xaa\xd5\x61\x7f\x10\x0c" + "\xc0\xa1\x36\x3d\x81\x9a\xd2\x17" + "\x2e\x23\xc9\xb7\xff\xdf\x47\x6c" + "\x96\x3b\x0e\xbd\xec\x9a\x0e\xad" + "\x8c\xaf\x36\x3d\xff\x29\x8b\x33" + "\x87\x96\x77\x1a\x10\x81\x63\x8a" + "\x63\xde\x88\xa9\x9d\xa9\x01\xf2" + "\xdf\xc9\x25\x35\x48\x3a\x15\xdf" + "\x20\x6b\x91\x7c\x56\xe5\x10\x7a", + .ctext = "\xbc\x57\x2a\x88\x0a\xd0\x06\x4f" + "\xdb\x7b\x03\x9f\x97\x1a\x20\xfe" + "\x15\x91\xb4\xed\x5d\x78\x89\x2a" + "\x67\x6b\x9c\x47\x36\xc2\x80\x0e" + "\x03\x8d\x6f\xfc\x94\xc7\xc5\xc2" + "\xeb\x43\x74\x5d\xfe\xc4\x5a\xa1" + "\x80\x51\x8a\x63\xd1\x27\x1b\x0a" + "\x88\x2c\xc4\x7f\x1a\xa3\x28\xe5" + "\xfd\xd0\x8a\xd4\x36\xa6\x19\xd5" + "\xff\x41\x7a\x8b\x6e\x9a\x97\x14" + "\x2a\xc8\xd0\xb8\xa3\x8e\x64\x32" + "\xb7\x2d\x76\x9b\x3b\xe2\x3f\x91" + "\xb4\x64\xbf\x59\x67\x14\xc3\xf5" + "\xa8\x92\x4b\x85\xdf\x80\xcb\xb5" + "\xc7\x80\xf9\x4a\xbc\xed\x67\x5a" + "\x0b\x58\x65\x1f\xc9\x6e\x9b\x0a", + .len = 128, + }, { + .key = "\x2d\x2e\x0f\x30\x32\xed\xa9\x1f" + "\x71\x4e\x68\x77\xe8\xa8\x5b\xdd" + "\x3c\x5e\x68\x6b\xab\x03\xe4\xf8", + .klen = 24, + .iv = "\x42\xc1\x61\x9a\x50\xfb\xc7\x6a" + "\x1a\x31\xa7\x87\xd0\x24\xcb\x5e", + .ptext = "\xc0\x3b\x12\x28\xca\x26\x7b\xb3" + "\x14\xc1\x7f\x66\xff\x3b\xa4\x80" + "\x59\x77\x4f\xa0\xd4\xb2\xd9\x8a" + "\xb6\x67\xe6\x28\xd3\x6f\xf2\xcf" + "\xb8\x6d\x2d\xc4\x2a\x69\x89\xff" + "\xcf\xbb\x11\x2e\x2a\x2b\x7c\xfd" + "\xcd\x56\x02\x95\xc9\x54\x6e\x62" + "\x6a\x97\x75\x1a\x21\x16\x46\xfb" + "\xc2\xab\x62\x54\xef\xba\xae\x46" + "\xd4\x14\xc6\xcc\x16\x1b\x95\xf9" + "\x05\x26\x23\x81\x19\x27\xad\x7b" + "\x9c\x8b\xfb\x65\xa4\x61\xee\x69" + "\x44\xbf\x59\xde\x03\x61\x11\x12" + "\x8d\x94\x48\x47\xa9\x52\x16\xfb" + "\x6b\xaf\x59\x6d\xab\x74\xbf\x5c" + "\xb6\x09\x21\x12\x42\x98\x13\xa1" + "\xa8\x6f\xb9\x6d\x4d\xa6\xdc\xea" + "\x61\x02\x3c\xa7\xcd\x1a\x28\x8c", + .ctext = 
"\xd7\xb4\xfc\xcc\x1f\xf7\xfc\x7d" + "\x69\xfa\xcb\x01\x60\xf3\x5a\x14" + "\xfe\x8c\x4e\xfa\x09\xb5\x0d\xda" + "\xff\xdd\xba\xdf\xa3\x6b\x3a\x87" + "\x21\xbb\xf8\x62\x14\x22\xdd\x9b" + "\x92\x23\xaa\xd7\xcc\xb2\x15\xd0" + "\xbd\x81\x95\x24\xc2\xc6\x53\x5b" + "\xf7\x3c\xa0\xf7\x36\xbc\xbf\xf3" + "\xfc\x1c\x6e\xe0\x71\x8d\xa1\x3d" + "\x8e\x1a\xc5\xba\xd5\x68\xd4\x7a" + "\xe0\x4f\x0a\x14\x89\x0b\xa6\x2f" + "\x18\xc5\x38\x76\xf1\xe7\x5c\xae" + "\x7a\xbb\x27\x1c\xf0\x7c\x6c\x14" + "\x07\xb7\x49\x6e\x29\x04\x38\x31" + "\x91\xe8\x1d\x0f\xfc\x3b\xb8\x20" + "\x58\x64\x11\xa1\xf5\xba\xa3\x62" + "\x92\xcf\x44\x63\x2c\xe8\x10\xb5" + "\xf0\x97\x86\xcb\x5f\xc1\x80\x7a", + .len = 144, + }, { + .key = "\x66\xb8\x4d\x60\x67\x82\xcc\x8d" + "\x1e\xda\x8f\x28\xe5\x02\xdc\x2c" + "\x54\x84\x2a\x06\xb5\xd1\x34\x57", + .klen = 24, + .iv = "\xb8\x28\x4d\xf5\x69\xb9\xf3\x33" + "\x5e\x0b\xa6\x62\x35\x9b\xfb\x97", + .ptext = "\x3e\xc6\xec\xaf\x74\xe8\x72\x91" + "\xb2\xc6\x56\xb3\x23\x29\x43\xe0" + "\xfb\xcc\x21\x38\x64\x78\x9e\x78" + "\xbb\x6e\x0d\x7b\xfd\x05\x74\x01" + "\x7c\x94\xe0\xb0\xd7\x92\xfc\x58" + "\x28\xfc\xe2\x7b\x7f\xf7\x31\x0d" + "\x90\xb7\x60\x78\xa8\x9f\x52\xe3" + "\xe6\xaa\x2a\xb4\xa7\x09\x60\x53" + "\x42\x0e\x15\x31\xf6\x48\xa3\x0a" + "\x20\xf0\x79\x67\xb1\x83\x26\x66" + "\xe0\xb1\xb3\xbd\x1c\x76\x36\xfd" + "\x45\x87\xa4\x14\x1b\xef\xe7\x16" + "\xf7\xfa\x30\x3d\xb9\x52\x8f\x2e" + "\x01\x68\xc1\x7d\xa2\x15\x49\x74" + "\x53\x82\xc2\x10\xa8\x45\x73\x4d" + "\x41\xcc\x24\xa3\x42\xff\x30\xd1" + "\x02\x21\xdc\xd9\x08\xf7\xe7\x4c" + "\x33\x2d\x62\xc7\x38\xf5\xc2\xbe" + "\x52\xf1\x34\x78\x34\x53\x30\x5b" + "\x43\x43\x51\x6a\x02\x81\x64\x0c", + .ctext = "\x71\xf6\x96\x02\x07\x71\x1a\x08" + "\x7c\xfe\x33\xc4\xc9\xbe\xe2\xed" + "\xd0\xcc\x5d\x27\x75\xb4\x5d\x8d" + "\x24\x03\xe4\x96\x31\x94\x0e\x38" + "\x14\x4f\xad\x16\x58\x0d\x73\xdc" + "\xbe\x5b\xcb\x38\xeb\x4d\xbc\x9a" + "\x44\x69\x7a\x12\x91\x14\x52\xfa" + "\xd2\xa2\xc5\x66\xd7\xaf\x4d\xb9" + "\xb1\x58\x24\x10\xde\x6a\xee\x7e" + "\x45\xf3\x76\xea\x47\x8a\xe6\x96" + "\x41\xf2\x96\x2d\x3c\xec\xcf\xc6" + "\x1d\xf4\x26\xc0\xea\x90\x27\x6e" + "\x87\xef\xb5\x39\x38\xdb\xad\xbf" + "\x57\x9a\x1d\xbc\x1d\xe5\x16\x91" + "\x41\x45\xbe\x67\x6c\x42\x0f\xad" + "\xcf\xfb\xcd\xf1\x4c\xd8\x73\xe7" + "\x24\x3b\xd7\x03\xeb\xd1\xb1\x1b" + "\x7d\xc9\x3d\x34\xd7\xb8\x69\x03" + "\x76\x95\x32\x26\xed\x88\x76\x89" + "\x13\xc6\xc8\xa6\x60\xf9\x73\x4d", + .len = 160, + }, { + .key = "\x82\x8e\x9e\x06\x7b\xc2\xe9\xb3" + "\x06\xa3\xfa\x99\x42\x67\x87\xac" + "\x21\xc7\xb0\x98\x6c\xf8\x26\x57" + "\x08\xdd\x92\x02\x77\x7b\x35\xe7", + .klen = 32, + .iv = "\xa1\xad\xcb\xdd\xd5\x19\xb6\xd4" + "\x0b\x62\x58\xb0\x6c\xa0\xc1\x58", + .ptext = "\x14\x0d\x8a\x09\x16\x00\x00\xf1" + "\xc0\x20\x86\xf9\x21\xd1\x34\xe2", + .ctext = "\x05\xe3\x34\xaf\x6c\x83\x14\x8b" + "\x9d\x1c\xd6\x87\x74\x91\xdf\x17", + .len = 16, + }, { + .key = "\xc9\xf3\xc4\x93\xd0\xcc\xaf\xb1" + "\x1a\x42\x93\x71\xd8\x4e\xd8\xaa" + "\x52\xad\x93\x2f\xe5\xd9\xaa\x5b" + "\x47\x37\x3a\xed\x13\x92\x35\x16", + .klen = 32, + .iv = "\x81\xc8\x50\xd1\x74\xc3\x1c\x73" + "\xbb\xab\x72\x83\x90\x5a\x15\xcb", + .ptext = "\x65\x11\x93\xaf\xe1\x69\x6c\xbe" + "\x25\x8c\x76\x87\x53\xa4\x80\xae" + "\x51\x94\x36\x3f\xca\xe7\x45\x41" + "\x76\x05\xbf\x8f\x9c\xad\xc0\xe3", + .ctext = "\x6B\x00\x6E\x49\x7A\x6D\xE3\x04" + "\x4E\xF7\x9F\x8A\x1F\x14\xBD\xB1" + "\xD3\x5D\xA4\x30\x26\x85\x85\xEF" + "\x12\xBC\xC7\xA1\x65\x82\xA7\x74", + .len = 32, + }, { + .key = "\xd5\x9f\x52\x34\x12\x99\x8e\x42" + "\xe0\x85\x04\x6f\xeb\xf1\x5d\xd0" + 
"\xc1\xbf\x3f\x84\xd9\x1e\x71\x44" + "\xd4\xb9\x40\x3c\x02\x2e\x21\x19", + .klen = 32, + .iv = "\x28\xc1\x97\x64\x81\x52\x57\x0e" + "\x02\x8c\xab\x4c\xe2\x60\x14\xa5", + .ptext = "\x5a\xb1\x33\x48\xaa\x51\xe9\xa4" + "\x5c\x2d\xbe\x33\xcc\xc4\x7f\x96" + "\xe8\xde\x2b\xe7\x35\x7a\x11\x4b" + "\x13\x08\x32\xc6\x41\xd8\xec\x54" + "\xa3\xd3\xda\x35\x43\x69\xf6\x88" + "\x97\xca\x00\x1b\x02\x59\x24\x82", + .ctext = "\x03\xaf\x76\xbd\x5e\x5b\xca\xc0" + "\xae\x44\xa2\x2f\xc2\x76\x2f\x50" + "\x6a\x73\x28\xf2\xba\xe8\xb2\xb8" + "\x43\x61\x41\x92\xff\xac\xcb\xa6" + "\x84\x31\xe3\x34\xd0\x37\x81\xab" + "\x2b\x0e\x97\x3c\x4a\x2d\xa4\x83", + .len = 48, + }, { + .key = "\x9c\x5d\xd7\x66\x36\xfa\x02\x20" + "\x99\x61\x62\x86\x0f\x43\x2e\x05" + "\x25\x8b\xfb\xf1\xae\x4c\xde\x18" + "\x0b\xf8\xd0\x9d\xaa\xd4\x56\x04", + .klen = 32, + .iv = "\xcd\xa8\x61\x89\x8d\xbb\x72\xb6" + "\x1e\xfe\x03\x34\x54\x88\x23\xe2", + .ptext = "\x66\x42\x60\x24\xf3\xe4\xe9\x7e" + "\x42\x20\xf4\x61\xce\x1c\x5e\x44" + "\x02\x26\x91\xf7\x41\xa4\xab\x34" + "\x29\x49\xdd\x78\x19\x8f\x10\x10" + "\xf0\x61\xcf\x77\x18\x17\x61\xdf" + "\xc4\xa8\x35\x0e\x75\x1b\x84\x6b" + "\xc3\x3f\x31\x59\x5a\x9c\xf4\xc3" + "\x43\xa9\xb7\xf8\x65\x40\x40\xba", + .ctext = "\xb6\x41\x55\x8f\xeb\x16\x1e\x4c" + "\x81\xa0\x85\x6c\xf0\x07\xa5\x2a" + "\x12\x0f\x1d\xb2\xaa\xba\x85\x0f" + "\xa6\x27\x1a\x91\xa6\xc5\x8c\x2a" + "\xde\x8d\x3a\xa9\x8b\xcf\x24\xf1" + "\x82\x51\x6b\xc8\x01\xd7\x7b\x89" + "\x6c\xfc\xb1\x96\x6c\xa2\xd7\x1f" + "\x4b\x7a\xd9\x8d\x34\xaa\xa0\x8a", + .len = 64, + }, { + .key = "\x4b\x4e\x11\x91\x27\xcf\x8c\x66" + "\x17\xfa\x5b\x4c\xa8\xb8\x0f\xa1" + "\x99\x5b\x07\x56\xe1\x8d\x94\x8b" + "\xf2\x86\x5a\x5f\x40\x83\xfa\x06", + .klen = 32, + .iv = "\xfd\x73\xee\x1c\x27\xf3\xb4\x38" + "\xc5\x7c\x2e\xc5\x6e\xdb\x49\x0d", + .ptext = "\x0a\xe2\xdd\x97\xdd\x5e\xd4\xb3" + "\xc1\x49\x8f\x53\xb2\x40\x85\x1c" + "\x90\x37\x2d\xbd\x21\x6b\x1f\x80" + "\x56\x98\x76\x1e\xcf\x6c\x78\xd8" + "\xa0\x3c\x79\xc3\x56\xf7\xfc\x64" + "\x35\x58\x1c\x7c\xc4\x5f\x2a\x25" + "\x8c\x01\x98\x1e\x1c\x1f\x15\x64" + "\x50\xb5\xfa\x02\xd3\x54\xe5\x29" + "\xe3\xd2\xa3\x83\x54\x40\x54\xc5" + "\xd8\x1c\xc9\x84\x7d\xc8\x31\x49", + .ctext = "\x53\x2a\xa8\xa0\x15\xaf\x2f\xc4" + "\x7d\x31\xb4\x61\x80\x5f\xd1\xb6" + "\xa4\x29\x40\x72\x1b\xb2\x96\xb7" + "\x4d\x5e\x5b\x53\x44\xa4\xf1\xe9" + "\xf0\x27\x2f\x26\x84\x66\x13\xa4" + "\xb2\x19\x55\xb1\x18\xf3\x69\xfd" + "\xb0\x2f\x08\x3f\xa5\x41\xe2\x34" + "\x5e\x63\x57\x0e\xef\x17\x78\xbc" + "\xc3\x65\x7c\xbe\x6b\xa3\xa3\xef" + "\x58\x05\x30\x5a\x08\xbd\xf7\x0e", + .len = 80, + }, { + .key = "\x77\x3b\xf5\xe7\x20\xf7\xe0\x0c" + "\x3d\x3a\x83\x17\x83\x79\xd8\x29" + "\x5a\x0a\x25\x7f\xe0\x21\x23\xff" + "\x31\xfd\x60\x10\xe6\x63\xe2\xaf", + .klen = 32, + .iv = "\xdb\x4c\x0d\xc0\x36\xdb\xc7\xa1" + "\xa4\x91\xd9\x05\xe6\xc4\x98\x00", + .ptext = "\x8d\x4d\xc6\x5e\x01\x82\xb3\x39" + "\xc8\x64\xa7\xcb\x05\x19\x84\x80" + "\x3f\x9c\xa8\x4f\x64\xb3\x11\x4b" + "\x0e\x21\xc4\x75\x04\x1d\x6f\xd5" + "\x04\x04\x4d\xc9\xc0\x4b\x4a\x9c" + "\x26\xb7\x68\x5a\xe4\xd0\x61\xe3" + "\x2c\x93\x8e\x3f\xb4\x67\x07\x31" + "\x02\x52\x0c\x0f\xe6\x6d\xa3\xd0" + "\x48\x95\x83\x67\x23\x64\x31\x50" + "\xd2\x5f\x69\x68\x8b\x71\xbf\x01" + "\x29\x99\x86\x36\x2e\xdf\xf1\x7c" + "\x08\x8c\x78\x7a\x93\x9a\x7d\x1b", + .ctext = "\x92\x90\x48\x2f\x3a\x6b\x68\x43" + "\x28\x9b\x7d\x1e\x46\x28\xd8\x58" + "\x0f\x47\x8b\xb5\x83\x35\x35\x3e" + "\xdf\x59\x3d\xb3\x47\xfc\xfc\x52" + "\x86\xeb\xb3\x58\x54\xd5\x0a\xb4" + "\xad\xbd\x5c\x09\xfc\x08\xc2\x01" + "\x5e\x9b\x30\x11\xc4\x40\x2e\x32" + 
"\x9c\xa0\xf1\xfd\xae\xd4\x75\x5e" + "\x52\xd9\x19\x4d\xc1\xd4\xb6\x19" + "\x88\xfb\x29\x17\x15\xbb\x60\xd6" + "\x5a\xe9\x82\x89\xaf\x30\x4e\xd4" + "\x47\xde\x86\x88\x95\x4c\x13\x59", + .len = 96, + }, { + .key = "\xe0\x6a\x30\xe1\x35\xb5\xb0\x7c" + "\x54\xc5\x73\x9b\x00\xe5\xe7\x02" + "\xbe\x16\x59\xdc\xd9\x03\x17\x53" + "\xa8\x37\xd1\x5f\x13\x8e\x45\xdb", + .klen = 32, + .iv = "\x54\xe9\x1c\xde\xfb\x26\x0e\x48" + "\x35\x50\x4d\x9b\x4d\x12\x21\x0d", + .ptext = "\x73\x72\xcf\xdb\xbd\xbc\xc0\xdf" + "\x6b\xbb\xdf\x65\x6f\x2f\x43\x3b" + "\x2d\x7c\x0e\x07\x7f\xa0\x95\xdd" + "\xfc\x67\xc1\x11\x7a\xe2\xb5\x4a" + "\xd1\x15\xb0\xd8\xe2\xf0\x35\x48" + "\xd8\x81\x6a\x35\xae\x67\xbf\x61" + "\xf2\x8a\xcf\x04\xc8\x09\x8b\x63" + "\x31\x74\x95\xa5\x8d\x3c\xea\xe2" + "\x5f\x67\xc4\x7e\x51\x88\xbf\xb5" + "\x78\xef\x3a\x76\xd8\x1d\x00\x75" + "\x2b\x7b\x28\x7c\xde\x4b\x39\x01" + "\x5d\xde\x92\xfe\x90\x07\x09\xfd" + "\xa5\xd1\xd3\x72\x11\x6d\xa4\x4e" + "\xd1\x6e\x16\xd1\xf6\x39\x4f\xa0", + .ctext = "\x3b\xc5\xee\xfc\x05\xaf\xa6\xb7" + "\xfe\x12\x24\x79\x31\xad\x32\xb5" + "\x64\x5a\x17\xc9\xbf\x1f\xdc\xce" + "\x8d\x73\x00\x71\xd9\xfb\xd2\xe6" + "\xc3\x54\xb4\xf3\x36\xe8\x89\x12" + "\x5a\x32\x0b\xa6\xec\x5f\x89\xe7" + "\xe8\x34\x92\xa6\xce\xde\x8f\xf9" + "\x4f\xda\xed\x61\x8e\xb2\x81\xbe" + "\xf2\x15\x85\xbe\xa1\x5f\x19\x85" + "\x71\x7e\xda\x46\x59\xed\x5d\xb0" + "\xd9\x68\x97\xe0\xcd\x1d\x1b\x65" + "\xf5\xc9\x44\xe2\xb4\x42\x17\x7c" + "\xe7\x58\xf3\x2f\xcf\xbe\x5c\x66" + "\xaa\xd3\x61\xa5\x9a\x79\xbb\xa0", + .len = 112, + }, { + .key = "\x60\xb6\xde\x17\xca\x4c\xe7\xe0" + "\x07\x0d\x80\xc5\x8a\x2d\x5a\xc2" + "\x2c\xb9\xa4\x5f\x2a\x85\x2c\x3d" + "\x6d\x67\xc8\xee\x0f\xa2\xf4\x09", + .klen = 32, + .iv = "\x1a\xa5\xbc\x7e\x93\xf6\xdd\x28" + "\xb7\x69\x27\xa1\x84\x95\x25\x5a", + .ptext = "\x7b\x88\x00\xeb\xa5\xba\xa1\xa7" + "\xd4\x40\x16\x74\x2b\x42\x37\xda" + "\xe0\xaf\x89\x59\x41\x2f\x62\x00" + "\xf5\x5a\x4e\x3b\x85\x27\xb2\xed" + "\x1b\xa7\xaf\xbe\x89\xf3\x49\xb7" + "\x8c\x63\xc9\x0c\x52\x00\x5f\x38" + "\x3b\x3c\x0c\x4f\xdd\xe1\xbf\x90" + "\x4a\x48\xbf\x3a\x95\xcb\x48\xa2" + "\x92\x7c\x79\x81\xde\x18\x6e\x92" + "\x1f\x36\xa9\x5d\x8d\xc4\xb6\x4d" + "\xb2\xb4\x0e\x09\x6d\xf3\x3d\x01" + "\x3d\x9b\x40\x47\xbc\x69\x31\xa1" + "\x6a\x71\x26\xdc\xac\x10\x56\x63" + "\x15\x23\x7d\x10\xe3\x76\x82\x41" + "\xcd\x80\x57\x2f\xfc\x4d\x22\x7b" + "\x57\xbb\x9a\x0a\x03\xe9\xb3\x13", + .ctext = "\x37\x0d\x47\x21\xbc\x28\x0b\xf7" + "\x85\x5f\x60\x57\xf2\x7f\x92\x20" + "\x53\x1a\xbf\xd1\x7f\x8c\x39\x29" + "\x0e\x18\xab\x0c\x00\x92\xd3\x68" + "\x60\x56\x3b\x00\xef\xf8\x02\xfa" + "\xcb\x92\x1a\x91\xe1\xf0\x4f\x8a" + "\xc6\x4f\x65\x16\x71\x8b\x5d\xd5" + "\x79\xa9\x6d\x68\x1b\x59\xe7\x2a" + "\x1c\xd0\x5d\xfb\x06\x3b\x15\x72" + "\xa8\xd1\x59\x9a\xb2\x6c\xf2\xd5" + "\x19\xef\xde\x03\x4c\x75\x65\x38" + "\x5b\xda\xc9\xf0\x44\x99\xb2\x6e" + "\x78\xfb\x85\x5a\x92\x91\x1a\x0a" + "\x13\x0c\x1b\x1c\xbe\xbe\x46\x6e" + "\x73\xff\xc2\x6e\xb9\x06\x16\x7e" + "\xf6\xc0\x01\x30\x34\x56\x46\x55", + .len = 128, + }, { + .key = "\x2a\xed\x7d\x76\xfc\xc5\x49\x50" + "\xf4\x90\x0f\xcc\x5d\xff\x0c\x3c" + "\x14\x06\xaf\x68\x8f\xd7\xb6\x25" + "\x1e\x10\x95\x2a\x71\x33\x17\x20", + .klen = 32, + .iv = "\x5b\x58\x47\xf8\xd5\x1e\x91\x81" + "\x46\xe7\x25\x3a\x02\x45\x9c\x65", + .ptext = "\x10\xaf\xde\x5c\x30\x79\x43\x28" + "\x1c\x03\xf8\x50\x0f\x30\xa5\xef" + "\x84\x19\x4c\x09\x40\x03\x75\x1f" + "\x92\x8f\x88\x01\xda\x31\x7a\xe4" + "\x48\xe3\xab\xb4\xe6\x1b\x0f\xac" + "\xd9\xfa\x8d\x23\xe4\xc6\xa4\xa9" + "\x2d\x9a\x54\x52\x44\x5c\x3c\x52" + 
"\x61\xf0\x00\xca\xed\xab\xed\xe2" + "\x44\x0b\xe0\x18\xba\xa5\x63\xd8" + "\xdc\x5e\x1a\x4c\xf8\xde\x5e\x75" + "\xdf\x42\x27\x7b\xe9\x11\x2f\x41" + "\x3a\x72\x54\x3d\x44\x9c\x3e\x87" + "\x8d\x8d\x43\x2f\xb2\xff\x87\xd4" + "\xad\x98\x68\x72\x53\x61\x19\x7c" + "\x20\x79\x8c\x2b\x37\x0b\x96\x15" + "\xa5\x7d\x4e\x01\xe6\xea\xb6\xfa" + "\xaa\xd3\x9d\xa2\xd9\x11\xc3\xc9" + "\xd4\x0e\x3f\x3e\xfe\x35\x1e\xe5", + .ctext = "\xb0\x2b\x75\x5f\x33\x1b\x05\x49" + "\x06\xf1\x43\x91\xc2\x85\xfa\xac" + "\x74\xd5\x8c\xc9\x47\x6e\x5a\xf6" + "\x69\x33\x4c\xcb\x2f\x36\x4b\x41" + "\xec\x05\x69\xab\x7f\x42\xc9\xd2" + "\x26\x64\x51\x9e\x3d\x65\x35\xf0" + "\x8d\x5e\x8a\xb1\xee\xdf\x1a\x98" + "\x36\xd2\x37\x49\x5b\xe2\x57\x00" + "\x1d\x72\x7e\xe8\x38\x11\x83\x15" + "\xc7\x4e\x65\xa4\x2c\x9e\x6a\x3e" + "\xb4\x78\x3f\xe9\x91\x5d\x06\xa9" + "\xf1\xfc\x6b\x08\xe5\x2b\x2a\x99" + "\x65\xa7\x2e\x47\xf9\xc2\xb1\x8b" + "\x88\x2f\xb7\x62\x84\x63\x94\x00" + "\x49\xa7\xd0\x2b\x54\x7a\x69\xb3" + "\x04\x66\xfc\x97\x40\x92\xd1\xb8" + "\xb4\x2a\x9e\xdb\x31\xcd\x48\x84" + "\x29\x3b\x02\xac\xb8\x54\x95\xb4", + .len = 144, + }, { + .key = "\x7b\xa7\x4d\x0a\x37\x30\xb9\xf5" + "\x2a\x79\xb4\xbf\xdb\x7f\x9b\x64" + "\x23\x43\xb5\x18\x34\xc4\x5f\xdf" + "\xd9\x2a\x66\x58\x00\x44\xb5\xd9", + .klen = 32, + .iv = "\x75\x34\x30\xc1\xf0\x69\xdf\x0a" + "\x52\xce\x4f\x1e\x2c\x41\x35\xec", + .ptext = "\x81\x47\x55\x3a\xcd\xfe\xa2\x3d" + "\x45\x53\xa7\x67\x61\x74\x25\x80" + "\x98\x89\xfe\xf8\x6a\x9f\x51\x7c" + "\xa4\xe4\xe7\xc7\xe0\x1a\xce\xbb" + "\x4b\x46\x43\xb0\xab\xa8\xd6\x0c" + "\xa0\xf0\xc8\x13\x29\xaf\xb8\x01" + "\x6b\x0c\x7e\x56\xae\xb8\x58\x72" + "\xa9\x24\x44\x61\xff\xf1\xac\xf8" + "\x09\xa8\x48\x21\xd6\xab\x41\x73" + "\x70\x6b\x92\x06\x61\xdc\xb4\x85" + "\x76\x26\x7a\x84\xc3\x9e\x3a\x14" + "\xe7\xf4\x2d\x95\x92\xad\x18\xcc" + "\x44\xd4\x2c\x36\x57\xed\x2b\x9b" + "\x3f\x2b\xcd\xe5\x11\xe3\x62\x33" + "\x42\x3f\xb8\x2a\xb1\x37\x3f\x8b" + "\xe8\xbd\x6b\x0b\x9f\x38\x5a\x5f" + "\x82\x34\xb7\x96\x35\x58\xde\xab" + "\x94\x98\x41\x5b\x3f\xac\x0a\x34" + "\x56\xc0\x02\xef\x81\x6d\xb1\xff" + "\x34\xe8\xc7\x6a\x31\x79\xba\xd8", + .ctext = "\x4e\x00\x7c\x52\x45\x76\xf9\x3d" + "\x1a\xd1\x72\xbc\xb9\x0f\xa9\xfb" + "\x0a\xf5\xe8\x11\x66\x8b\xad\x68" + "\x5a\x2e\xbf\x09\x33\x9d\xb6\x67" + "\xe5\xcb\x0a\xe0\xac\xed\x73\x4b" + "\xbb\x15\xde\xd8\xab\x33\x28\x5f" + "\x96\x07\x3c\x28\x79\x88\x84\xc7" + "\x13\xf7\x0d\xa5\x97\x3b\xd9\xb1" + "\xf2\x65\xb0\xac\xbb\x8a\x97\xd1" + "\x70\x3a\x91\x65\xc8\x39\x04\xe7" + "\x1a\x9c\x80\x65\x2b\x69\x4b\xdc" + "\xdc\xc7\xf1\x31\xda\xab\xb4\xd7" + "\x46\x2e\x1d\xc9\x2e\xe9\x46\xec" + "\xa4\xa1\x91\x6b\x4a\x09\xf9\x39" + "\x7b\x7d\x6d\xf5\x43\x7f\xcc\x74" + "\x96\xfa\x48\xd0\xe1\x74\x24\xd0" + "\x19\x22\x24\x84\x2b\x12\x10\x46" + "\x90\xbd\xa9\x93\xb7\xf7\x36\xd4" + "\x48\xc7\x32\x83\x8c\xa9\xcd\x5a" + "\x2f\x05\x33\xc1\x5b\x50\x70\xc4", + .len = 160, + } +}; + +static const struct aead_testvec aria_gcm_tv_template[] = { + { + .key = "\xe9\x1e\x5e\x75\xda\x65\x55\x4a" + "\x48\x18\x1f\x38\x46\x34\x95\x62", + .klen = 16, + .iv = "\x00\x00\x20\xe8\xf5\xeb\x00\x00" + "\x00\x00\x31\x5e", + .assoc = "\x80\x08\x31\x5e\xbf\x2e\x6f\xe0" + "\x20\xe8\xf5\xeb", + .alen = 12, + .ptext = "\xf5\x7a\xf5\xfd\x4a\xe1\x95\x62" + "\x97\x6e\xc5\x7a\x5a\x7a\xd5\x5a" + "\x5a\xf5\xc5\xe5\xc5\xfd\xf5\xc5" + "\x5a\xd5\x7a\x4a\x72\x72\xd5\x72" + "\x62\xe9\x72\x95\x66\xed\x66\xe9" + "\x7a\xc5\x4a\x4a\x5a\x7a\xd5\xe1" + "\x5a\xe5\xfd\xd5\xfd\x5a\xc5\xd5" + "\x6a\xe5\x6a\xd5\xc5\x72\xd5\x4a" + "\xe5\x4a\xc5\x5a\x95\x6a\xfd\x6a" + 
"\xed\x5a\x4a\xc5\x62\x95\x7a\x95" + "\x16\x99\x16\x91\xd5\x72\xfd\x14" + "\xe9\x7a\xe9\x62\xed\x7a\x9f\x4a" + "\x95\x5a\xf5\x72\xe1\x62\xf5\x7a" + "\x95\x66\x66\xe1\x7a\xe1\xf5\x4a" + "\x95\xf5\x66\xd5\x4a\x66\xe1\x6e" + "\x4a\xfd\x6a\x9f\x7a\xe1\xc5\xc5" + "\x5a\xe5\xd5\x6a\xfd\xe9\x16\xc5" + "\xe9\x4a\x6e\xc5\x66\x95\xe1\x4a" + "\xfd\xe1\x14\x84\x16\xe9\x4a\xd5" + "\x7a\xc5\x14\x6e\xd5\x9d\x1c\xc5", + .plen = 160, + .ctext = "\x4d\x8a\x9a\x06\x75\x55\x0c\x70" + "\x4b\x17\xd8\xc9\xdd\xc8\x1a\x5c" + "\xd6\xf7\xda\x34\xf2\xfe\x1b\x3d" + "\xb7\xcb\x3d\xfb\x96\x97\x10\x2e" + "\xa0\xf3\xc1\xfc\x2d\xbc\x87\x3d" + "\x44\xbc\xee\xae\x8e\x44\x42\x97" + "\x4b\xa2\x1f\xf6\x78\x9d\x32\x72" + "\x61\x3f\xb9\x63\x1a\x7c\xf3\xf1" + "\x4b\xac\xbe\xb4\x21\x63\x3a\x90" + "\xff\xbe\x58\xc2\xfa\x6b\xdc\xa5" + "\x34\xf1\x0d\x0d\xe0\x50\x2c\xe1" + "\xd5\x31\xb6\x33\x6e\x58\x87\x82" + "\x78\x53\x1e\x5c\x22\xbc\x6c\x85" + "\xbb\xd7\x84\xd7\x8d\x9e\x68\x0a" + "\xa1\x90\x31\xaa\xf8\x91\x01\xd6" + "\x69\xd7\xa3\x96\x5c\x1f\x7e\x16" + "\x22\x9d\x74\x63\xe0\x53\x5f\x4e" + "\x25\x3f\x5d\x18\x18\x7d\x40\xb8" + "\xae\x0f\x56\x4b\xd9\x70\xb5\xe7" + "\xe2\xad\xfb\x21\x1e\x89\xa9\x53" + "\x5a\xba\xce\x3f\x37\xf5\xa7\x36" + "\xf4\xbe\x98\x4b\xbf\xfb\xed\xc1", + .clen = 176, + }, { + .key = "\x0c\x5f\xfd\x37\xa1\x1e\xdc\x42" + "\xc3\x25\x28\x7f\xc0\x60\x4f\x2e" + "\x3e\x8c\xd5\x67\x1a\x00\xfe\x32" + "\x16\xaa\x5e\xb1\x05\x78\x3b\x54", + .klen = 32, + .iv = "\x00\x00\x20\xe8\xf5\xeb\x00\x00" + "\x00\x00\x31\x5e", + .assoc = "\x80\x08\x31\x5e\xbf\x2e\x6f\xe0" + "\x20\xe8\xf5\xeb", + .alen = 12, + .ptext = "\xf5\x7a\xf5\xfd\x4a\xe1\x95\x62" + "\x97\x6e\xc5\x7a\x5a\x7a\xd5\x5a" + "\x5a\xf5\xc5\xe5\xc5\xfd\xf5\xc5" + "\x5a\xd5\x7a\x4a\x72\x72\xd5\x72" + "\x62\xe9\x72\x95\x66\xed\x66\xe9" + "\x7a\xc5\x4a\x4a\x5a\x7a\xd5\xe1" + "\x5a\xe5\xfd\xd5\xfd\x5a\xc5\xd5" + "\x6a\xe5\x6a\xd5\xc5\x72\xd5\x4a" + "\xe5\x4a\xc5\x5a\x95\x6a\xfd\x6a" + "\xed\x5a\x4a\xc5\x62\x95\x7a\x95" + "\x16\x99\x16\x91\xd5\x72\xfd\x14" + "\xe9\x7a\xe9\x62\xed\x7a\x9f\x4a" + "\x95\x5a\xf5\x72\xe1\x62\xf5\x7a" + "\x95\x66\x66\xe1\x7a\xe1\xf5\x4a" + "\x95\xf5\x66\xd5\x4a\x66\xe1\x6e" + "\x4a\xfd\x6a\x9f\x7a\xe1\xc5\xc5" + "\x5a\xe5\xd5\x6a\xfd\xe9\x16\xc5" + "\xe9\x4a\x6e\xc5\x66\x95\xe1\x4a" + "\xfd\xe1\x14\x84\x16\xe9\x4a\xd5" + "\x7a\xc5\x14\x6e\xd5\x9d\x1c\xc5", + .plen = 160, + .ctext = "\x6f\x9e\x4b\xcb\xc8\xc8\x5f\xc0" + "\x12\x8f\xb1\xe4\xa0\xa2\x0c\xb9" + "\x93\x2f\xf7\x45\x81\xf5\x4f\xc0" + "\x13\xdd\x05\x4b\x19\xf9\x93\x71" + "\x42\x5b\x35\x2d\x97\xd3\xf3\x37" + "\xb9\x0b\x63\xd1\xb0\x82\xad\xee" + "\xea\x9d\x2d\x73\x91\x89\x7d\x59" + "\x1b\x98\x5e\x55\xfb\x50\xcb\x53" + "\x50\xcf\x7d\x38\xdc\x27\xdd\xa1" + "\x27\xc0\x78\xa1\x49\xc8\xeb\x98" + "\x08\x3d\x66\x36\x3a\x46\xe3\x72" + "\x6a\xf2\x17\xd3\xa0\x02\x75\xad" + "\x5b\xf7\x72\xc7\x61\x0e\xa4\xc2" + "\x30\x06\x87\x8f\x0e\xe6\x9a\x83" + "\x97\x70\x31\x69\xa4\x19\x30\x3f" + "\x40\xb7\x2e\x45\x73\x71\x4d\x19" + "\xe2\x69\x7d\xf6\x1e\x7c\x72\x52" + "\xe5\xab\xc6\xba\xde\x87\x6a\xc4" + "\x96\x1b\xfa\xc4\xd5\xe8\x67\xaf" + "\xca\x35\x1a\x48\xae\xd5\x28\x22" + "\xe2\x10\xd6\xce\xd2\xcf\x43\x0f" + "\xf8\x41\x47\x29\x15\xe7\xef\x48", + .clen = 176, + } +}; + static const struct cipher_testvec chacha20_tv_template[] = { { /* RFC7539 A.2. 
Test Vector #1 */ .key = "\x00\x00\x00\x00\x00\x00\x00\x00" From 30fb034361ff1b9bfc569b2d8d66b544ea3eb18f Mon Sep 17 00:00:00 2001 From: Yuan Can Date: Mon, 4 Jul 2022 13:01:45 +0000 Subject: [PATCH 56/89] crypto: ccree - Add missing clk_disable_unprepare() in cc_pm_resume() Add clk_disable_unprepare() on error path in cc_pm_resume(). Reported-by: Hulk Robot Signed-off-by: Yuan Can Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_pm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d5421b0c6831c..6124fbbbed946 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -41,6 +41,7 @@ static int cc_pm_resume(struct device *dev) /* wait for Cryptocell reset completion */ if (!cc_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); + clk_disable_unprepare(drvdata->clk); return -EBUSY; } @@ -48,6 +49,7 @@ static int cc_pm_resume(struct device *dev) rc = init_cc_regs(drvdata); if (rc) { dev_err(dev, "init_cc_regs (%x)\n", rc); + clk_disable_unprepare(drvdata->clk); return rc; } /* check if tee fips error occurred during power down */ From 4fdcabb86ab17621b54695e5bb52ceddef434e82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 5 Jul 2022 22:51:38 +0200 Subject: [PATCH 57/89] crypto: atmel-aes - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so aes_dd is never NULL. This is a preparation for making platform remove callbacks return void. Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/atmel-aes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index f72c6b3e4ad81..886bf258544c6 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2669,8 +2669,7 @@ static int atmel_aes_remove(struct platform_device *pdev) struct atmel_aes_dev *aes_dd; aes_dd = platform_get_drvdata(pdev); - if (!aes_dd) - return -ENODEV; + spin_lock(&atmel_aes.lock); list_del(&aes_dd->list); spin_unlock(&atmel_aes.lock); From 25edb4cddb0f3deb260b52106b73c3d0f4968489 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 5 Jul 2022 22:51:39 +0200 Subject: [PATCH 58/89] crypto: atmel-sha - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so sha_dd is never NULL. This is a preparation for making platform remove callbacks return void. 
Signed-off-by: Uwe Kleine-König Reviewed-by: Claudiu Beznea Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index d1628112dacc1..e054e0ac6fc2f 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2669,8 +2669,7 @@ static int atmel_sha_remove(struct platform_device *pdev) struct atmel_sha_dev *sha_dd; sha_dd = platform_get_drvdata(pdev); - if (!sha_dd) - return -ENODEV; + spin_lock(&atmel_sha.lock); list_del(&sha_dd->list); spin_unlock(&atmel_sha.lock); From 515f4fc66bf176b91d24d89deddc18e0be12543f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 5 Jul 2022 22:51:40 +0200 Subject: [PATCH 59/89] crypto: atmel-tdes - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so tdes_dd is never NULL. This is a preparation for making platform remove callbacks return void. Signed-off-by: Uwe Kleine-König Reviewed-by: Claudiu Beznea Signed-off-by: Herbert Xu --- drivers/crypto/atmel-tdes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 9fd7b8e439d2f..a5e78aa08bf0f 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1266,8 +1266,7 @@ static int atmel_tdes_remove(struct platform_device *pdev) struct atmel_tdes_dev *tdes_dd; tdes_dd = platform_get_drvdata(pdev); - if (!tdes_dd) - return -ENODEV; + spin_lock(&atmel_tdes.lock); list_del(&tdes_dd->list); spin_unlock(&atmel_tdes.lock); From da1e716864aeeaeef0a32a7cdddfb8c6ebd8c1f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 5 Jul 2022 22:51:41 +0200 Subject: [PATCH 60/89] crypto: omap-aes - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so dd is never NULL. This is a preparation for making platform remove callbacks return void. While touching this driver remove a stray empty line. 
Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 581211a926283..67a99c760bc4b 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1261,9 +1261,6 @@ static int omap_aes_remove(struct platform_device *pdev) struct aead_alg *aalg; int i, j; - if (!dd) - return -ENODEV; - spin_lock_bh(&list_lock); list_del(&dd->list); spin_unlock_bh(&list_lock); @@ -1279,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev) aalg = &dd->pdata->aead_algs_info->algs_list[i]; crypto_unregister_aead(aalg); dd->pdata->aead_algs_info->registered--; - } crypto_engine_exit(dd->engine); From 8ce715e711659322a6e05cad154aa1355bb2e14f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 5 Jul 2022 22:51:42 +0200 Subject: [PATCH 61/89] crypto: omap-des - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so dd is never NULL. This is a preparation for making platform remove callbacks return void. While touching this driver remove an assignment without effect. Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/omap-des.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 538aff80869f6..f783769ea1107 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -1091,9 +1091,6 @@ static int omap_des_remove(struct platform_device *pdev) struct omap_des_dev *dd = platform_get_drvdata(pdev); int i, j; - if (!dd) - return -ENODEV; - spin_lock_bh(&list_lock); list_del(&dd->list); spin_unlock_bh(&list_lock); @@ -1106,7 +1103,6 @@ static int omap_des_remove(struct platform_device *pdev) tasklet_kill(&dd->done_task); omap_des_dma_cleanup(dd); pm_runtime_disable(dd->dev); - dd = NULL; return 0; } From 35b22c19daa1bbe77dce2f7f776c0cf2acff1f00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 5 Jul 2022 22:51:43 +0200 Subject: [PATCH 62/89] crypto: omap-sham - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so dd is never NULL. This is a preparation for making platform remove callbacks return void. 
Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4b37dc69a50ce..655a7f5a406a1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2197,8 +2197,7 @@ static int omap_sham_remove(struct platform_device *pdev) int i, j; dd = platform_get_drvdata(pdev); - if (!dd) - return -ENODEV; + spin_lock_bh(&sham.lock); list_del(&dd->list); spin_unlock_bh(&sham.lock); From 1d5390a33a4b54b2129316656792d55755a03d06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Tue, 5 Jul 2022 22:51:44 +0200 Subject: [PATCH 63/89] crypto: s5p-sss - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so pdata is never NULL. This is a preparation for making platform remove callbacks return void. Signed-off-by: Uwe Kleine-König Acked-by: Vladimir Zapolskiy Reviewed-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 7717e9e5977bb..b79e49aa724f6 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -2321,9 +2321,6 @@ static int s5p_aes_remove(struct platform_device *pdev) struct s5p_aes_dev *pdata = platform_get_drvdata(pdev); int i; - if (!pdata) - return -ENODEV; - for (i = 0; i < ARRAY_SIZE(algs); i++) crypto_unregister_skcipher(&algs[i]); From 2e26efb384d8a38ac62dc7022cff448cf54b80ee Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 5 Jul 2022 15:58:57 -0700 Subject: [PATCH 64/89] crypto: caam/qi2 - switch to netif_napi_add_tx_weight() caam has its own special NAPI weights. It's also a crypto device so presumably it can't be used for packet Rx. Switch to the (new) correct API. 
Signed-off-by: Jakub Kicinski Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamalg_qi2.c | 5 +++-- drivers/crypto/caam/qi.c | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 4b81fb33f199b..4482cb145d051 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -5083,8 +5083,9 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) ppriv->net_dev.dev = *dev; INIT_LIST_HEAD(&ppriv->net_dev.napi_list); - netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, - DPAA2_CAAM_NAPI_WEIGHT); + netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi, + dpaa2_dpseci_poll, + DPAA2_CAAM_NAPI_WEIGHT); } return 0; diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 49439d0d1b3c2..c36f27376d7e0 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -749,8 +749,8 @@ int caam_qi_init(struct platform_device *caam_pdev) net_dev->dev = *qidev; INIT_LIST_HEAD(&net_dev->napi_list); - netif_napi_add(net_dev, irqtask, caam_qi_poll, - CAAM_NAPI_WEIGHT); + netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll, + CAAM_NAPI_WEIGHT); napi_enable(irqtask); } From c6a16f4bbf5518603b51df23f23e46223244dcb0 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 6 Jul 2022 11:58:44 +0300 Subject: [PATCH 65/89] crypto: atmel-sha - initialize sha_dd while declaring Initialize sha_dd with platform_get_drvdata() when declaring it. Signed-off-by: Claudiu Beznea Signed-off-by: Herbert Xu --- drivers/crypto/atmel-sha.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index e054e0ac6fc2f..ca4b01926d1b1 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -2666,9 +2666,7 @@ static int atmel_sha_probe(struct platform_device *pdev) static int atmel_sha_remove(struct platform_device *pdev) { - struct atmel_sha_dev *sha_dd; - - sha_dd = platform_get_drvdata(pdev); + struct atmel_sha_dev *sha_dd = platform_get_drvdata(pdev); spin_lock(&atmel_sha.lock); list_del(&sha_dd->list); From 6c14a9650b8729882995f5fcd6c62336b40aeb26 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Wed, 6 Jul 2022 12:17:27 +0300 Subject: [PATCH 66/89] crypto: atmel-tdes - initialize tdes_dd while declaring Initialize tdes_dd with platform_get_drvdata() when declaring it. Signed-off-by: Claudiu Beznea Signed-off-by: Herbert Xu --- drivers/crypto/atmel-tdes.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index a5e78aa08bf0f..8b7bc1076e0db 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1263,9 +1263,7 @@ static int atmel_tdes_probe(struct platform_device *pdev) static int atmel_tdes_remove(struct platform_device *pdev) { - struct atmel_tdes_dev *tdes_dd; - - tdes_dd = platform_get_drvdata(pdev); + struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev); spin_lock(&atmel_tdes.lock); list_del(&tdes_dd->list); From a65c9a2a0b43118ee6f00eeeb73aefdcbd89728f Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Wed, 6 Jul 2022 14:11:43 -0500 Subject: [PATCH 67/89] crypto: sa2ul - Set the supported_algos bits individually Setting these individually gives a better picture of supported functions at a glance. Plus if the list changes an unwanted one will not accidentally get set with GENMASK. 
Signed-off-by: Andrew Davis Signed-off-by: Herbert Xu --- drivers/crypto/sa2ul.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 6957a125b4470..1d732113b81ec 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -2361,7 +2361,15 @@ static int sa_link_child(struct device *dev, void *data) static struct sa_match_data am654_match_data = { .priv = 1, .priv_id = 1, - .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0), + .supported_algos = BIT(SA_ALG_CBC_AES) | + BIT(SA_ALG_EBC_AES) | + BIT(SA_ALG_CBC_DES3) | + BIT(SA_ALG_ECB_DES3) | + BIT(SA_ALG_SHA1) | + BIT(SA_ALG_SHA256) | + BIT(SA_ALG_SHA512) | + BIT(SA_ALG_AUTHENC_SHA1_AES) | + BIT(SA_ALG_AUTHENC_SHA256_AES), }; static struct sa_match_data am64_match_data = { From b77e34f5b10de529255c9468203d0644a7af3b81 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Wed, 6 Jul 2022 14:11:44 -0500 Subject: [PATCH 68/89] crypto: sa2ul - Check engine status before enabling There is a engine status register that can be used to check if the different HW crypto engines are enabled. Check that first and then only try to enable the engines if they are not already on. This has a couple benefits. First we don't need to use match_data for this. Second, this driver can now work on HS devices where the engine control registers are read-only and writing causes a firewall exception. Signed-off-by: Andrew Davis Signed-off-by: Herbert Xu --- drivers/crypto/sa2ul.c | 15 +++++++-------- drivers/crypto/sa2ul.h | 1 + 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 1d732113b81ec..f4bc06c24ad8f 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -86,7 +86,6 @@ struct sa_match_data { u8 priv; u8 priv_id; u32 supported_algos; - bool skip_engine_control; }; static struct device *sa_k3_dev; @@ -2380,7 +2379,6 @@ static struct sa_match_data am64_match_data = { BIT(SA_ALG_SHA256) | BIT(SA_ALG_SHA512) | BIT(SA_ALG_AUTHENC_SHA256_AES), - .skip_engine_control = true, }; static const struct of_device_id of_match[] = { @@ -2398,6 +2396,7 @@ static int sa_ul_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; static void __iomem *saul_base; struct sa_crypto_data *dev_data; + u32 status, val; int ret; dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL); @@ -2434,13 +2433,13 @@ static int sa_ul_probe(struct platform_device *pdev) spin_lock_init(&dev_data->scid_lock); - if (!dev_data->match_data->skip_engine_control) { - u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | - SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | - SA_EEC_TRNG_EN; - + val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN | + SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN | + SA_EEC_TRNG_EN; + status = readl_relaxed(saul_base + SA_ENGINE_STATUS); + /* Only enable engines if all are not already enabled */ + if (val & ~status) writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL); - } sa_register_algos(dev_data); diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h index ed66d1f111db5..92bf97232a292 100644 --- a/drivers/crypto/sa2ul.h +++ b/drivers/crypto/sa2ul.h @@ -16,6 +16,7 @@ #include #include +#define SA_ENGINE_STATUS 0x0008 #define SA_ENGINE_ENABLE_CONTROL 0x1000 struct sa_tfm_ctx; From 1353e576ae3b7b9703b74f6f2276b6dd450f4a2e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 7 Jul 2022 09:05:46 +0100 Subject: [PATCH 69/89] crypto: x86/blowfish - 
remove redundant assignment to variable nbytes Variable nbytes is being assigned a value that is never read; it is re-assigned in the next statement in the while-loop. The assignment is redundant and can be removed. Cleans up clang scan-build warnings, e.g.: arch/x86/crypto/blowfish_glue.c:147:10: warning: Although the value stored to 'nbytes' is used in the enclosing expression, the value is never actually read from 'nbytes' Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- arch/x86/crypto/blowfish_glue.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c index ba06322c1e396..019c64c1340a3 100644 --- a/arch/x86/crypto/blowfish_glue.c +++ b/arch/x86/crypto/blowfish_glue.c @@ -144,7 +144,7 @@ static int cbc_encrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes)) { + while (walk.nbytes) { nbytes = __cbc_encrypt(ctx, &walk); err = skcipher_walk_done(&walk, nbytes); } @@ -225,7 +225,7 @@ static int cbc_decrypt(struct skcipher_request *req) err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk.nbytes)) { + while (walk.nbytes) { nbytes = __cbc_decrypt(ctx, &walk); err = skcipher_walk_done(&walk, nbytes); } From 5a44749f65b2342d43dea82024e4febdac33c78d Mon Sep 17 00:00:00 2001 From: Vladis Dronov Date: Fri, 8 Jul 2022 14:33:13 +0200 Subject: [PATCH 70/89] crypto: fips - make proc files report fips module name and version FIPS 140-3 introduced a requirement for the FIPS module to return information about itself, specifically a name and a version. These values must match the values reported on FIPS certificates. This patch adds two files to read a name and a version from: /proc/sys/crypto/fips_name /proc/sys/crypto/fips_version v2: removed redundant parentheses in config entries. v3: move FIPS_MODULE_* defines to fips.c where they are used. v4: return utsrelease.h inclusion Signed-off-by: Simo Sorce Signed-off-by: Vladis Dronov Signed-off-by: Herbert Xu --- crypto/Kconfig | 21 +++++++++++++++++++++ crypto/fips.c | 35 ++++++++++++++++++++++++++++++----- 2 files changed, 51 insertions(+), 5 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 7d98a2b4ac9cd..54bdcf2ce331d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -33,6 +33,27 @@ config CRYPTO_FIPS certification. You should say no unless you know what this is. +config CRYPTO_FIPS_NAME + string "FIPS Module Name" + default "Linux Kernel Cryptographic API" + depends on CRYPTO_FIPS + help + This option sets the FIPS Module name reported by the Crypto API via + the /proc/sys/crypto/fips_name file. + +config CRYPTO_FIPS_CUSTOM_VERSION + bool "Use Custom FIPS Module Version" + depends on CRYPTO_FIPS + default n + +config CRYPTO_FIPS_VERSION + string "FIPS Module Version" + default "(none)" + depends on CRYPTO_FIPS_CUSTOM_VERSION + help + This option provides the ability to override the FIPS Module Version. + By default the KERNELRELEASE value is used.
+ config CRYPTO_ALGAPI tristate select CRYPTO_ALGAPI2 diff --git a/crypto/fips.c b/crypto/fips.c index 7b1d8caee6692..b05d3c7b3ca53 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -12,6 +12,7 @@ #include #include #include +#include int fips_enabled; EXPORT_SYMBOL_GPL(fips_enabled); @@ -30,13 +31,37 @@ static int fips_enable(char *str) __setup("fips=", fips_enable); +#define FIPS_MODULE_NAME CONFIG_CRYPTO_FIPS_NAME +#ifdef CONFIG_CRYPTO_FIPS_CUSTOM_VERSION +#define FIPS_MODULE_VERSION CONFIG_CRYPTO_FIPS_VERSION +#else +#define FIPS_MODULE_VERSION UTS_RELEASE +#endif + +static char fips_name[] = FIPS_MODULE_NAME; +static char fips_version[] = FIPS_MODULE_VERSION; + static struct ctl_table crypto_sysctl_table[] = { { - .procname = "fips_enabled", - .data = &fips_enabled, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = proc_dointvec + .procname = "fips_enabled", + .data = &fips_enabled, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_dointvec + }, + { + .procname = "fips_name", + .data = &fips_name, + .maxlen = 64, + .mode = 0444, + .proc_handler = proc_dostring + }, + { + .procname = "fips_version", + .data = &fips_version, + .maxlen = 64, + .mode = 0444, + .proc_handler = proc_dostring }, {} }; From 463f74089ff9148e3e46af454a6977d40b98cd10 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 9 Jul 2022 14:18:48 -0700 Subject: [PATCH 71/89] crypto: lib - move lib/sha1.c into lib/crypto/ SHA-1 is a crypto algorithm (or at least was intended to be -- it's not considered secure anymore), so move it out of the top-level library directory and into lib/crypto/. Signed-off-by: Eric Biggers Reviewed-by: Jason A. Donenfeld Signed-off-by: Herbert Xu --- lib/Makefile | 2 +- lib/crypto/Makefile | 2 ++ lib/{ => crypto}/sha1.c | 0 3 files changed, 3 insertions(+), 1 deletion(-) rename lib/{ => crypto}/sha1.c (100%) diff --git a/lib/Makefile b/lib/Makefile index f99bf61f8bbc6..67482f5ec0e89 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -29,7 +29,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ - idr.o extable.o sha1.o irq_regs.o argv_split.o \ + idr.o extable.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 26be2bbe09c59..d28111ba54fcb 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -34,6 +34,8 @@ libpoly1305-y := poly1305-donna32.o libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o libpoly1305-y += poly1305.o +obj-y += sha1.o + obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o diff --git a/lib/sha1.c b/lib/crypto/sha1.c similarity index 100% rename from lib/sha1.c rename to lib/crypto/sha1.c From ec8f7f4821d5e70d71601519bc2325b311324a96 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 9 Jul 2022 14:18:49 -0700 Subject: [PATCH 72/89] crypto: lib - make the sha1 library optional Since the Linux RNG no longer uses sha1_transform(), the SHA-1 library is no longer needed unconditionally. Make it possible to build the Linux kernel without the SHA-1 library by putting it behind a kconfig option, and selecting this new option from the kconfig options that gate the remaining users: CRYPTO_SHA1 for crypto/sha1_generic.c, BPF for kernel/bpf/core.c, and IPV6 for net/ipv6/addrconf.c. 
Unfortunately, since BPF is selected by NET, for now this can only make a difference for kernels built without networking support. Signed-off-by: Eric Biggers Reviewed-by: Jason A. Donenfeld Acked-by: Jakub Kicinski Acked-by: Alexei Starovoitov Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + init/Kconfig | 1 + lib/crypto/Kconfig | 3 +++ lib/crypto/Makefile | 3 ++- net/ipv6/Kconfig | 1 + 5 files changed, 8 insertions(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 54bdcf2ce331d..75c71d9a5ffb7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -901,6 +901,7 @@ config CRYPTO_RMD160 config CRYPTO_SHA1 tristate "SHA1 digest algorithm" select CRYPTO_HASH + select CRYPTO_LIB_SHA1 help SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). diff --git a/init/Kconfig b/init/Kconfig index c984afc489dea..d8d0b4bdfe419 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1472,6 +1472,7 @@ config HAVE_PCSPKR_PLATFORM # interpreter that classic socket filters depend on config BPF bool + select CRYPTO_LIB_SHA1 menuconfig EXPERT bool "Configure standard kernel features (expert users)" diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 2082af43d51fb..9ff549f63540f 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -121,6 +121,9 @@ config CRYPTO_LIB_CHACHA20POLY1305 select CRYPTO_LIB_POLY1305 select CRYPTO_ALGAPI +config CRYPTO_LIB_SHA1 + tristate + config CRYPTO_LIB_SHA256 tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index d28111ba54fcb..919cbb2c220d6 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -34,7 +34,8 @@ libpoly1305-y := poly1305-donna32.o libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o libpoly1305-y += poly1305.o -obj-y += sha1.o +obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o +libsha1-y := sha1.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index bf2e5e5fe1427..658bfed1df8b1 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -7,6 +7,7 @@ menuconfig IPV6 tristate "The IPv6 protocol" default y + select CRYPTO_LIB_SHA1 help Support for IP version 6 (IPv6). From ed221835a7ae8d0c5d9967d70518d0f230b8e9c5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 19 Jul 2022 03:04:15 +0000 Subject: [PATCH 73/89] crypto: lib - add module license to libsha1 libsha1 can be a module, so it needs a MODULE_LICENSE. Fixes: ec8f7f4821d5 ("crypto: lib - make the sha1 library optional") Reported-by: Randy Dunlap Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- lib/crypto/sha1.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/crypto/sha1.c b/lib/crypto/sha1.c index 0494766fc574e..1aebe7be9401b 100644 --- a/lib/crypto/sha1.c +++ b/lib/crypto/sha1.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -135,3 +136,5 @@ void sha1_init(__u32 *buf) buf[4] = 0xc3d2e1f0; } EXPORT_SYMBOL(sha1_init); + +MODULE_LICENSE("GPL"); From 19cdbdb7cda0cb4948dfaab613d8b4f63c88a53e Mon Sep 17 00:00:00 2001 From: Bagas Sanjaya Date: Wed, 13 Jul 2022 15:56:27 +0700 Subject: [PATCH 74/89] Documentation: qat: Use code block for qat sysfs example kernel test robot and Stephen Rothwell reported htmldocs warning: Documentation/ABI/testing/sysfs-driver-qat:24: WARNING: Unexpected indentation. The warning isn't caused by Date: field pointed by the warning, but rather by sysfs example that isn't in literal code block. Add the code block marker. 
Link: https://lore.kernel.org/linux-next/20220711204932.333379b4@canb.auug.org.au/ Link: https://lore.kernel.org/linux-doc/202207090803.TEGI95qw-lkp@intel.com/ Reported-by: kernel test robot Reported-by: Stephen Rothwell Fixes: d4cfb144f60551 ("crypto: qat - expose device config through sysfs for 4xxx") Acked-by: Giovanni Cabiddu Cc: Wojciech Ziemba Cc: Adam Guerin Cc: Fiona Trahe Cc: Herbert Xu Cc: Vladis Dronov Cc: Tomasz Kowallik Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Bagas Sanjaya Acked-by: Vladis Dronov Signed-off-by: Herbert Xu --- Documentation/ABI/testing/sysfs-driver-qat | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index a600531e95628..43e081ec22cc4 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -46,7 +46,8 @@ Description: Reports the current configuration of the QAT device and allows The following example shows how to change the configuration of a device configured for running crypto services in order to - run data compression: + run data compression:: + # cat /sys/bus/pci/devices//qat/state up # cat /sys/bus/pci/devices//qat/cfg_services From 1b466b8cbfd8321408bb0792b241a2b9222aed62 Mon Sep 17 00:00:00 2001 From: Bagas Sanjaya Date: Wed, 13 Jul 2022 15:56:28 +0700 Subject: [PATCH 75/89] Documentation: qat: rewrite description The sysfs description contains redundancy on returned and allowed values list, due to the described sysfs is read-write. Rewrite. Acked-by: Giovanni Cabiddu Cc: Adam Guerin Cc: Tomasz Kowallik Cc: Giovanni Cabiddu Cc: Wojciech Ziemba Cc: Fiona Trahe Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Bagas Sanjaya Signed-off-by: Herbert Xu --- Documentation/ABI/testing/sysfs-driver-qat | 38 ++++++++-------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index 43e081ec22cc4..185f81a2aab36 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -2,18 +2,14 @@ What: /sys/bus/pci/devices//qat/state Date: June 2022 KernelVersion: 5.20 Contact: qat-linux@intel.com -Description: Reports the current state of the QAT device and allows to - change it. +Description: (RW) Reports the current state of the QAT device. Write to + the file to start or stop the device. - This attribute is RW. + The values are: - Returned values: - up: the device is up and running - down: the device is down + * up: the device is up and running + * down: the device is down - Allowed values: - up: initialize and start the device - down: stop the device and bring it down It is possible to transition the device from up to down only if the device is up and vice versa. @@ -24,22 +20,14 @@ What: /sys/bus/pci/devices//qat/cfg_services Date: June 2022 KernelVersion: 5.20 Contact: qat-linux@intel.com -Description: Reports the current configuration of the QAT device and allows - to change it. - - This attribute is RW. - - Returned values: - sym;asym: the device is configured for running - crypto services - dc: the device is configured for running - compression services - - Allowed values: - sym;asym: configure the device for running - crypto services - dc: configure the device for running - compression services +Description: (RW) Reports the current configuration of the QAT device. 
+ Write to the file to change the configured services. + + The values are: + + * sym;asym: the device is configured for running crypto + services + * dc: the device is configured for running compression services It is possible to set the configuration only if the device is in the `down` state (see /sys/bus/pci/devices//qat/state) From 693b8755e1b1dd3c0bc22920a8bf2bd495688909 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 14 Jul 2022 23:28:20 +0200 Subject: [PATCH 76/89] crypto: keembay-ocs-ecc - Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The remove callback is only called after probe completed successfully. In this case platform_set_drvdata() was called with a non-NULL argument and so ecc_dev is never NULL. This is a preparation for making platform remove callbacks return void. Signed-off-by: Uwe Kleine-König Signed-off-by: Herbert Xu --- drivers/crypto/keembay/keembay-ocs-ecc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c index 5d0785d3f1b55..2269df17514ce 100644 --- a/drivers/crypto/keembay/keembay-ocs-ecc.c +++ b/drivers/crypto/keembay/keembay-ocs-ecc.c @@ -976,8 +976,6 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev) struct ocs_ecc_dev *ecc_dev; ecc_dev = platform_get_drvdata(pdev); - if (!ecc_dev) - return -ENODEV; crypto_unregister_kpp(&ocs_ecdh_p384); crypto_unregister_kpp(&ocs_ecdh_p256); From 4cbdecd02fd29eb69a376ffdac47aff441c4d19f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 14 Jul 2022 18:59:14 -0700 Subject: [PATCH 77/89] crypto: rmd160 - fix Kconfig "its" grammar Use the possessive "its" instead of the contraction "it's" where appropriate. Signed-off-by: Randy Dunlap Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 75c71d9a5ffb7..e72bf3fc298cc 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -889,7 +889,7 @@ config CRYPTO_RMD160 RIPEMD-160 is a 160-bit cryptographic hash function. It is intended to be used as a secure replacement for the 128-bit hash functions - MD4, MD5 and it's predecessor RIPEMD + MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128). It's speed is comparable to SHA1 and there are no known attacks From 824b94a88320eaa5e3e059b494e457ed25987a63 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 15 Jul 2022 12:50:07 +0800 Subject: [PATCH 78/89] crypto: twofish - Fix comment typo The double `that' is duplicated in line 301, remove one. Signed-off-by: Jason Wang Signed-off-by: Herbert Xu --- crypto/twofish_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c index d23fa531b91f1..f921f30334f48 100644 --- a/crypto/twofish_common.c +++ b/crypto/twofish_common.c @@ -298,7 +298,7 @@ static const u32 mds[4][256] = { * multiplication is inefficient without hardware support. To multiply * faster, I make use of the fact x is a generator for the nonzero elements, * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for - * some n in 0..254. Note that that caret is exponentiation in GF(2^8), + * some n in 0..254. Note that caret is exponentiation in GF(2^8), * *not* polynomial notation. So if I want to compute pq where p and q are * in GF(2^8), I can just say: * 1. 
if p=0 or q=0 then pq=0 From 85796a9b7583a0b00ee9e69b932daafb41515a76 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 15 Jul 2022 13:40:50 +0800 Subject: [PATCH 79/89] hwrng: via - Fix comment typo The double `close' is duplicated in line 148, remove one. Signed-off-by: Jason Wang Signed-off-by: Herbert Xu --- drivers/char/hw_random/via-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index 7444cc146e86a..a9a0a3b09c8bd 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -145,7 +145,7 @@ static int via_rng_init(struct hwrng *rng) } /* Control the RNG via MSR. Tread lightly and pay very close - * close attention to values written, as the reserved fields + * attention to values written, as the reserved fields * are documented to be "undefined and unpredictable"; but it * does not say to write them as zero, so I make a guess that * we restore the values we find in the register. From 647c952eac5d27569365e9407c96db3c6915e08f Mon Sep 17 00:00:00 2001 From: shaom Deng Date: Sun, 17 Jul 2022 00:56:33 +0800 Subject: [PATCH 80/89] crypto: powerpc/aes - delete the redundant word "block" in comments There is a redundant word "block" in the comments, so remove it. Signed-off-by: shaom Deng Signed-off-by: Herbert Xu --- arch/powerpc/crypto/aes-spe-glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c index e8dfe9fb02668..efab78a3a8f6b 100644 --- a/arch/powerpc/crypto/aes-spe-glue.c +++ b/arch/powerpc/crypto/aes-spe-glue.c @@ -28,7 +28,7 @@ * instructions per clock cycle using one 32/64 bit unit (SU1) and one 32 * bit unit (SU2). One of these can be a memory access that is executed via * a single load and store unit (LSU). XTS-AES-256 takes ~780 operations per - * 16 byte block block or 25 cycles per byte. Thus 768 bytes of input data + * 16 byte block or 25 cycles per byte. Thus 768 bytes of input data * will need an estimated maximum of 20,000 cycles. Headroom for cache misses * included. Even with the low end model clocked at 667 MHz this equals to a * critical time window of less than 30us.
The value has been chosen to From 9d2bb9a74b2877f100637d6ab5685bcd33c69d44 Mon Sep 17 00:00:00 2001 From: Ignat Korchagin Date: Sun, 17 Jul 2022 14:37:46 +0100 Subject: [PATCH 81/89] crypto: testmgr - some more fixes to RSA test vectors Two more fixes: * some test vectors in commit 79e6e2f3f3ff ("crypto: testmgr - populate RSA CRT parameters in RSA test vectors") had misplaced commas, which break the test and trigger KASAN warnings at least on x86-64 * pkcs1pad test vector did not have its CRT parameters Fixes: 79e6e2f3f3ff ("crypto: testmgr - populate RSA CRT parameters in RSA test vectors") Reported-by: Eric Biggers Signed-off-by: Ignat Korchagin Signed-off-by: Herbert Xu --- crypto/testmgr.h | 102 +++++++++++++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 39 deletions(-) diff --git a/crypto/testmgr.h b/crypto/testmgr.h index dee88510f58d5..d6088e26f3261 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -273,7 +273,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\x61\xAD\xBD\x3A\x8A\x7E\x99\x1C\x5C\x05\x56\xA9\x4C\x31\x46\xA7" "\xF9\x80\x3F\x8F\x6F\x8A\xE3\x42\xE9\x31\xFD\x8A\xE4\x7A\x22\x0D" "\x1B\x99\xA4\x95\x84\x98\x07\xFE\x39\xF9\x24\x5A\x98\x36\xDA\x3D" - "\x02\x41", /* coefficient - integer of 65 bytes */ + "\x02\x41" /* coefficient - integer of 65 bytes */ "\x00\xB0\x6C\x4F\xDA\xBB\x63\x01\x19\x8D\x26\x5B\xDB\xAE\x94\x23" "\xB3\x80\xF2\x71\xF7\x34\x53\x88\x50\x93\x07\x7F\xCD\x39\xE2\x11" "\x9F\xC9\x86\x32\x15\x4F\x58\x83\xB1\x67\xA9\x67\xBF\x40\x2B\x4E" @@ -370,7 +370,7 @@ static const struct akcipher_testvec rsa_tv_template[] = { "\x6A\x37\x3B\x86\x6C\x51\x37\x5B\x1D\x79\xF2\xA3\x43\x10\xC6\xA7" "\x21\x79\x6D\xF9\xE9\x04\x6A\xE8\x32\xFF\xAE\xFD\x1C\x7B\x8C\x29" "\x13\xA3\x0C\xB2\xAD\xEC\x6C\x0F\x8D\x27\x12\x7B\x48\xB2\xDB\x31" - "\x02\x81\x81", /* coefficient - integer of 129 bytes */ + "\x02\x81\x81" /* coefficient - integer of 129 bytes */ "\x00\x8D\x1B\x05\xCA\x24\x1F\x0C\x53\x19\x52\x74\x63\x21\xFA\x78" "\x46\x79\xAF\x5C\xDE\x30\xA4\x6C\x20\x38\xE6\x97\x39\xB8\x7A\x70" "\x0D\x8B\x6C\x6D\x13\x74\xD5\x1C\xDE\xA9\xF4\x60\x37\xFE\x68\x77" @@ -1231,7 +1231,7 @@ static const struct akcipher_testvec ecrdsa_tv_template[] = { static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { { .key = - "\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" + "\x30\x82\x04\xa5\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" "\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28" "\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67" "\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d" @@ -1247,42 +1247,66 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { "\x9e\x49\x63\x6e\x02\xc1\xc9\x3a\x9b\xa5\x22\x1b\x07\x95\xd6\x10" "\x02\x50\xfd\xfd\xd1\x9b\xbe\xab\xc2\xc0\x74\xd7\xec\x00\xfb\x11" "\x71\xcb\x7a\xdc\x81\x79\x9f\x86\x68\x46\x63\x82\x4d\xb7\xf1\xe6" - "\x16\x6f\x42\x63\xf4\x94\xa0\xca\x33\xcc\x75\x13\x02\x82\x01\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - 
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01" - "\x02\x82\x01\x00\x62\xb5\x60\x31\x4f\x3f\x66\x16\xc1\x60\xac\x47" - "\x2a\xff\x6b\x69\x00\x4a\xb2\x5c\xe1\x50\xb9\x18\x74\xa8\xe4\xdc" - "\xa8\xec\xcd\x30\xbb\xc1\xc6\xe3\xc6\xac\x20\x2a\x3e\x5e\x8b\x12" - "\xe6\x82\x08\x09\x38\x0b\xab\x7c\xb3\xcc\x9c\xce\x97\x67\xdd\xef" - "\x95\x40\x4e\x92\xe2\x44\xe9\x1d\xc1\x14\xfd\xa9\xb1\xdc\x71\x9c" - "\x46\x21\xbd\x58\x88\x6e\x22\x15\x56\xc1\xef\xe0\xc9\x8d\xe5\x80" - "\x3e\xda\x7e\x93\x0f\x52\xf6\xf5\xc1\x91\x90\x9e\x42\x49\x4f\x8d" - "\x9c\xba\x38\x83\xe9\x33\xc2\x50\x4f\xec\xc2\xf0\xa8\xb7\x6e\x28" - "\x25\x56\x6b\x62\x67\xfe\x08\xf1\x56\xe5\x6f\x0e\x99\xf1\xe5\x95" - "\x7b\xef\xeb\x0a\x2c\x92\x97\x57\x23\x33\x36\x07\xdd\xfb\xae\xf1" - "\xb1\xd8\x33\xb7\x96\x71\x42\x36\xc5\xa4\xa9\x19\x4b\x1b\x52\x4c" - "\x50\x69\x91\xf0\x0e\xfa\x80\x37\x4b\xb5\xd0\x2f\xb7\x44\x0d\xd4" - "\xf8\x39\x8d\xab\x71\x67\x59\x05\x88\x3d\xeb\x48\x48\x33\x88\x4e" - "\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a" - "\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda" - "\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46" - "\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00" - "\x02\x01\x00", - .key_len = 803, + "\x16\x6f\x42\x63\xf4\x94\xa0\xca\x33\xcc\x75\x13\x02\x03\x01\x00" + "\x01\x02\x82\x01\x00\x62\xb5\x60\x31\x4f\x3f\x66\x16\xc1\x60\xac" + "\x47\x2a\xff\x6b\x69\x00\x4a\xb2\x5c\xe1\x50\xb9\x18\x74\xa8\xe4" + "\xdc\xa8\xec\xcd\x30\xbb\xc1\xc6\xe3\xc6\xac\x20\x2a\x3e\x5e\x8b" + "\x12\xe6\x82\x08\x09\x38\x0b\xab\x7c\xb3\xcc\x9c\xce\x97\x67\xdd" + "\xef\x95\x40\x4e\x92\xe2\x44\xe9\x1d\xc1\x14\xfd\xa9\xb1\xdc\x71" + "\x9c\x46\x21\xbd\x58\x88\x6e\x22\x15\x56\xc1\xef\xe0\xc9\x8d\xe5" + "\x80\x3e\xda\x7e\x93\x0f\x52\xf6\xf5\xc1\x91\x90\x9e\x42\x49\x4f" + "\x8d\x9c\xba\x38\x83\xe9\x33\xc2\x50\x4f\xec\xc2\xf0\xa8\xb7\x6e" + "\x28\x25\x56\x6b\x62\x67\xfe\x08\xf1\x56\xe5\x6f\x0e\x99\xf1\xe5" + "\x95\x7b\xef\xeb\x0a\x2c\x92\x97\x57\x23\x33\x36\x07\xdd\xfb\xae" + "\xf1\xb1\xd8\x33\xb7\x96\x71\x42\x36\xc5\xa4\xa9\x19\x4b\x1b\x52" + "\x4c\x50\x69\x91\xf0\x0e\xfa\x80\x37\x4b\xb5\xd0\x2f\xb7\x44\x0d" + "\xd4\xf8\x39\x8d\xab\x71\x67\x59\x05\x88\x3d\xeb\x48\x48\x33\x88" + "\x4e\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9" + "\x7a\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f" + "\xda\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d" + "\x46\xb8\x35\xdf\x41\x02\x81\x81\x00\xe4\x4c\xae\xde\x16\xfd\x9f" + "\x83\x55\x5b\x84\x4a\xcf\x1c\xf1\x37\x95\xad\xca\x29\x7f\x2d\x6e" + "\x32\x81\xa4\x2b\x26\x14\x96\x1d\x40\x05\xec\x0c\xaf\x3f\x2c\x6f" + "\x2c\xe8\xbf\x1d\xee\xd0\xb3\xef\x7c\x5b\x9e\x88\x4f\x2a\x8b\x0e" + "\x4a\xbd\xb7\x8c\xfa\x10\x0e\x3b\xda\x68\xad\x41\x2b\xe4\x96\xfa" + "\x7f\x80\x52\x5f\x07\x9f\x0e\x3b\x5e\x96\x45\x1a\x13\x2b\x94\xce" + "\x1f\x07\x69\x85\x35\xfc\x69\x63\x5b\xf8\xf8\x3f\xce\x9d\x40\x1e" + "\x7c\xad\xfb\x9e\xce\xe0\x01\xf8\xef\x59\x5d\xdc\x00\x79\xab\x8a" + "\x3f\x80\xa2\x76\x32\x94\xa9\xea\x65\x02\x81\x81\x00\xf1\x38\x60" + 
"\x90\x0d\x0c\x2e\x3d\x34\xe5\x90\xea\x21\x43\x1f\x68\x63\x16\x7b" + "\x25\x8d\xde\x82\x2b\x52\xf8\xa3\xfd\x0f\x39\xe7\xe9\x5e\x32\x75" + "\x15\x7d\xd0\xc9\xce\x06\xe5\xfb\xa9\xcb\x22\xe5\xdb\x49\x09\xf2" + "\xe6\xb7\xa5\xa7\x75\x2e\x91\x2d\x2b\x5d\xf1\x48\x61\x45\x43\xd7" + "\xbd\xfc\x11\x73\xb5\x11\x9f\xb2\x18\x3a\x6f\x36\xa7\xc2\xd3\x18" + "\x4d\xf0\xc5\x1f\x70\x8c\x9b\xc5\x1d\x95\xa8\x5a\x9e\x8c\xb1\x4b" + "\x6a\x2a\x84\x76\x2c\xd8\x4f\x47\xb0\x81\x84\x02\x45\xf0\x85\xf8" + "\x0c\x6d\xa7\x0c\x4d\x2c\xb2\x5b\x81\x70\xfd\x6e\x17\x02\x81\x81" + "\x00\x8d\x07\xc5\xfa\x92\x4f\x48\xcb\xd3\xdd\xfe\x02\x4c\xa1\x7f" + "\x6d\xab\xfc\x38\xe7\x9b\x95\xcf\xfe\x49\x51\xc6\x09\xf7\x2b\xa8" + "\x94\x15\x54\x75\x9d\x88\xb4\x05\x55\xc3\xcd\xd4\x4a\xe4\x08\x53" + "\xc8\x09\xbd\x0c\x4d\x83\x65\x75\x85\xbc\x5e\xf8\x2a\xbd\xe2\x5d" + "\x1d\x16\x0e\xf9\x34\x89\x38\xaf\x34\x36\x6c\x2c\x22\x44\x22\x81" + "\x90\x73\xd9\xea\x3a\xaf\x70\x74\x48\x7c\xc6\xb5\xb0\xdc\xe5\xa9" + "\xa8\x76\x4b\xbc\xf7\x00\xf3\x4c\x22\x0f\x44\x62\x1d\x40\x0a\x57" + "\xe2\x5b\xdd\x7c\x7b\x9a\xad\xda\x70\x52\x21\x8a\x4c\xc2\xc3\x98" + "\x75\x02\x81\x81\x00\xed\x24\x5c\xa2\x21\x81\xa1\x0f\xa1\x2a\x33" + "\x0e\x49\xc7\x00\x60\x92\x51\x6e\x9d\x9b\xdc\x6d\x22\x04\x7e\xd6" + "\x51\x19\x9f\xf6\xe3\x91\x2c\x8f\xb8\xa2\x29\x19\xcc\x47\x31\xdf" + "\xf8\xab\xf0\xd2\x02\x83\xca\x99\x16\xc2\xe2\xc3\x3f\x4b\x99\x83" + "\xcb\x87\x9e\x86\x66\xc2\x3e\x91\x21\x80\x66\xf3\xd6\xc5\xcd\xb6" + "\xbb\x64\xef\x22\xcf\x48\x94\x58\xe7\x7e\xd5\x7c\x34\x1c\xb7\xa2" + "\xd0\x93\xe9\x9f\xb5\x11\x61\xd7\x5f\x37\x0f\x64\x52\x70\x11\x78" + "\xcc\x08\x77\xeb\xf8\x30\x1e\xb4\x9e\x1b\x4a\xc7\xa8\x33\x51\xe0" + "\xed\xdf\x53\xf6\xdf\x02\x81\x81\x00\x86\xd9\x4c\xee\x65\x61\xc1" + "\x19\xa9\xd5\x74\x9b\xd5\xca\xf6\x83\x2b\x06\xb4\x20\xfe\x45\x29" + "\xe8\xe3\xfa\xe1\x4f\x28\x8e\x63\x2f\x74\xc3\x3a\x5c\x9a\xf5\x9e" + "\x0e\x0d\xc5\xfe\xa0\x4c\x00\xce\x7b\xa4\x19\x17\x59\xaf\x13\x3a" + "\x03\x8f\x54\xf5\x60\x39\x2e\xd9\x06\xb3\x7c\xd6\x90\x06\x41\x77" + "\xf3\x93\xe1\x7a\x01\x41\xc1\x8f\xfe\x4c\x88\x39\xdb\xde\x71\x9e" + "\x58\xd1\x49\x50\x80\xb2\x5a\x4f\x69\x8b\xb8\xfe\x63\xd4\x42\x3d" + "\x37\x61\xa8\x4c\xff\xb6\x99\x4c\xf4\x51\xe0\x44\xaa\x69\x79\x3f" + "\x81\xa4\x61\x3d\x26\xe9\x04\x52\x64", + .key_len = 1193, /* * m is SHA256 hash of following message: * "\x49\x41\xbe\x0a\x0c\xc9\xf6\x35\x51\xe4\x27\x56\x13\x71\x4b\xd0" From 98dfa9343f37bdd4112966292751e3a93aaf2e56 Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Tue, 19 Jul 2022 12:23:23 +0800 Subject: [PATCH 82/89] crypto: hisilicon/hpre - don't use GFP_KERNEL to alloc mem during softirq The hpre encryption driver may be used to encrypt and decrypt packets during the rx softirq, it is not allowed to use GFP_KERNEL. 
Fixes: c8b4b477079d ("crypto: hisilicon - add HiSilicon HPRE accelerator") Signed-off-by: Zhengchao Shao Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 97d54c1465c2b..3ba6f15deafc6 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -252,7 +252,7 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, if (unlikely(shift < 0)) return -EINVAL; - ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL); + ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC); if (unlikely(!ptr)) return -ENOMEM; From fa4d57b85786ec0e16565c75a51c208834b0c24d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Tue, 19 Jul 2022 09:54:03 +0200 Subject: [PATCH 83/89] crypto: inside-secure - Add missing MODULE_DEVICE_TABLE for of MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Without MODULE_DEVICE_TABLE, the crypto_safexcel.ko module is not automatically loaded on platforms where inside-secure crypto HW is specified in the device tree (e.g. Armada 3720). So add the missing MODULE_DEVICE_TABLE for of. Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver") Signed-off-by: Pali Rohár Acked-by: Marek Behún Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 9b1a158aec299..ad0d8c4a71ac1 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1831,6 +1831,8 @@ static const struct of_device_id safexcel_of_match_table[] = { {}, }; +MODULE_DEVICE_TABLE(of, safexcel_of_match_table); + static struct platform_driver crypto_safexcel = { .probe = safexcel_probe, .remove = safexcel_remove, From 96ec8dfdd094b7b2b015e092d581835a732949ff Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 19 Jul 2022 11:13:28 -0500 Subject: [PATCH 84/89] crypto: ccp - Add support for new CCP/PSP device ID Add a new CCP/PSP PCI device ID. This uses the same register offsets as the previously supported structure.
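For context, a minimal sketch (hypothetical my_* names, not the ccp driver's actual code) of how a new PCI device ID entry carries per-device data that probe() then uses to select the register layout:

    #include <linux/module.h>
    #include <linux/pci.h>

    struct my_vdata { int bar; };                   /* hypothetical per-device data */

    static const struct my_vdata my_vdata_v3 = { .bar = 2 };

    static const struct pci_device_id my_ids[] = {
            { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&my_vdata_v3 },
            { 0, }
    };
    MODULE_DEVICE_TABLE(pci, my_ids);

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            const struct my_vdata *vdata = (const struct my_vdata *)id->driver_data;

            /* vdata now selects BARs, register offsets, device variants, ... */
            return vdata ? 0 : -ENODEV;
    }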
Signed-off-by: Mario Limonciello Acked-by: Tom Lendacky Acked-by: Rijo Thomas Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sp-pci.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index b5970ae54d0ea..792d6da7f0c07 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -427,6 +427,12 @@ static const struct sp_dev_vdata dev_vdata[] = { .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv2, +#endif + }, + { /* 6 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv3, #endif }, }; @@ -438,6 +444,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] }, { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] }, { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] }, + { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] }, /* Last entry must be zero */ { 0, } }; From 383ce25dd2b117da0c092dc0b45e7ad0e9ffcbb2 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 20 Jul 2022 15:28:44 +0200 Subject: [PATCH 85/89] crypto: ccree - Remove a useless dma_supported() call There is no point in calling dma_supported() before calling dma_set_coherent_mask(). This function already calls dma_supported() and returns an error (-EIO) if it fails. So remove the superfluous dma_supported() call. Moreover, setting a larger DMA mask will never fail when setting a smaller one will succeed, so the whole "while" loop can be removed as well. (see [1]) While at it, fix the name of the function reported in a dev_err(). [1]: https://lore.kernel.org/all/YteQ6Vx2C03UtCkG@infradead.org/ Suggested-by: Christoph Hellwig Signed-off-by: Christophe JAILLET Acked-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_driver.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 7d1bee86d5810..cadead18b59e8 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -372,17 +372,10 @@ static int init_cc_resources(struct platform_device *plat_dev) dev->dma_mask = &dev->coherent_dma_mask; dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN); - while (dma_mask > 0x7fffffffUL) { - if (dma_supported(dev, dma_mask)) { - rc = dma_set_coherent_mask(dev, dma_mask); - if (!rc) - break; - } - dma_mask >>= 1; - } - + rc = dma_set_coherent_mask(dev, dma_mask); if (rc) { - dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask); + dev_err(dev, "Failed in dma_set_coherent_mask, mask=%llx\n", + dma_mask); return rc; } From 45f5d0176d8426cc1ab0bab84fbd8ef5c57526c6 Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Thu, 21 Jul 2022 10:18:31 +0800 Subject: [PATCH 86/89] crypto: hisilicon/sec - fix auth key size error The authentication algorithm supports a maximum of 128-byte keys. The allocated key memory is insufficient. 
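To make the sizing issue concrete, a small sketch with illustrative macro names (not the driver's real definitions); the authentication-key buffer must be sized from its own maximum rather than from the smaller cipher-key maximum:

    #include <linux/types.h>

    #define MY_MAX_CIPHER_KEY_SIZE   64
    #define MY_MAX_AUTH_KEY_SIZE    128     /* the algorithm accepts keys up to 128 bytes */

    struct my_auth_ctx {
            u8 c_key[MY_MAX_CIPHER_KEY_SIZE];
            u8 a_key[MY_MAX_AUTH_KEY_SIZE]; /* previously sized with the 64-byte maximum */
    };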
Fixes: 2f072d75d1ab ("crypto: hisilicon - Add aead support on SEC2") Signed-off-by: Kai Ye Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 6 +++--- drivers/crypto/hisilicon/sec2/sec_crypto.h | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 71dfa7db63947..77c9f13cf69ac 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -620,7 +620,7 @@ static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, + a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; @@ -632,8 +632,8 @@ static void sec_auth_uninit(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, + memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); + dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index 5e039b50e9d4c..d033f63b583f8 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -7,6 +7,7 @@ #define SEC_AIV_SIZE 12 #define SEC_IV_SIZE 24 #define SEC_MAX_KEY_SIZE 64 +#define SEC_MAX_AKEY_SIZE 128 #define SEC_COMM_SCENE 0 #define SEC_MIN_BLOCK_SZ 1 From 11364d61314eb97b12d6b6facb1ededada52fcc1 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 21 Jul 2022 22:58:53 +0200 Subject: [PATCH 87/89] crypto: hisilicon/zip - Use the bitmap API to allocate bitmaps Use bitmap_zalloc()/bitmap_free() instead of hand-writing them. It is less verbose and it improves the semantic. While at it, add an explicit include . Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_crypto.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 67869513e48c1..ad35434a3fdb7 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 HiSilicon Limited. 
*/ #include #include +#include #include #include #include "zip.h" @@ -606,8 +607,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) req_q = &ctx->qp_ctx[i].req_q; req_q->size = QM_Q_DEPTH; - req_q->req_bitmap = kcalloc(BITS_TO_LONGS(req_q->size), - sizeof(long), GFP_KERNEL); + req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL); if (!req_q->req_bitmap) { ret = -ENOMEM; if (i == 0) @@ -631,11 +631,11 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) return 0; err_free_loop1: - kfree(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); + bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); err_free_loop0: kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q); err_free_bitmap: - kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); + bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); return ret; } @@ -645,7 +645,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) for (i = 0; i < HZIP_CTX_Q_NUM; i++) { kfree(ctx->qp_ctx[i].req_q.q); - kfree(ctx->qp_ctx[i].req_q.req_bitmap); + bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap); } } From 7ae19d422c7da84b5f13bc08b98bd737a08d3a53 Mon Sep 17 00:00:00 2001 From: GUO Zihua Date: Fri, 22 Jul 2022 14:31:57 +0800 Subject: [PATCH 88/89] crypto: arm64/poly1305 - fix a read out-of-bound A kasan error was reported during fuzzing: BUG: KASAN: slab-out-of-bounds in neon_poly1305_blocks.constprop.0+0x1b4/0x250 [poly1305_neon] Read of size 4 at addr ffff0010e293f010 by task syz-executor.5/1646715 CPU: 4 PID: 1646715 Comm: syz-executor.5 Kdump: loaded Not tainted 5.10.0.aarch64 #1 Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.59 01/31/2019 Call trace: dump_backtrace+0x0/0x394 show_stack+0x34/0x4c arch/arm64/kernel/stacktrace.c:196 __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x158/0x1e4 lib/dump_stack.c:118 print_address_description.constprop.0+0x68/0x204 mm/kasan/report.c:387 __kasan_report+0xe0/0x140 mm/kasan/report.c:547 kasan_report+0x44/0xe0 mm/kasan/report.c:564 check_memory_region_inline mm/kasan/generic.c:187 [inline] __asan_load4+0x94/0xd0 mm/kasan/generic.c:252 neon_poly1305_blocks.constprop.0+0x1b4/0x250 [poly1305_neon] neon_poly1305_do_update+0x6c/0x15c [poly1305_neon] neon_poly1305_update+0x9c/0x1c4 [poly1305_neon] crypto_shash_update crypto/shash.c:131 [inline] shash_finup_unaligned+0x84/0x15c crypto/shash.c:179 crypto_shash_finup+0x8c/0x140 crypto/shash.c:193 shash_digest_unaligned+0xb8/0xe4 crypto/shash.c:201 crypto_shash_digest+0xa4/0xfc crypto/shash.c:217 crypto_shash_tfm_digest+0xb4/0x150 crypto/shash.c:229 essiv_skcipher_setkey+0x164/0x200 [essiv] crypto_skcipher_setkey+0xb0/0x160 crypto/skcipher.c:612 skcipher_setkey+0x3c/0x50 crypto/algif_skcipher.c:305 alg_setkey+0x114/0x2a0 crypto/af_alg.c:220 alg_setsockopt+0x19c/0x210 crypto/af_alg.c:253 __sys_setsockopt+0x190/0x2e0 net/socket.c:2123 __do_sys_setsockopt net/socket.c:2134 [inline] __se_sys_setsockopt net/socket.c:2131 [inline] __arm64_sys_setsockopt+0x78/0x94 net/socket.c:2131 __invoke_syscall arch/arm64/kernel/syscall.c:36 [inline] invoke_syscall+0x64/0x100 arch/arm64/kernel/syscall.c:48 el0_svc_common.constprop.0+0x220/0x230 arch/arm64/kernel/syscall.c:155 do_el0_svc+0xb4/0xd4 arch/arm64/kernel/syscall.c:217 el0_svc+0x24/0x3c arch/arm64/kernel/entry-common.c:353 el0_sync_handler+0x160/0x164 arch/arm64/kernel/entry-common.c:369 el0_sync+0x160/0x180 arch/arm64/kernel/entry.S:683 This error can be reproduced by the following code compiled as ko on a system with kasan enabled: #include #include #include #include char test_data[] = 
"\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e"; int init(void) { struct crypto_shash *tfm = NULL; char *data = NULL, *out = NULL; tfm = crypto_alloc_shash("poly1305", 0, 0); data = kmalloc(POLY1305_KEY_SIZE - 1, GFP_KERNEL); out = kmalloc(POLY1305_DIGEST_SIZE, GFP_KERNEL); memcpy(data, test_data, POLY1305_KEY_SIZE - 1); crypto_shash_tfm_digest(tfm, data, POLY1305_KEY_SIZE - 1, out); kfree(data); kfree(out); return 0; } void deinit(void) { } module_init(init) module_exit(deinit) MODULE_LICENSE("GPL"); The root cause of the bug sits in neon_poly1305_blocks. The logic neon_poly1305_blocks() performed is that if it was called with both s[] and r[] uninitialized, it will first try to initialize them with the data from the first "block" that it believed to be 32 bytes in length. First 16 bytes are used as the key and the next 16 bytes for s[]. This would lead to the aforementioned read out-of-bound. However, after calling poly1305_init_arch(), only 16 bytes were deducted from the input and s[] is initialized yet again with the following 16 bytes. The second initialization of s[] is certainly redundent which indicates that the first initialization should be for r[] only. This patch fixes the issue by calling poly1305_init_arm64() instead of poly1305_init_arch(). This is also the implementation for the same algorithm on arm platform. Fixes: f569ca164751 ("crypto: arm64/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON implementation") Cc: stable@vger.kernel.org Signed-off-by: GUO Zihua Reviewed-by: Eric Biggers Acked-by: Will Deacon Signed-off-by: Herbert Xu --- arch/arm64/crypto/poly1305-glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c index 9c3d86e397bf3..1fae18ba11ed1 100644 --- a/arch/arm64/crypto/poly1305-glue.c +++ b/arch/arm64/crypto/poly1305-glue.c @@ -52,7 +52,7 @@ static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, { if (unlikely(!dctx->sset)) { if (!dctx->rset) { - poly1305_init_arch(dctx, src); + poly1305_init_arm64(&dctx->h, src); src += POLY1305_BLOCK_SIZE; len -= POLY1305_BLOCK_SIZE; dctx->rset = 1; From af5d35b83f642399c719ea9a8599a13b8a0c4167 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Sat, 23 Jul 2022 17:26:40 +0800 Subject: [PATCH 89/89] crypto: tcrypt - Remove the static variable initialisations to NULL Initialise global and static variable to NULL is always unnecessary. Remove the unnecessary initialisations. Signed-off-by: Jason Wang Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index f56d1a9cf0a78..59eb8ec366643 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -58,7 +58,7 @@ */ static unsigned int sec; -static char *alg = NULL; +static char *alg; static u32 type; static u32 mask; static int mode;