 // SPDX-License-Identifier: GPL-2.0-or-later
 /* XTS: as defined in IEEE1619/D16
  * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
- * (sector sizes which are not a multiple of 16 bytes are,
- * however currently unsupported)
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
@@ -34,6 +32,8 @@ struct xts_instance_ctx {
 
 struct rctx {
 	le128 t;
+	struct scatterlist *tail;
+	struct scatterlist sg[2];
 	struct skcipher_request subreq;
 };
 
@@ -84,10 +84,11 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
  * multiple calls to the 'ecb(..)' instance, which usually would be slower than
  * just doing the gf128mul_x_ble() calls again.
  */
-static int xor_tweak(struct skcipher_request *req, bool second_pass)
+static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
 	const int bs = XTS_BLOCK_SIZE;
 	struct skcipher_walk w;
 	le128 t = rctx->t;
@@ -109,6 +110,20 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
 		wdst = w.dst.virt.addr;
 
 		do {
+			if (unlikely(cts) &&
+			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
+				if (!enc) {
+					if (second_pass)
+						rctx->t = t;
+					gf128mul_x_ble(&t, &t);
+				}
+				le128_xor(wdst, &t, wsrc);
+				if (enc && second_pass)
+					gf128mul_x_ble(&rctx->t, &t);
+				skcipher_walk_done(&w, avail - bs);
+				return 0;
+			}
+
 			le128_xor(wdst++, &t, wsrc++);
 			gf128mul_x_ble(&t, &t);
 		} while ((avail -= bs) >= bs);
@@ -119,65 +134,162 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
 	return err;
 }
 
-static int xor_tweak_pre(struct skcipher_request *req)
+static int xor_tweak_pre(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, false);
+	return xor_tweak(req, false, enc);
 }
 
-static int xor_tweak_post(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, true);
+	return xor_tweak(req, true, enc);
 }
 
-static void crypt_done(struct crypto_async_request *areq, int err)
+static void cts_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	le128 b;
+
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+		le128_xor(&b, &rctx->t, &b);
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int cts_final(struct skcipher_request *req,
+		     int (*crypt)(struct skcipher_request *req))
+{
+	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+	int tail = req->cryptlen % XTS_BLOCK_SIZE;
+	le128 b[2];
+	int err;
+
+	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
+				      offset - XTS_BLOCK_SIZE);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	memcpy(b + 1, b, tail);
+	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
+
+	le128_xor(b, &rctx->t, b);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
+	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
+				   XTS_BLOCK_SIZE, NULL);
+
+	err = crypt(subreq);
+	if (err)
+		return err;
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	le128_xor(b, &rctx->t, b);
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+
+	return 0;
+}
+
+static void encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
 	if (!err) {
 		struct rctx *rctx = skcipher_request_ctx(req);
 
 		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-		err = xor_tweak_post(req);
+		err = xor_tweak_post(req, true);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = cts_final(req, crypto_skcipher_encrypt);
+			if (err == -EINPROGRESS)
+				return;
+		}
 	}
 
 	skcipher_request_complete(req, err);
 }
 
-static void init_crypt(struct skcipher_request *req)
+static void decrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+		err = xor_tweak_post(req, false);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = cts_final(req, crypto_skcipher_decrypt);
+			if (err == -EINPROGRESS)
+				return;
+		}
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
 {
 	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+
 	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
 	skcipher_request_set_crypt(subreq, req->dst, req->dst,
-				   req->cryptlen, NULL);
+				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
 
 	/* calculate first value of T */
 	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
+
+	return 0;
 }
 
 static int encrypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
 
-	init_crypt(req);
-	return xor_tweak_pre(req) ?:
-	       crypto_skcipher_encrypt(subreq) ?:
-	       xor_tweak_post(req);
+	err = init_crypt(req, encrypt_done) ?:
+	      xor_tweak_pre(req, true) ?:
+	      crypto_skcipher_encrypt(subreq) ?:
+	      xor_tweak_post(req, true);
+
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
+
+	return cts_final(req, crypto_skcipher_encrypt);
 }
 
 static int decrypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
+
+	err = init_crypt(req, decrypt_done) ?:
+	      xor_tweak_pre(req, false) ?:
+	      crypto_skcipher_decrypt(subreq) ?:
+	      xor_tweak_post(req, false);
+
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
 
-	init_crypt(req);
-	return xor_tweak_pre(req) ?:
-	       crypto_skcipher_decrypt(subreq) ?:
-	       xor_tweak_post(req);
+	return cts_final(req, crypto_skcipher_decrypt);
 }
 
 static int init_tfm(struct crypto_skcipher *tfm)
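
The hunks above add ciphertext stealing (CTS) so the generic xts template can process request lengths that are not a multiple of XTS_BLOCK_SIZE. As a rough, illustrative sketch of what the new cts_final() computes on the encrypt path (not part of the patch, and not kernel code): plain byte arrays stand in for le128/scatterlist, and the hypothetical encrypt_block() callback stands in for the 'ecb(..)' child cipher.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLK 16

/* hypothetical single-block ECB primitive, standing in for the child cipher */
typedef void (*blk_crypt_t)(uint8_t out[BLK], const uint8_t in[BLK]);

static void xor_block(uint8_t *dst, const uint8_t *a, const uint8_t *b)
{
	for (int i = 0; i < BLK; i++)
		dst[i] = a[i] ^ b[i];
}

/*
 * cc:       ciphertext of the last *full* block, already produced by the
 *           main xor_tweak()/ecb pass with the previous tweak.
 * t_last:   tweak for the stolen block (what the patch keeps in rctx->t).
 * tail:     trailing partial plaintext, 1..15 bytes.
 * out_prev: the new last full ciphertext block.
 * out_last: the stolen partial ciphertext block (tail_len bytes).
 */
static void cts_encrypt_tail(blk_crypt_t encrypt_block,
			     const uint8_t t_last[BLK],
			     const uint8_t cc[BLK],
			     const uint8_t *tail, size_t tail_len,
			     uint8_t out_prev[BLK], uint8_t *out_last)
{
	uint8_t buf[BLK];

	/* the final partial block is "stolen" from the previous ciphertext */
	memcpy(out_last, cc, tail_len);

	/* build P_last || tail of cc, then run one normal XTS round with t_last */
	memcpy(buf, cc, BLK);
	memcpy(buf, tail, tail_len);
	xor_block(buf, buf, t_last);
	encrypt_block(buf, buf);
	xor_block(out_prev, buf, t_last);
}

The patch performs these same steps in place with scatterwalk_map_and_copy() and le128_xor(), and the CTS branch added to xor_tweak() arranges for the tweak of this final step to be left in rctx->t.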