@@ -293,14 +293,16 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
 	return skcipher_walk_next(walk);
 }
 
-int skcipher_walk_virt(struct skcipher_walk *walk,
-		       struct skcipher_request *req, bool atomic)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+		       struct skcipher_request *__restrict req, bool atomic)
 {
-	const struct skcipher_alg *alg =
-		crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_alg *alg;
 
 	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 
+	alg = crypto_skcipher_alg(tfm);
+
 	walk->total = req->cryptlen;
 	walk->nbytes = 0;
 	walk->iv = req->iv;
@@ -316,14 +318,9 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);
 
-	/*
-	 * Accessing 'alg' directly generates better code than using the
-	 * crypto_skcipher_blocksize() and similar helper functions here, as it
-	 * prevents the algorithm pointer from being repeatedly reloaded.
-	 */
-	walk->blocksize = alg->base.cra_blocksize;
-	walk->ivsize = alg->co.ivsize;
-	walk->alignmask = alg->base.cra_alignmask;
+	walk->blocksize = crypto_skcipher_blocksize(tfm);
+	walk->ivsize = crypto_skcipher_ivsize(tfm);
+	walk->alignmask = crypto_skcipher_alignmask(tfm);
 
 	if (alg->co.base.cra_type != &crypto_skcipher_type)
 		walk->stride = alg->co.chunksize;
@@ -334,10 +331,11 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_virt);
 
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
-				     struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+				     struct aead_request *__restrict req,
+				     bool atomic)
 {
-	const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 
 	walk->nbytes = 0;
 	walk->iv = req->iv;
@@ -353,30 +351,27 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
 	scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);
 
-	/*
-	 * Accessing 'alg' directly generates better code than using the
-	 * crypto_aead_blocksize() and similar helper functions here, as it
-	 * prevents the algorithm pointer from being repeatedly reloaded.
-	 */
-	walk->blocksize = alg->base.cra_blocksize;
-	walk->stride = alg->chunksize;
-	walk->ivsize = alg->ivsize;
-	walk->alignmask = alg->base.cra_alignmask;
+	walk->blocksize = crypto_aead_blocksize(tfm);
+	walk->stride = crypto_aead_chunksize(tfm);
+	walk->ivsize = crypto_aead_ivsize(tfm);
+	walk->alignmask = crypto_aead_alignmask(tfm);
 
 	return skcipher_walk_first(walk);
 }
 
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
-			       struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+			       struct aead_request *__restrict req,
+			       bool atomic)
 {
 	walk->total = req->cryptlen;
 
 	return skcipher_walk_aead_common(walk, req, atomic);
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
 
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
-			       struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+			       struct aead_request *__restrict req,
+			       bool atomic)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 