@@ -30,6 +30,8 @@
 #include <crypto/internal/aead.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/if_alg.h>
+#include <crypto/skcipher.h>
+#include <crypto/null.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
@@ -70,6 +72,7 @@ struct aead_async_req {
 struct aead_tfm {
	struct crypto_aead *aead;
	bool has_key;
+	struct crypto_skcipher *null_tfm;
 };
 
 struct aead_ctx {
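
The new `null_tfm` member holds a reference to the shared null skcipher ("ecb(cipher_null)"), which the rest of this patch uses as a scatterlist-to-scatterlist copy engine. A minimal sketch of its lifetime, based on the `crypto_get_default_null_skcipher2()`/`crypto_put_default_null_skcipher2()` helpers used further down in this diff (the get returns a refcounted global instance, so every successful get must be balanced by a put):

```c
/* Sketch: acquire and release the shared null skcipher. */
struct crypto_skcipher *null_tfm;

null_tfm = crypto_get_default_null_skcipher2();	/* takes a reference */
if (IS_ERR(null_tfm))
	return PTR_ERR(null_tfm);

/* ... use null_tfm to copy data between scatterlists ... */

crypto_put_default_null_skcipher2();		/* drops the reference */
```
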
@@ -168,7 +171,12 @@ static int aead_alloc_tsgl(struct sock *sk)
 	return 0;
 }
 
-static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
+/**
+ * Count number of SG entries from the beginning of the SGL to @bytes. If
+ * an offset is provided, the counting of the SG entries starts at the offset.
+ */
+static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes,
+				    size_t offset)
 {
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
@@ -183,32 +191,55 @@ static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
		struct scatterlist *sg = sgl->sg;
 
		for (i = 0; i < sgl->cur; i++) {
+			size_t bytes_count;
+
+			/* Skip offset */
+			if (offset >= sg[i].length) {
+				offset -= sg[i].length;
+				bytes -= sg[i].length;
+				continue;
+			}
+
+			bytes_count = sg[i].length - offset;
+
+			offset = 0;
			sgl_count++;
-			if (sg[i].length >= bytes)
+
+			/* If we have seen the requested number of bytes, stop */
+			if (bytes_count >= bytes)
				return sgl_count;
 
-			bytes -= sg[i].length;
+			bytes -= bytes_count;
		}
	}
 
	return sgl_count;
 }
 
+/**
+ * Release the specified buffers from the TX SGL pointed to by ctx->tsgl_list
+ * for @used bytes.
+ *
+ * If @dst is non-null, reassign the pages to @dst. The caller must release
+ * the pages. If @dst_offset is given, only reassign the pages to @dst
+ * starting at the @dst_offset (byte). The caller must ensure that @dst is
+ * large enough (e.g. by using aead_count_tsgl with the same offset).
+ */
 static void aead_pull_tsgl(struct sock *sk, size_t used,
-			   struct scatterlist *dst)
+			   struct scatterlist *dst, size_t dst_offset)
 {
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	struct scatterlist *sg;
-	unsigned int i;
+	unsigned int i, j;
 
	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
				       list);
		sg = sgl->sg;
 
-		for (i = 0; i < sgl->cur; i++) {
+		for (i = 0, j = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);
 
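
The offset handling added to aead_count_tsgl() can be modeled outside the kernel: SG entries that lie entirely inside @offset are skipped, the first counted entry only contributes its bytes past the offset, and @bytes keeps counting from the start of the SGL. A standalone sketch over plain length arrays (an illustration of the walk, not kernel code; all names are hypothetical):

```c
#include <stdio.h>
#include <stddef.h>

/* Standalone model of the offset-aware SG-entry counting above. */
static unsigned int count_entries(const size_t *len, unsigned int n,
				  size_t bytes, size_t offset)
{
	unsigned int i, count = 0;

	for (i = 0; i < n; i++) {
		size_t avail;

		/* Skip entries that end before the offset. */
		if (offset >= len[i]) {
			offset -= len[i];
			bytes -= len[i];
			continue;
		}

		avail = len[i] - offset;
		offset = 0;
		count++;

		/* Stop once the requested number of bytes is seen. */
		if (avail >= bytes)
			return count;
		bytes -= avail;
	}
	return count;
}

int main(void)
{
	size_t len[] = { 16, 32, 16 };

	/* A 16-byte tag at the end of a 64-byte stream: counting starts
	 * at offset 48, so only the last entry is needed. */
	printf("%u\n", count_entries(len, 3, 64, 48));	/* prints 1 */
	return 0;
}
```

This mirrors the decryption path later in this diff, which calls aead_count_tsgl(sk, processed, processed - as) to size an SGL for just the trailing tag.
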
@@ -219,8 +250,20 @@ static void aead_pull_tsgl(struct sock *sk, size_t used,
			 * Assumption: caller created aead_count_tsgl(len)
			 * SG entries in dst.
			 */
-			if (dst)
-				sg_set_page(dst + i, page, plen, sg[i].offset);
+			if (dst) {
+				if (dst_offset >= plen) {
+					/* discard page before offset */
+					dst_offset -= plen;
+					put_page(page);
+				} else {
+					/* reassign page to dst after offset */
+					sg_set_page(dst + j, page,
+						    plen - dst_offset,
+						    sg[i].offset + dst_offset);
+					dst_offset = 0;
+					j++;
+				}
+			}
 
			sg[i].length -= plen;
			sg[i].offset += plen;
@@ -233,6 +276,7 @@ static void aead_pull_tsgl(struct sock *sk, size_t used,
 
			if (!dst)
				put_page(page);
+
			sg_assign_page(sg + i, NULL);
		}
 
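
aead_pull_tsgl() applies the same offset idea when reassigning pages to @dst: pages consumed entirely by @dst_offset are released with put_page(), and the first kept page is trimmed by shrinking its length and advancing its in-page offset. A standalone model of that bookkeeping (hypothetical names, plain structs in place of struct scatterlist):

```c
#include <stdio.h>
#include <stddef.h>

struct entry { size_t off, len; };

/* Standalone model of the dst_offset handling in aead_pull_tsgl():
 * entries wholly inside dst_offset are dropped (the kernel also
 * put_page()s them); the first kept entry is trimmed. */
static unsigned int pull(const struct entry *src, unsigned int n,
			 size_t dst_offset, struct entry *dst)
{
	unsigned int i, j = 0;

	for (i = 0; i < n; i++) {
		if (dst_offset >= src[i].len) {
			dst_offset -= src[i].len;	/* discard entry */
			continue;
		}
		dst[j].off = src[i].off + dst_offset;	/* trim first kept entry */
		dst[j].len = src[i].len - dst_offset;
		dst_offset = 0;				/* rest kept in full */
		j++;
	}
	return j;
}

int main(void)
{
	struct entry src[] = { { 0, 4096 }, { 0, 32 } }, dst[2];
	unsigned int i, n = pull(src, 2, 4080, dst);

	/* 4128 bytes of TX data, with the interesting part in the last
	 * 48 bytes: dst gets the 16-byte page tail plus the full
	 * second entry. */
	for (i = 0; i < n; i++)
		printf("dst[%u]: off=%zu len=%zu\n", i, dst[i].off, dst[i].len);
	return 0;
}
```
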
@@ -583,6 +627,20 @@ static void aead_async_cb(struct crypto_async_request *_req, int err)
	release_sock(sk);
 }
 
+static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
+				struct scatterlist *src,
+				struct scatterlist *dst, unsigned int len)
+{
+	SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+
+	skcipher_request_set_tfm(skreq, null_tfm);
+	skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      NULL, NULL);
+	skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+
+	return crypto_skcipher_encrypt(skreq);
+}
+
 static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			  size_t ignored, int flags)
 {
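
crypto_aead_copy_sgl() exploits the fact that "encrypting" with the null cipher is a plain copy, so one call moves len bytes from one SGL to another without a manual page walk. A hedged usage sketch (kernel context assumed, with a null_tfm reference already held; the buffers are kmalloc'd because scatterlists must not reference stack memory):

```c
/* Sketch: copy 64 bytes between two linear buffers via the null cipher. */
char *from = kmalloc(64, GFP_KERNEL);
char *to = kmalloc(64, GFP_KERNEL);
struct scatterlist src, dst;
int err = -ENOMEM;

if (from && to) {
	sg_init_one(&src, from, 64);	/* single-entry SGLs */
	sg_init_one(&dst, to, 64);
	err = crypto_aead_copy_sgl(null_tfm, &src, &dst, 64);
}
kfree(from);
kfree(to);
```
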
@@ -593,11 +651,14 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
	struct aead_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
+	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int areqlen =
		sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
	struct aead_async_req *areq;
	struct aead_rsgl *last_rsgl = NULL;
+	struct aead_tsgl *tsgl;
+	struct scatterlist *src;
	int err = 0;
	size_t used = 0;		/* [in] TX bufs to be en/decrypted */
	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
@@ -716,25 +777,91 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
		outlen -= less;
	}
 
+	processed = used + ctx->aead_assoclen;
+	tsgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl, list);
+
	/*
-	 * Create a per request TX SGL for this request which tracks the
-	 * SG entries from the global TX SGL.
+	 * Copy of AAD from source to destination
+	 *
+	 * The AAD is copied to the destination buffer without change. Even
+	 * when user space uses an in-place cipher operation, the kernel
+	 * will copy the data, as it cannot see whether such an in-place
+	 * operation is initiated.
+	 *
+	 * To ensure efficiency, the following implementation ensures that
+	 * the ciphers are invoked to perform the crypto operation in-place.
+	 * This is achieved by the memory management specified as follows.
	 */
-	processed = used + ctx->aead_assoclen;
-	areq->tsgl_entries = aead_count_tsgl(sk, processed);
-	if (!areq->tsgl_entries)
-		areq->tsgl_entries = 1;
-	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
-				  GFP_KERNEL);
-	if (!areq->tsgl) {
-		err = -ENOMEM;
-		goto free;
+
+	/* Use the RX SGL as source (and destination) for crypto op. */
+	src = areq->first_rsgl.sgl.sg;
+
+	if (ctx->enc) {
+		/*
+		 * Encryption operation - The in-place cipher operation is
+		 * achieved by the following operation:
+		 *
+		 * TX SGL: AAD || PT
+		 *          |      |
+		 *          | copy |
+		 *          v      v
+		 * RX SGL: AAD || PT || Tag
+		 */
+		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+					   areq->first_rsgl.sgl.sg, processed);
+		if (err)
+			goto free;
+		aead_pull_tsgl(sk, processed, NULL, 0);
+	} else {
+		/*
+		 * Decryption operation - To achieve an in-place cipher
+		 * operation, the following SGL structure is used:
+		 *
+		 * TX SGL: AAD || CT || Tag
+		 *          |      |     ^
+		 *          | copy |     | Create SGL link.
+		 *          v      v     |
+		 * RX SGL: AAD || CT ----+
+		 */
+
+		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
+		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+					   areq->first_rsgl.sgl.sg, outlen);
+		if (err)
+			goto free;
+
+		/* Create TX SGL for tag and chain it to RX SGL. */
+		areq->tsgl_entries = aead_count_tsgl(sk, processed,
+						     processed - as);
+		if (!areq->tsgl_entries)
+			areq->tsgl_entries = 1;
+		areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
+					      areq->tsgl_entries,
+					  GFP_KERNEL);
+		if (!areq->tsgl) {
+			err = -ENOMEM;
+			goto free;
+		}
+		sg_init_table(areq->tsgl, areq->tsgl_entries);
+
+		/* Release TX SGL, except for tag data, and reassign tag data. */
+		aead_pull_tsgl(sk, processed, areq->tsgl, processed - as);
+
+		/* Chain the areq TX SGL holding the tag with the RX SGL. */
+		if (last_rsgl) {
+			/* RX SGL present */
+			struct af_alg_sgl *sgl_prev = &last_rsgl->sgl;
+
+			sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+			sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
+				 areq->tsgl);
+		} else
+			/* no RX SGL present (e.g. authentication only) */
+			src = areq->tsgl;
	}
-	sg_init_table(areq->tsgl, areq->tsgl_entries);
-	aead_pull_tsgl(sk, processed, areq->tsgl);
 
	/* Initialize the crypto operation */
-	aead_request_set_crypt(&areq->aead_req, areq->tsgl,
+	aead_request_set_crypt(&areq->aead_req, src,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->aead_req, tfm);
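
From user space, the layout in the two diagrams is exactly what AF_ALG now presents: for encryption the process writes AAD || PT and reads back AAD || CT || Tag, since the kernel copies the AAD to the output. A minimal sketch (assumes a kernel with this patch set and CONFIG_CRYPTO_USER_API_AEAD enabled; error handling omitted; key and IV values are zeroed placeholders):

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };		/* placeholder key */
	unsigned char in[32] = { 0 };		/* 16 bytes AAD || 16 bytes PT */
	unsigned char out[48];			/* AAD || CT || 16-byte tag */
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 12) +
		  CMSG_SPACE(4)] = { 0 };
	struct iovec iov = { .iov_base = in, .iov_len = sizeof(in) };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);		/* operation type */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);		/* 12-byte GCM IV (zeroed) */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 12);
	((struct af_alg_iv *)CMSG_DATA(cmsg))->ivlen = 12;

	cmsg = CMSG_NXTHDR(&msg, cmsg);		/* first 16 input bytes are AAD */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = 16;

	sendmsg(opfd, &msg, 0);			/* TX SGL: AAD || PT */
	read(opfd, out, sizeof(out));		/* RX SGL: AAD || CT || Tag */

	printf("read %zu bytes: AAD || CT || Tag\n", sizeof(out));
	close(opfd);
	close(tfmfd);
	return 0;
}
```
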
@@ -951,6 +1078,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
 {
	struct aead_tfm *tfm;
	struct crypto_aead *aead;
+	struct crypto_skcipher *null_tfm;
 
	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
@@ -962,7 +1090,15 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
		return ERR_CAST(aead);
	}
 
+	null_tfm = crypto_get_default_null_skcipher2();
+	if (IS_ERR(null_tfm)) {
+		crypto_free_aead(aead);
+		kfree(tfm);
+		return ERR_CAST(null_tfm);
+	}
+
	tfm->aead = aead;
+	tfm->null_tfm = null_tfm;
 
	return tfm;
 }
@@ -1003,7 +1139,8 @@ static void aead_sock_destruct(struct sock *sk)
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivlen = crypto_aead_ivsize(tfm);
 
-	aead_pull_tsgl(sk, ctx->used, NULL);
+	aead_pull_tsgl(sk, ctx->used, NULL, 0);
+	crypto_put_default_null_skcipher2();
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);