@@ -197,6 +197,141 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
 	return ret;
 }
 
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+					struct cpt_inline_ipsec_cfg_msg *req)
+{
+	u16 sso_pf_func = req->sso_pf_func;
+	u8 nix_sel;
+	u64 val;
+
+	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+	if (req->enable && (val & BIT_ULL(16))) {
+		/* The IPsec inline outbound path is already enabled for this
+		 * CPT LF; the HRM states that the inline inbound and outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+		 */
+		return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+	}
+	/* Check if the requested 'CPTLF <=> SSOLF' mapping is valid */
+	if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+		return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+	nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+	/* Enable CPT LF for IPsec inline inbound operations */
+	if (req->enable)
+		val |= BIT_ULL(9);
+	else
+		val &= ~BIT_ULL(9);
+
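+	/* nix_sel lands in bit 8 of CPT_AF_LFX_CTL and selects the NIX block
+	 * paired with this CPT block (nix_sel is 1 only for CPT1, see above).
+	 */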
+	val |= (u64)nix_sel << 8;
+	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+	if (sso_pf_func) {
+		/* Set SSO_PF_FUNC */
+		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
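+		/* Per the shifts below, CTL2 carries SSO_PF_FUNC in bits 47:32
+		 * and NIX_PF_FUNC in bits 63:48.
+		 */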
+		val |= (u64)sso_pf_func << 32;
+		val |= (u64)req->nix_pf_func << 48;
+		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+	}
+	if (req->sso_pf_func_ovrd)
+		/* Set SSO_PF_FUNC_OVRD for inline IPsec */
+		rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+	/* Configure the X2P link register with the CPT base channel number
+	 * and the range of channels it should propagate to X2P.
+	 */
+	if (!is_rvu_otx2(rvu)) {
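+		/* Bits 16 and up encode log2 of the CPT channel count; the
+		 * low bits carry the base channel number.
+		 */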
+		val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+		val |= rvu->hw->cpt_chan_base;
+
+		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+	}
+
+	return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+					 struct cpt_inline_ipsec_cfg_msg *req)
+{
+	u16 nix_pf_func = req->nix_pf_func;
+	int nix_blkaddr;
+	u8 nix_sel;
+	u64 val;
+
+	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+	if (req->enable && (val & BIT_ULL(9))) {
+		/* The IPsec inline inbound path is already enabled for this
+		 * CPT LF; the HRM states that the inline inbound and outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+		 */
+		return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+	}
+
+	/* Check if the requested 'CPTLF <=> NIXLF' mapping is valid */
+	if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+		return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+	/* Enable CPT LF for IPsec inline outbound operations */
+	if (req->enable)
+		val |= BIT_ULL(16);
+	else
+		val &= ~BIT_ULL(16);
+	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+	if (nix_pf_func) {
+		/* Set NIX_PF_FUNC */
+		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+		val |= (u64)nix_pf_func << 48;
+		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+		nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+		nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
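+		/* Re-read CTL (written just above) so the NIX select in bit 8
+		 * is set without clobbering the freshly written enable bit.
+		 */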
+		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+		val |= (u64)nix_sel << 8;
+		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+	}
+
+	return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+					  struct cpt_inline_ipsec_cfg_msg *req,
+					  struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	int cptlf, blkaddr, ret;
+	u16 actual_slot;
+
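+	/* Map the requesting PF/VF's CPT LF slot to a block address and the
+	 * LF number local to that block.
+	 */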
+	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+					    req->slot, &actual_slot);
+	if (blkaddr < 0)
+		return CPT_AF_ERR_LF_INVALID;
+
+	block = &rvu->hw->block[blkaddr];
+
+	cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+	if (cptlf < 0)
+		return CPT_AF_ERR_LF_INVALID;
+
+	switch (req->dir) {
+	case CPT_INLINE_INBOUND:
+		ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+		break;
+
+	case CPT_INLINE_OUTBOUND:
+		ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+		break;
+
+	default:
+		return CPT_AF_ERR_PARAM;
+	}
+
+	return ret;
+}
+
 static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
 {
 	u64 offset = req->reg_offset;