@@ -129,6 +129,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
                 return ERR_PTR(-ENXIO);
         }

+        dev_set_drvdata(dev, cxlhdm);
+
         return cxlhdm;
 }
 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
@@ -466,6 +468,222 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
         return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
 }

+static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
+{
+        u16 eig;
+        u8 eiw;
+
+        /*
+         * Input validation ensures these warns never fire, but otherwise
+         * suppress uninitialized variable usage warnings.
+         */
+        if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw),
+                      "invalid interleave_ways: %d\n", cxld->interleave_ways))
+                return;
+        if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig),
+                      "invalid interleave_granularity: %d\n",
+                      cxld->interleave_granularity))
+                return;
+
+        u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
+        u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
+        *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
+}
+
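+/*
+ * CXL_HDM_DECODER0_CTRL_TYPE selects the target device type: set for a
+ * type-3 memory expander (target_type == 3, i.e. CXL_DECODER_EXPANDER),
+ * clear for a type-2 accelerator.
+ */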
+static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
+{
+        u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
+                          CXL_HDM_DECODER0_CTRL_TYPE);
+}
+
+static void cxld_set_hpa(struct cxl_decoder *cxld, u64 *base, u64 *size)
+{
+        struct cxl_region *cxlr = cxld->region;
+        struct cxl_region_params *p = &cxlr->params;
+
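+        /* cache the region's HPA window and return it for register programming */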
+        cxld->hpa_range = (struct range) {
+                .start = p->res->start,
+                .end = p->res->end,
+        };
+
+        *base = p->res->start;
+        *size = resource_size(p->res);
+}
+
+static void cxld_clear_hpa(struct cxl_decoder *cxld)
+{
+        cxld->hpa_range = (struct range) {
+                .start = 0,
+                .end = -1,
+        };
+}
+
+static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+{
+        struct cxl_dport **t = &cxlsd->target[0];
+        int ways = cxlsd->cxld.interleave_ways;
+
+        if (dev_WARN_ONCE(&cxlsd->cxld.dev,
+                          ways > 8 || ways > cxlsd->nr_targets,
+                          "ways: %d overflows targets: %d\n", ways,
+                          cxlsd->nr_targets))
+                return -ENXIO;
+
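+        /* pack one downstream port id per byte of the Target List, in interleave order */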
+        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
+        if (ways > 1)
+                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
+        if (ways > 2)
+                *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
+        if (ways > 3)
+                *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
+        if (ways > 4)
+                *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
+        if (ways > 5)
+                *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
+        if (ways > 6)
+                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
+        if (ways > 7)
+                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
+
+        return 0;
+}
+
+/*
+ * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
+ * committed or error within 10ms, but just be generous with 20ms to account for
+ * clock skew and other marginal behavior
+ */
+#define COMMIT_TIMEOUT_MS 20
+static int cxld_await_commit(void __iomem *hdm, int id)
+{
+        u32 ctrl;
+        int i;
+
+        for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
+                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
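+                /* on a commit error, withdraw the commit request before reporting failure */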
+                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
+                        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+                        return -EIO;
+                }
+                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+                        return 0;
+                fsleep(1000);
+        }
+
+        return -ETIMEDOUT;
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+        int id = cxld->id, rc;
+        u64 base, size;
+        u32 ctrl;
+
+        if (cxld->flags & CXL_DECODER_F_ENABLE)
+                return 0;
+
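+        /* enforce ascending decoder id commit order; port->commit_end tracks the last committed id */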
+        if (port->commit_end + 1 != id) {
+                dev_dbg(&port->dev,
+                        "%s: out of order commit, expected decoder%d.%d\n",
+                        dev_name(&cxld->dev), port->id, port->commit_end + 1);
+                return -EBUSY;
+        }
+
+        down_read(&cxl_dpa_rwsem);
+        /* common decoder settings */
+        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+        cxld_set_interleave(cxld, &ctrl);
+        cxld_set_type(cxld, &ctrl);
+        cxld_set_hpa(cxld, &base, &size);
+
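+        /* program the decoder's HPA base and size registers */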
+        writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+        writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+        writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+        writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+
+        if (is_switch_decoder(&cxld->dev)) {
+                struct cxl_switch_decoder *cxlsd =
+                        to_cxl_switch_decoder(&cxld->dev);
+                void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
+                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
+                u64 targets;
+
+                rc = cxlsd_set_targets(cxlsd, &targets);
+                if (rc) {
+                        dev_dbg(&port->dev, "%s: target configuration error\n",
+                                dev_name(&cxld->dev));
+                        /* release cxl_dpa_rwsem here; the err path calls ->reset(), which takes it again */
+                        up_read(&cxl_dpa_rwsem);
+                        goto err;
+                }
+
+                writel(upper_32_bits(targets), tl_hi);
+                writel(lower_32_bits(targets), tl_lo);
+        } else {
+                struct cxl_endpoint_decoder *cxled =
+                        to_cxl_endpoint_decoder(&cxld->dev);
+                void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
+                void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
+
+                writel(upper_32_bits(cxled->skip), sk_hi);
+                writel(lower_32_bits(cxled->skip), sk_lo);
+        }
+
+        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+        up_read(&cxl_dpa_rwsem);
+
+        port->commit_end++;
+        rc = cxld_await_commit(hdm, cxld->id);
+err:
+        if (rc) {
+                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
+                        dev_name(&cxld->dev), rc);
+                cxld->reset(cxld);
+                return rc;
+        }
+        cxld->flags |= CXL_DECODER_F_ENABLE;
+
+        return 0;
+}
+
+static int cxl_decoder_reset(struct cxl_decoder *cxld)
+{
+        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+        int id = cxld->id;
+        u32 ctrl;
+
+        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+                return 0;
+
+        if (port->commit_end != id) {
+                dev_dbg(&port->dev,
+                        "%s: out of order reset, expected decoder%d.%d\n",
+                        dev_name(&cxld->dev), port->id, port->commit_end);
+                return -EBUSY;
+        }
+
+        down_read(&cxl_dpa_rwsem);
+        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
+        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
+
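+        /* decoder is now de-committed; clear the cached HPA range and zero the base/size registers */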
+        cxld_clear_hpa(cxld);
+        writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
+        writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
+        writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
+        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
+        up_read(&cxl_dpa_rwsem);
+
+        port->commit_end--;
+        cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+        return 0;
+}
+
 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                             int *target_map, void __iomem *hdm, int which,
                             u64 *dpa_base)
@@ -488,6 +706,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
         base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
         size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
         committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
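+        /* hooks to push this decoder's settings to hardware and to tear them down */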
+        cxld->commit = cxl_decoder_commit;
+        cxld->reset = cxl_decoder_reset;

         if (!committed)
                 size = 0;
@@ -511,6 +731,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                         cxld->target_type = CXL_DECODER_EXPANDER;
                 else
                         cxld->target_type = CXL_DECODER_ACCELERATOR;
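+                /* decoders found already committed must also be in ascending id order */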
+                if (cxld->id != port->commit_end + 1) {
+                        dev_warn(&port->dev,
+                                 "decoder%d.%d: Committed out of order\n",
+                                 port->id, cxld->id);
+                        return -ENXIO;
+                }
+                port->commit_end = cxld->id;
         } else {
                 /* unless / until type-2 drivers arrive, assume type-3 */
                 if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
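
For illustration only (not part of this diff): a consumer of the commit/reset hooks
installed above, for example the region programming code elsewhere in this series,
might drive them roughly as in the sketch below. The caller name is hypothetical,
and error handling around the region itself is omitted.

        #include "cxl.h"        /* struct cxl_decoder, from the drivers/cxl private header */

        /* hypothetical caller: bring a decoder online, then tear it down when done */
        static int example_program_decoder(struct cxl_decoder *cxld)
        {
                int rc;

                rc = cxld->commit(cxld);        /* program HPA, interleave, targets/skip, then set COMMIT */
                if (rc)
                        return rc;              /* cxl_decoder_commit() already called ->reset() on failure */

                /* ... decoder decodes while the region is in use ... */

                return cxld->reset(cxld);       /* clear COMMIT and zero the HPA registers */
        }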