File tree: 2 files changed, +56 -0 lines changed

@@ -433,3 +433,31 @@ def _nuisance_tuning(
         res = {"params": params, "tune_res": tune_res}

         return res
+
+    def sensitivity_benchmark(self, benchmarking_set, fit_args=None):
+        """
+        Computes a benchmark for a given set of features.
+        Returns a DataFrame containing the corresponding values for cf_y, cf_d, rho and the change in estimates.
+
+        Parameters
+        ----------
+        benchmarking_set : list
+            List of features to be used for benchmarking.
+
+        fit_args : dict, optional
+            Additional arguments for the fit method.
+            Default is None.
+
+        Returns
+        -------
+        benchmark_results : pandas.DataFrame
+            Benchmark results.
+        """
+        if self.score == "experimental":
+            warnings.warn(
+                "Sensitivity benchmarking for experimental score may not be meaningful. "
+                "Consider using score='observational' for conditional treatment assignment.",
+                UserWarning,
+            )
+
+        return super().sensitivity_benchmark(benchmarking_set, fit_args)
The second changed file receives the same addition:

@@ -664,3 +664,31 @@ def _nuisance_tuning(
         res = {"params": params, "tune_res": tune_res}

         return res
+
+    def sensitivity_benchmark(self, benchmarking_set, fit_args=None):
+        """
+        Computes a benchmark for a given set of features.
+        Returns a DataFrame containing the corresponding values for cf_y, cf_d, rho and the change in estimates.
+
+        Parameters
+        ----------
+        benchmarking_set : list
+            List of features to be used for benchmarking.
+
+        fit_args : dict, optional
+            Additional arguments for the fit method.
+            Default is None.
+
+        Returns
+        -------
+        benchmark_results : pandas.DataFrame
+            Benchmark results.
+        """
+        if self.score == "experimental":
+            warnings.warn(
+                "Sensitivity benchmarking for experimental score may not be meaningful. "
+                "Consider using score='observational' for conditional treatment assignment.",
+                UserWarning,
+            )
+
+        return super().sensitivity_benchmark(benchmarking_set, fit_args)
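For context, a minimal usage sketch of the new override (not part of the diff). The diff does not show file names, so the example assumes one of the touched classes is a DoubleML model with score in {"observational", "experimental"}, e.g. DoubleMLDID; the data generator, learners, and benchmarking set below are illustrative assumptions rather than anything taken from this PR.

```python
# Usage sketch, assuming DoubleMLDID is one of the classes touched by this PR
# (the diff does not name the files); learners and data are illustrative only.
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

import doubleml as dml
from doubleml.datasets import make_did_SZ2020

# Simulated difference-in-differences data (assumed example DGP from doubleml.datasets).
dml_data = make_did_SZ2020(n_obs=500)

# Outcome regression and propensity learners.
ml_g = RandomForestRegressor(n_estimators=100)
ml_m = RandomForestClassifier(n_estimators=100)

# score="observational" avoids the UserWarning added by this PR;
# score="experimental" would emit it before delegating to the parent method.
dml_did = dml.DoubleMLDID(dml_data, ml_g, ml_m, score="observational")
dml_did.fit()
dml_did.sensitivity_analysis()

# Benchmark against a subset of observed covariates; the parent-class method
# returns a pandas.DataFrame with cf_y, cf_d, rho and the change in estimates.
benchmark_results = dml_did.sensitivity_benchmark(benchmarking_set=list(dml_data.x_cols[:2]))
print(benchmark_results)
```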