
Commit d5b4d9c

add warnings for did benchmark and experimental score
1 parent 1742cb0 commit d5b4d9c

2 files changed: +56 -0

doubleml/did/did.py (+28 lines)
@@ -433,3 +433,31 @@ def _nuisance_tuning(
         res = {"params": params, "tune_res": tune_res}

         return res
+
+    def sensitivity_benchmark(self, benchmarking_set, fit_args=None):
+        """
+        Computes a benchmark for a given set of features.
+        Returns a DataFrame containing the corresponding values for cf_y, cf_d, rho and the change in estimates.
+
+        Parameters
+        ----------
+        benchmarking_set : list
+            List of features to be used for benchmarking.
+
+        fit_args : dict, optional
+            Additional arguments for the fit method.
+            Default is None.
+
+        Returns
+        -------
+        benchmark_results : pandas.DataFrame
+            Benchmark results.
+        """
+        if self.score == "experimental":
+            warnings.warn(
+                "Sensitivity benchmarking for experimental score may not be meaningful. "
+                "Consider using score='observational' for conditional treatment assignment.",
+                UserWarning,
+            )
+
+        return super().sensitivity_benchmark(benchmarking_set, fit_args)

doubleml/did/did_cs.py (+28 lines)
@@ -664,3 +664,31 @@ def _nuisance_tuning(
         res = {"params": params, "tune_res": tune_res}

         return res
+
+    def sensitivity_benchmark(self, benchmarking_set, fit_args=None):
+        """
+        Computes a benchmark for a given set of features.
+        Returns a DataFrame containing the corresponding values for cf_y, cf_d, rho and the change in estimates.
+
+        Parameters
+        ----------
+        benchmarking_set : list
+            List of features to be used for benchmarking.
+
+        fit_args : dict, optional
+            Additional arguments for the fit method.
+            Default is None.
+
+        Returns
+        -------
+        benchmark_results : pandas.DataFrame
+            Benchmark results.
+        """
+        if self.score == "experimental":
+            warnings.warn(
+                "Sensitivity benchmarking for experimental score may not be meaningful. "
+                "Consider using score='observational' for conditional treatment assignment.",
+                UserWarning,
+            )
+
+        return super().sensitivity_benchmark(benchmarking_set, fit_args)
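
For context, the sketch below shows how the new warning would surface to a user who calls sensitivity_benchmark on a DiD model fitted with score="experimental". The data helper make_did_SZ2020, its import path, the random-forest learners, and the covariate name "Z1" are illustrative assumptions and not part of this commit; the exact constructor signature may differ across doubleml versions.

# Minimal usage sketch (assumed: make_did_SZ2020 location, learner choice,
# covariate name "Z1").
import warnings

from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

import doubleml as dml
from doubleml.datasets import make_did_SZ2020  # assumed location of the DiD data generator

dml_data = make_did_SZ2020(n_obs=500)
dml_did = dml.DoubleMLDID(
    dml_data,
    ml_g=RandomForestRegressor(n_estimators=100),
    ml_m=RandomForestClassifier(n_estimators=100),
    score="experimental",  # the score this commit warns about
)
dml_did.fit()
dml_did.sensitivity_analysis()

# With score="experimental", sensitivity_benchmark now emits a UserWarning
# before delegating to the parent implementation.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    bench = dml_did.sensitivity_benchmark(benchmarking_set=["Z1"])

print([str(w.message) for w in caught])  # includes the experimental-score warning
print(bench)  # DataFrame with cf_y, cf_d, rho and the change in estimates

The same pattern applies to DoubleMLDIDCS from did_cs.py, since both overrides only add the warning and then fall back to the shared base-class benchmarking.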
