Commit

Fix loop indentation typo
See #80
hoffmansc authored May 7, 2019
1 parent 0cf736f commit 7f9df18
Showing 1 changed file with 34 additions and 34 deletions.
aif360/algorithms/postprocessing/reject_option_classification.py (68 changes: 34 additions & 34 deletions)
@@ -121,40 +121,40 @@ def fit(self, dataset_true, dataset_pred):
                low_ROC_margin = 0.0
                high_ROC_margin = (1.0-class_thresh)

            # Iterate through ROC margins
            for ROC_margin in np.linspace(
                                low_ROC_margin,
                                high_ROC_margin,
                                self.num_ROC_margin):
                self.ROC_margin = ROC_margin

                # Predict using the current threshold and margin
                dataset_transf_pred = self.predict(dataset_pred)

                dataset_transf_metric_pred = BinaryLabelDatasetMetric(
                                             dataset_transf_pred,
                                             unprivileged_groups=self.unprivileged_groups,
                                             privileged_groups=self.privileged_groups)
                classified_transf_metric = ClassificationMetric(
                                                 dataset_true,
                                                 dataset_transf_pred,
                                                 unprivileged_groups=self.unprivileged_groups,
                                                 privileged_groups=self.privileged_groups)

                ROC_margin_arr[cnt] = self.ROC_margin
                class_thresh_arr[cnt] = self.classification_threshold

                # Balanced accuracy and fairness metric computations
                balanced_acc_arr[cnt] = 0.5*(classified_transf_metric.true_positive_rate()\
                                       +classified_transf_metric.true_negative_rate())
                if self.metric_name == "Statistical parity difference":
                    fair_metric_arr[cnt] = dataset_transf_metric_pred.mean_difference()
                elif self.metric_name == "Average odds difference":
                    fair_metric_arr[cnt] = classified_transf_metric.average_odds_difference()
                elif self.metric_name == "Equal opportunity difference":
                    fair_metric_arr[cnt] = classified_transf_metric.equal_opportunity_difference()

                cnt += 1

        rel_inds = np.logical_and(fair_metric_arr >= self.metric_lb,
                                  fair_metric_arr <= self.metric_ub)
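
With the corrected indentation, the margin loop runs once per classification threshold, so the flat result arrays (sized num_class_thresh * num_ROC_margin) are filled for every (threshold, margin) pair before the rel_inds mask above keeps the points that satisfy the fairness bounds. The following is a minimal, self-contained sketch of that nested grid search, not the library code: the evaluate() function is a placeholder for the dataset-based metrics, and the grid sizes and bounds are illustrative values.

import numpy as np

# Illustrative grid sizes only (the library defaults are larger).
num_class_thresh = 5
num_ROC_margin = 4

balanced_acc_arr = np.zeros(num_class_thresh * num_ROC_margin)
fair_metric_arr = np.zeros_like(balanced_acc_arr)
class_thresh_arr = np.zeros_like(balanced_acc_arr)
ROC_margin_arr = np.zeros_like(balanced_acc_arr)

def evaluate(thresh, margin):
    # Placeholder metrics; the real fit() computes balanced accuracy and a
    # fairness metric from the transformed predictions on the datasets.
    return 1.0 - abs(thresh - 0.5) - margin, margin - abs(thresh - 0.5)

cnt = 0
for class_thresh in np.linspace(0.01, 0.99, num_class_thresh):
    # Margin range depends on the threshold, as in the if/else shown above.
    high_ROC_margin = class_thresh if class_thresh <= 0.5 else 1.0 - class_thresh
    # The margin loop is nested inside the threshold loop, matching the
    # indentation fixed by this commit, so cnt visits every slot of the arrays.
    for ROC_margin in np.linspace(0.0, high_ROC_margin, num_ROC_margin):
        balanced_acc_arr[cnt], fair_metric_arr[cnt] = evaluate(class_thresh, ROC_margin)
        class_thresh_arr[cnt] = class_thresh
        ROC_margin_arr[cnt] = ROC_margin
        cnt += 1

# Keep points within the fairness bounds (the rel_inds mask above),
# then take the most accurate one.
metric_lb, metric_ub = -0.05, 0.05
rel_inds = np.logical_and(fair_metric_arr >= metric_lb, fair_metric_arr <= metric_ub)
best = np.argmax(np.where(rel_inds, balanced_acc_arr, -np.inf))
print(class_thresh_arr[best], ROC_margin_arr[best])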
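For context, here is a sketch of how this post-processor is typically driven from user code. This is not part of the commit: the constructor arguments follow the documented AIF360 defaults, the group definitions are illustrative, and dataset_true / dataset_pred are assumed to be pre-built BinaryLabelDataset objects holding ground-truth labels and classifier scores for the same instances.

from aif360.algorithms.postprocessing import RejectOptionClassification

# Illustrative protected-attribute specification.
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]

roc = RejectOptionClassification(
    unprivileged_groups=unprivileged_groups,
    privileged_groups=privileged_groups,
    low_class_thresh=0.01, high_class_thresh=0.99,
    num_class_thresh=100, num_ROC_margin=50,
    # One of the three metric names handled in the loop above.
    metric_name="Statistical parity difference",
    metric_ub=0.05, metric_lb=-0.05)

# dataset_true: ground-truth labels; dataset_pred: the same instances scored by
# a trained classifier (both assumed to exist as BinaryLabelDataset objects).
roc.fit(dataset_true, dataset_pred)
dataset_transf_pred = roc.predict(dataset_pred)

# Best operating point found by the grid search in fit().
print(roc.classification_threshold, roc.ROC_margin)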
