diff --git a/images/regression_ensemble.png b/images/regression_ensemble.png
new file mode 100644
index 0000000..8ce1939
Binary files /dev/null and b/images/regression_ensemble.png differ
diff --git a/images/triangular_simplex.png b/images/triangular_simplex.png
new file mode 100644
index 0000000..76a635e
Binary files /dev/null and b/images/triangular_simplex.png differ
diff --git a/src/data/__pycache__/toy_loading.cpython-37.pyc b/src/data/__pycache__/toy_loading.cpython-37.pyc
new file mode 100644
index 0000000..7079e1b
Binary files /dev/null and b/src/data/__pycache__/toy_loading.cpython-37.pyc differ
diff --git a/src/data/toy_loading.py b/src/data/toy_loading.py
new file mode 100644
index 0000000..fb88502
--- /dev/null
+++ b/src/data/toy_loading.py
@@ -0,0 +1,136 @@
+import pandas as pd
+import numpy as np
+import torch
+from torch.utils import data as tdata
+
+
+def get_toy_dataset(
+ target_generator_fn,
+ noise_generator_fn,
+ train_limits=(-1.0, 1.0),
+ test_limits=(-1.5, 1.5), ood_abs_limits=(1.1, 1.3),
+ train_samples=100, test_samples=200,
+ ood_samples=40, random_state=0
+):
+    """Generates a one-dimensional toy regression dataset with train, test and OOD splits."""
+ np.random.seed(random_state)
+ x_train = np.random.uniform(train_limits[0], train_limits[1], (train_samples,))
+
+ y_train = target_generator_fn(x_train)
+ np.random.seed(random_state)
+ y_noise = noise_generator_fn(x_train) * np.random.randn(y_train.shape[0])
+ y_train += y_noise
+
+ np.random.seed(random_state)
+ x_ood_1 = np.random.uniform(ood_abs_limits[0], ood_abs_limits[1], (ood_samples // 2,))
+ np.random.seed(random_state)
+ x_ood_2 = np.random.uniform(-ood_abs_limits[1], -ood_abs_limits[0], (ood_samples // 2,))
+ x_ood = np.concatenate([x_ood_1, x_ood_2], axis=0)
+
+ y_ood = target_generator_fn(x_ood)
+ np.random.seed(random_state)
+ y_ood += noise_generator_fn(x_ood) * np.random.randn(y_ood.shape[0])
+
+ x_test = np.linspace(test_limits[0], test_limits[1], test_samples)
+ y_test = target_generator_fn(x_test)
+
+ train_data, test_data, ood_data = [
+ tdata.TensorDataset(
+ torch.Tensor(x_c).unsqueeze(1),
+ torch.Tensor(y_c).unsqueeze(1)
+ ) for (x_c, y_c) in zip(
+ [x_train, x_test, x_ood], [y_train, y_test, y_ood]
+ )
+ ]
+ return train_data, test_data, ood_data, y_noise
+
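+# Minimal usage sketch (illustrative only; the generator functions shown here
+# are hypothetical examples, not part of this module):
+#   train_data, test_data, ood_data, y_noise = get_toy_dataset(
+#       target_generator_fn=lambda x: np.sin(4.0 * x),
+#       noise_generator_fn=lambda x: 0.1 * np.ones_like(x),
+#   )
+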
+def get_arrays_from_loader(loader):
+ first_elems = []
+ second_elems = []
+ for item in loader:
+ first_elems += [item[0]]
+ second_elems += [item[1]]
+ return torch.cat(first_elems, dim=0), torch.cat(second_elems, dim=0)
+
+def get_table_loaders(
+ train_data, test_data, batch_size, ood_data=None, ood_test_data=None,
+ ood_batch_size=None, shuffle=True, normalize_targets=False, target_id=-1,
+):
+ feature_len = train_data.shape[1] - 1
+ if target_id == -1:
+ x_train, y_train = train_data[:, :-1], train_data[:, -1:]
+ x_test, y_test = test_data[:, :-1], test_data[:, -1:]
+ elif target_id > -1:
+ idxs = list(range(train_data.shape[1]))
+ idxs.pop(target_id)
+ x_train, y_train = train_data[:, idxs],\
+ train_data[:, target_id].reshape(-1,1)
+ x_test, y_test = test_data[:, idxs],\
+ test_data[:, target_id].reshape(-1,1)
+ else:
+ raise ValueError("Provide target_id >= -1")
+
+ # Normalize train/test features & targets (if necessary)
+ x_means, x_stds = x_train.mean(axis=0), x_train.std(axis=0)
+ if normalize_targets:
+ y_means, y_stds = y_train.mean(axis=0), y_train.std(axis=0)
+ else:
+ y_means, y_stds = 0., 1.
+ x_train = (x_train - x_means) / x_stds
+ y_train = (y_train - y_means) / y_stds
+ x_test = (x_test - x_means) / x_stds
+ y_test = (y_test - y_means) / y_stds
+ # Normalize ood features
+ if ood_data is not None:
+ if target_id > -1:
+ idxs = list(range(train_data.shape[1]))
+ idxs.pop(target_id)
+ x_ood = ood_data[:, idxs]
+ x_ood = (x_ood - x_means) / x_stds
+ else:
+ x_ood = ood_data[:, :feature_len]
+ x_ood = (x_ood - x_means) / x_stds
+ assert not np.isnan(x_ood).any()
+ if ood_test_data is not None:
+ if target_id > -1:
+ idxs = list(range(train_data.shape[1]))
+ idxs.pop(target_id)
+ x_ood_test = ood_test_data[:, idxs]
+ x_ood_test = (x_ood_test - x_means) / x_stds
+ else:
+ x_ood_test = ood_test_data[:, :feature_len]
+ x_ood_test = (x_ood_test - x_means) / x_stds
+ assert not np.isnan(x_ood_test).any()
+
+ assert not np.isnan(y_test).any()
+ assert not np.isnan(y_train).any()
+ assert not np.isnan(x_test).any()
+ assert not np.isnan(x_train).any()
+ ood_loader = None
+ ood_test_loader = None
+ # Initialize loaders
+ train_loader = tdata.DataLoader(
+ tdata.TensorDataset(
+ torch.Tensor(x_train), torch.Tensor(y_train)
+ ),
+ batch_size=batch_size,
+ shuffle=shuffle
+ )
+ test_loader = tdata.DataLoader(
+ tdata.TensorDataset(
+ torch.Tensor(x_test), torch.Tensor(y_test)
+ ),
+ batch_size=batch_size, shuffle=False
+ )
+ if ood_data is not None:
+ ood_loader = tdata.DataLoader(
+ tdata.TensorDataset(torch.Tensor(x_ood)),
+ batch_size=ood_batch_size, shuffle=shuffle
+ )
+ if ood_test_data is not None:
+ ood_test_loader = tdata.DataLoader(
+ tdata.TensorDataset(torch.Tensor(x_ood_test)),
+ batch_size=ood_batch_size, shuffle=False
+ )
+ return train_loader, test_loader, ood_loader, ood_test_loader,\
+ [torch.FloatTensor([y_means]), torch.FloatTensor([y_stds])]
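+
+# Usage sketch (illustrative): `train_arr` and `test_arr` are numpy arrays whose
+# last column holds the regression target.
+#   train_loader, test_loader, ood_loader, ood_test_loader, (y_mu, y_sigma) = \
+#       get_table_loaders(train_arr, test_arr, batch_size=64, normalize_targets=True)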
diff --git a/src/distributions/__pycache__/distributions.cpython-37.pyc b/src/distributions/__pycache__/distributions.cpython-37.pyc
new file mode 100644
index 0000000..a437025
Binary files /dev/null and b/src/distributions/__pycache__/distributions.cpython-37.pyc differ
diff --git a/src/distributions/__pycache__/mixture_distribution.cpython-37.pyc b/src/distributions/__pycache__/mixture_distribution.cpython-37.pyc
new file mode 100644
index 0000000..433e137
Binary files /dev/null and b/src/distributions/__pycache__/mixture_distribution.cpython-37.pyc differ
diff --git a/src/distributions/__pycache__/prior_distribution.cpython-37.pyc b/src/distributions/__pycache__/prior_distribution.cpython-37.pyc
new file mode 100644
index 0000000..8272e92
Binary files /dev/null and b/src/distributions/__pycache__/prior_distribution.cpython-37.pyc differ
diff --git a/src/distributions/distributions.py b/src/distributions/distributions.py
new file mode 100644
index 0000000..3a7f366
--- /dev/null
+++ b/src/distributions/distributions.py
@@ -0,0 +1,270 @@
+import math
+import torch
+
+from torch.distributions import constraints, Distribution, Normal
+from torch.distributions import register_kl
+from torch.distributions.kl import kl_divergence
+from torch.distributions.independent import Independent
+
+from src.utils.func_utils import mvdigamma, rel_error
+
+
+class DiagonalWishart(Distribution):
+ r"""
+ Creates a diagonal version of Wishart distribution parameterized
+ by its scale :attr:`scale_diag` and degrees of freedom :attr:`df`.
+
+ Args:
+        scale_diag (Tensor) (or L): scale of the distribution with shape (bs, ..., p),
+            where p is the dimensionality of the distribution.
+        df (Tensor) (or \nu): degrees of freedom with shape (bs, ...). It should have
+            the same shape as :attr:`scale_diag`, but without the last dim.
+ """
+ arg_constraints = {'scale_diag': constraints.positive, 'df': constraints.positive}
+ support = constraints.positive
+ has_rsample = False
+
+ def __init__(self, scale_diag, df, validate_args=True):
+ if scale_diag.dim() < 1 or df.dim() < 1:
+ raise ValueError("scale_diag or df must be at least one-dimensional.")
+ if df.size(-1) == 1 and scale_diag.size(-1) != 1:
+ raise ValueError("df shouldn't end with dimensionality 1 if scale_diag doesn't")
+ df_ = df.unsqueeze(-1) # add dim on right
+ self.scale_diag, df_ = torch.broadcast_tensors(scale_diag, df_)
+ self.df = df_[..., 0] # drop rightmost dim
+
+ batch_shape, event_shape = self.scale_diag.shape[:-1], self.scale_diag.shape[-1:]
+ self.dimensionality = event_shape.numel()
+ if (self.df <= (self.dimensionality - 1)).any():
+ raise ValueError("df must be greater than dimensionality - 1")
+ super(DiagonalWishart, self).__init__(batch_shape, event_shape, validate_args=validate_args)
+
+ @property
+ def mean(self):
+ return self.df.unsqueeze(-1) * self.scale_diag
+
+ @property
+ def variance(self):
+ return 2 * self.df.unsqueeze(-1) * self.scale_diag.pow(2)
+
+ def log_prob(self, value):
+ if self._validate_args:
+ self._validate_sample(value)
+
+ tr_term = -0.5 * torch.div(value, self.scale_diag).sum(dim=-1)
+ norm_term = 0.5 * (self.df - self.dimensionality - 1) * torch.log(value).sum(dim=-1)
+
+ return -self.log_normalizer() + norm_term + tr_term
+
+ def log_normalizer(self):
+ log_normalizer_1 = 0.5 * self.df * self.dimensionality * math.log(2)
+ log_normalizer_2 = 0.5 * self.df * self.scale_diag.log().sum(dim=-1)
+ log_normalizer_3 = torch.mvlgamma(0.5 * self.df, self.dimensionality)
+ return log_normalizer_1 + log_normalizer_2 + log_normalizer_3
+
+ def log_expectation(self):
+ mvdigamma_term = mvdigamma(0.5 * self.df, self.dimensionality)
+ other_terms = self.dimensionality * math.log(2) + torch.log(self.scale_diag).sum(dim=-1)
+ return mvdigamma_term + other_terms
+
+ def entropy(self):
+ return self.log_normalizer() - 0.5 * (self.df - self.dimensionality - 1) * self.log_expectation()\
+ + 0.5 * self.df * self.dimensionality
+
+
+class NormalDiagonalWishart(Distribution):
+ r"""
+ Creates a diagonal version of Normal-Wishart distribution parameterized
+ by its mean :attr:`mean`, diagonal precision :attr:`precision_diag`,
+ degrees of freedom :attr:`df` and belief in mean :attr:`belief`
+
+ Args:
+        loc (Tensor) (or m): location of the distribution with shape (bs, ..., p),
+            where p is the dimensionality of the distribution.
+        precision_diag (Tensor or float) (or L): precision of the distribution with shape (bs, ..., p),
+            where p is the dimensionality of the distribution. It should have the same shape
+            as :attr:`loc`.
+        belief (Tensor or float) (or \kappa): confidence of belief in the mean, with shape (bs, ...).
+            It should have the same shape as :attr:`loc`, but without the last dim.
+        df (Tensor or float) (or \nu): degrees of freedom with shape (bs, ...). It should have
+            the same shape as :attr:`loc`, but without the last dim.
+ """
+ arg_constraints = {
+ 'precision_diag': constraints.positive,
+ 'belief': constraints.positive,
+ 'df': constraints.positive,
+ }
+ support = constraints.real
+ has_rsample = False
+
+ def __init__(self, loc, precision_diag, belief, df, validate_args=True):
+ precision_diag, belief, df = self.convert_float_params_to_tensor(
+ loc, precision_diag, belief, df
+ )
+ if loc.dim() < 1 or precision_diag.dim() < 1 or df.dim() < 1 or belief.dim() < 1:
+ raise ValueError("loc, precision_diag, df, belief must be at least one-dimensional.")
+ if belief.size(-1) == 1 and precision_diag.size(-1) != 1:
+ raise ValueError("belief shouldn't end with dimensionality 1 if precision_diag doesn't")
+ if df.size(-1) == 1 and precision_diag.size(-1) != 1:
+ raise ValueError("df shouldn't end with dimensionality 1 if precision_diag doesn't")
+ df_, belief_ = df.unsqueeze(-1), belief.unsqueeze(-1) # add dim on right
+ self.loc, self.precision_diag, df_, belief_ = torch.broadcast_tensors(loc, precision_diag, df_, belief_)
+ self.df, self.belief = df_[..., 0], belief_[..., 0] # drop rightmost dim
+
+ batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
+ self.dimensionality = event_shape.numel()
+ if (self.df <= (self.dimensionality + 1)).any():
+ raise ValueError("df must be greater than dimensionality + 1 to have expectation")
+ super(NormalDiagonalWishart, self).__init__(
+ batch_shape, event_shape, validate_args=validate_args
+ )
+
+ def log_prob(self, value_mean, value_precision):
+ if self._validate_args:
+ self._validate_sample(value_mean)
+ self._validate_sample(value_precision)
+ if (value_precision <= 0).any():
+            raise ValueError("desired precision must be greater than 0")
+ wishart_log_prob = DiagonalWishart(
+ self.precision_diag, self.df
+ ).log_prob(value_precision)
+ normal_log_prob = Independent(
+ Normal(
+ self.loc, (
+ 1 / (self.belief.unsqueeze(-1) * value_precision)
+ ).pow(0.5)
+ ), 1
+ ).log_prob(value_mean)
+ return normal_log_prob + wishart_log_prob
+
+ def expectation_entropy_normal(self):
+ return 0.5 * (
+ self.dimensionality * (
+ 1 + math.log(2 * math.pi)
+ ) - torch.log(
+ 2 * self.precision_diag * self.belief.unsqueeze(-1)
+ ).sum(dim=-1)
+ - mvdigamma(0.5 * self.df, self.dimensionality)
+ )
+
+ def entropy(self):
+ wishart_entropy = DiagonalWishart(self.precision_diag, self.df).entropy()
+ expectation_entropy_normal = self.expectation_entropy_normal()
+ return wishart_entropy + expectation_entropy_normal
+
+ def convert_float_params_to_tensor(self, loc, precision_diag, belief, df):
+ if isinstance(precision_diag, float):
+ precision_diag = precision_diag * torch.ones_like(loc).to(loc.device)
+ if isinstance(belief, float):
+ belief = belief * torch.ones_like(loc).to(loc.device)[..., 0]
+ if isinstance(df, float):
+ df = df * torch.ones_like(loc).to(loc.device)[..., 0]
+ return precision_diag, belief, df
+
+
+@register_kl(DiagonalWishart, DiagonalWishart)
+def kl_diag_wishart(p: DiagonalWishart, q: DiagonalWishart):
+ if p.event_shape != q.event_shape:
+ raise ValueError("KL-divergence between two Diagonal Wisharts with\
+ different event shapes cannot be computed")
+ log_det_term = -(0.5 * q.df) * torch.div(
+ p.scale_diag, q.scale_diag
+ ).log().sum(dim=-1)
+ tr_term = (0.5 * p.df) * (
+ torch.div(p.scale_diag, q.scale_diag).sum(dim=-1) - p.dimensionality
+ )
+ mvlgamma_term = torch.mvlgamma(0.5 * q.df, q.dimensionality) - torch.mvlgamma(0.5 * p.df, p.dimensionality)
+ digamma_term = 0.5 * (p.df - q.df) * mvdigamma(0.5 * p.df, p.dimensionality)
+ return log_det_term + tr_term + mvlgamma_term + digamma_term
+
+
+@register_kl(NormalDiagonalWishart, NormalDiagonalWishart)
+def kl_normal_diag_wishart(p: NormalDiagonalWishart, q: NormalDiagonalWishart):
+ if p.event_shape != q.event_shape:
+ raise ValueError("KL-divergence between two Normal Diagonal Wisharts with\
+ different event shapes cannot be computed")
+
+ wishart_KL = kl_divergence(
+ DiagonalWishart(p.precision_diag, p.df),
+ DiagonalWishart(q.precision_diag, q.df)
+ )
+ weighted_mse_term = torch.sum(
+ 0.5 * q.belief.unsqueeze(-1) *\
+ (p.loc - q.loc).pow(2) * p.precision_diag * p.df.unsqueeze(-1),
+ dim=-1
+ )
+ expected_conditioned_normal_KL = (
+ weighted_mse_term + (0.5 * p.dimensionality) * (
+ torch.div(q.belief, p.belief) - torch.div(q.belief, p.belief).log() - 1
+ )
+ )
+
+ return expected_conditioned_normal_KL + wishart_KL
+
+
+if __name__ == '__main__':
+ import numpy as np
+ from scipy.stats import wishart
+ x = np.linspace(1e-6, 20, 100)
+
+ print("Testing wishart entropy/logprob vs scipy implementation...")
+ for k in range(1000):
+ df_val = torch.randn(1).exp() + 2
+ scale_val = torch.randn(1).exp()
+
+ scipy_dist = wishart(df=df_val.item(), scale=scale_val.item())
+ torch_dist = DiagonalWishart(
+ scale_val.unsqueeze(-1),
+ df_val
+ )
+
+ torch_ent = torch_dist.entropy()[0]
+ scipy_ent = torch.FloatTensor([scipy_dist.entropy()])
+ if (rel_error(torch_ent, scipy_ent) > 1e-3).any():
+            raise ValueError("Entropies of torch and scipy versions don't match")
+
+ scipy_w = torch.FloatTensor(scipy_dist.logpdf(x))
+ torch_w = torch_dist.log_prob(torch.FloatTensor(x).unsqueeze(-1))
+
+ if (rel_error(torch_w, scipy_w) > 1e-6).any():
+            raise ValueError("Log pdfs of torch and scipy versions don't match")
+
+ print("Testing wishart KL divergence...")
+ df1, scale1 = torch.randn(32).exp() + 2, torch.randn(32).exp() + 1e-5
+ df2, scale2 = torch.randn(32).exp() + 2, torch.randn(32).exp() + 1e-5
+ init_df1, init_scale1 = df1[0].clone(), scale1[0].clone()
+ dist2 = DiagonalWishart(scale2.unsqueeze(-1), df2)
+ df1.requires_grad, scale1.requires_grad = True, True
+ gamma = 0.1
+ for k in range(10000):
+ dist1 = DiagonalWishart(scale1.unsqueeze(-1), df1)
+ loss = kl_divergence(dist1, dist2).mean()
+ if k % 1000 == 0:
+ print(k, loss.item())
+ loss.backward()
+ with torch.no_grad():
+ scale1 = scale1 - gamma * scale1.grad
+ df1 = df1 - gamma * df1.grad
+ scale1.requires_grad, df1.requires_grad = True, True
+ print('df1 init', init_df1, init_scale1)
+ print('df1 final', df1[0], scale1[0])
+ print('df2', df2[0], scale2[0])
+
+ print("All tests passed.")
+
+ print("Testing normal wishart...")
+ y = np.linspace(5, 20, 100)
+ torch_dist = NormalDiagonalWishart(
+ torch.tensor([10]).float().view(1, 1).repeat(100, 1),
+ torch.tensor([2.57]).float().view(1, 1).repeat(100, 1),
+ torch.tensor([0.7]).float().repeat(100),
+ torch.tensor([3.33]).float().repeat(100),
+ )
+
+ ex_w = torch_dist.log_prob(
+ torch.FloatTensor(x).unsqueeze(-1),
+ torch.FloatTensor(y).unsqueeze(-1)
+ )
+ print(ex_w.shape)
+ ex_w = torch_dist.entropy()[0]
+ #print(ex_w)
diff --git a/src/distributions/mixture_distribution.py b/src/distributions/mixture_distribution.py
new file mode 100644
index 0000000..67efe71
--- /dev/null
+++ b/src/distributions/mixture_distribution.py
@@ -0,0 +1,78 @@
+import torch
+from itertools import combinations
+from torch.distributions import Distribution, Normal, kl_divergence
+
+
+class GaussianDiagonalMixture(Distribution):
+ r"""
+ Creates a mixture of diagonal Normal distributions parameterized
+ by their means :attr:`means` and scales :attr:`scales`.
+ """
+ def __init__(self, means, scales):
+ assert len(means) == len(scales)
+ assert means[0].size(-1) == 1 and scales[0].size(-1) == 1
+
+ self.distributions = []
+ for i in range(len(means)):
+ self.distributions.append(
+ Normal(means[i], scales[i], validate_args=True)
+ )
+
+ def expected_mean(self):
+ return sum([dist.mean for dist in self.distributions]) / len(self.distributions)
+
+ def expected_entropy(self):
+ return sum([dist.entropy().squeeze() for dist in self.distributions]) / len(self.distributions)
+
+ def expected_pairwise_kl(self):
+ curr_sum_pairwise_kl = None
+ num_pairs = 0
+
+ for dist1, dist2 in combinations(self.distributions, r=2):
+ num_pairs += 1
+ if curr_sum_pairwise_kl is None:
+ curr_sum_pairwise_kl = kl_divergence(dist1, dist2)
+ else:
+ curr_sum_pairwise_kl += kl_divergence(dist1, dist2)
+ return curr_sum_pairwise_kl.squeeze() / num_pairs
+
+ def variance_of_expected(self):
+ avg_mean = self.expected_mean()
+ return sum([(dist.mean.pow(2) - avg_mean.pow(2)).squeeze() for dist in self.distributions]) / len(self.distributions)
+
+ def log_variance_of_expected(self):
+ return self.variance_of_expected().log()
+
+ def expected_variance(self):
+ return sum([dist.variance.squeeze() for dist in self.distributions]) / len(self.distributions)
+
+ def log_expected_variance(self):
+ return self.expected_variance().log()
+
+ def total_variance(self):
+ return self.variance_of_expected() + self.expected_variance()
+
+ def log_total_variance(self):
+ return self.total_variance().log()
+
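+    # Upper-bound estimate of the total (predictive) entropy of the mixture:
+    # expected member entropy plus the expected pairwise KL between members.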
+ def estimated_total_entropy(self):
+ return self.expected_entropy() + self.expected_pairwise_kl()
+
+ def log_prob(self, value):
+ mean = self.expected_mean()
+ var = self.total_variance().unsqueeze(-1)
+ return Normal(mean, var.pow(0.5)).log_prob(value)
+
+
+if __name__ == "__main__":
+ ex_means = [torch.ones(32, 1) for _ in range(5)]
+ ex_vars = [2 * torch.ones(32, 1) for _ in range(5)]
+ mixture_dis = GaussianDiagonalMixture(ex_means, ex_vars)
+ print(mixture_dis.expected_mean().shape)
+ print(mixture_dis.log_prob(torch.zeros(32, 1)).shape)
+
+ print(mixture_dis.expected_entropy().shape)
+ print(mixture_dis.expected_pairwise_kl().shape)
+ print(mixture_dis.variance_of_expected().shape)
+ print(mixture_dis.expected_variance().shape)
+ print(mixture_dis.total_variance().shape)
diff --git a/src/distributions/prior_distribution.py b/src/distributions/prior_distribution.py
new file mode 100644
index 0000000..1344aee
--- /dev/null
+++ b/src/distributions/prior_distribution.py
@@ -0,0 +1,122 @@
+import math, torch
+
+from torch.distributions import StudentT
+from .distributions import NormalDiagonalWishart
+from src.utils.func_utils import mvdigamma
+
+
+class NormalWishartPrior(NormalDiagonalWishart):
+
+ def forward(self):
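+        # The marginal predictive of a Normal-Wishart posterior is a Student-t
+        # with df = nu - p + 1, location m and squared scale
+        # (kappa + 1) / (kappa * (nu - p + 1)) * L^{-1} (diagonal L here).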
+ self.precision_coeff = (self.belief + 1) / (
+ self.belief * (self.df - self.dimensionality + 1)
+ )
+ return StudentT(
+ (self.df - self.dimensionality + 1).unsqueeze(-1),
+ loc=self.loc,
+ scale=(self.precision_coeff.unsqueeze(-1) / self.precision_diag).pow(0.5),
+ )
+
+ def predictive_posterior_log_prob(self, value):
+ return self.forward().log_prob(value)
+
+ def predictive_posterior_variance(self):
+ variance_res = self.forward().variance
+ if variance_res.size(-1) != 1:
+            raise ValueError("Predictive posterior returned variance with incorrect shapes")
+ return variance_res[..., 0]
+
+ def log_predictive_posterior_variance(self):
+ return self.predictive_posterior_variance().log()
+
+ def predictive_posterior_entropy(self):
+ entropy_res = self.forward().entropy()
+ if entropy_res.size(-1) != 1:
+ raise ValueError("Predictive posterior returned entropy with incorrect shapes")
+ return entropy_res[..., 0]
+
+ def entropy_ub(self):
+ return self.expected_pairwise_kl() + self.expected_entropy()
+
+ def expected_entropy(self):
+ mvdigamma_term = mvdigamma(0.5 * self.df, self.dimensionality)
+ return 0.5 * (
+ self.dimensionality * (1 + math.log(2 * math.pi))
+ - (2 * self.precision_diag).log().sum(dim=-1)
+ - mvdigamma_term.squeeze()
+ )
+
+ def expected_log_prob(self, value):
+ neg_mse_term = -torch.sum(
+ (self.loc - value).pow(2) * self.precision_diag * self.df.unsqueeze(-1),
+ dim = -1
+ )
+ mvdigamma_term = mvdigamma(0.5 * self.df, self.dimensionality)
+
+ reg_terms = (2 * self.precision_diag).log().sum(dim=-1) + mvdigamma_term
+ conf_term = -self.dimensionality * self.belief.pow(-1)
+ return 0.5 * (neg_mse_term + reg_terms + conf_term)
+
+ def mutual_information(self):
+ predictive_posterior_entropy = self.predictive_posterior_entropy()
+ expected_entropy = self.expected_entropy()
+ return predictive_posterior_entropy - expected_entropy
+
+ def expected_pairwise_kl(self):
+ term1 = 0.5 * (
+ self.df * self.dimensionality / (self.df - self.dimensionality - 1) -\
+ self.dimensionality
+ )
+ term2 = 0.5 * (
+ self.df * self.dimensionality / (self.df - self.dimensionality - 1) +\
+ self.dimensionality
+ ) / self.belief
+ return term1 + term2
+
+ def variance_of_expected(self):
+ return self.expected_variance() / self.belief
+
+ def log_variance_of_expected(self):
+ return self.variance_of_expected().log()
+
+ def expected_variance(self):
+ result = 1 / (self.precision_diag * (self.df.unsqueeze(-1) - self.dimensionality - 1))
+ if result.size(-1) != 1:
+ raise ValueError("Expected variance currently supports only one-dimensional targets")
+
+ return result[..., 0]
+
+ def log_expected_variance(self):
+ return self.expected_variance().log()
+
+ def total_variance(self):
+ tv = self.variance_of_expected() + self.expected_variance()
+ ppv = self.predictive_posterior_variance()
+
+ rel_diff = (tv - ppv).abs() / tv.abs().pow(0.5) / ppv.abs().pow(0.5)
+ assert (rel_diff < 1e-6).all()
+ return tv
+
+ def log_total_variance(self):
+ return self.total_variance().log()
+
+
+if __name__ == '__main__':
+ ex_mean = torch.zeros(32, 200, 400, 1)
+ ex_var = torch.ones(32, 200, 400, 1)
+ ex_belief = torch.ones(32, 200, 400)
+ ex_df = 10 * torch.ones(32, 200, 400)
+
+ ex_dist = NormalWishartPrior(ex_mean, ex_var, ex_belief, ex_df)
+ print(ex_dist.predictive_posterior_log_prob(2 * torch.ones(32, 200, 400, 1)).shape)
+ print(ex_dist.log_prob(2 * torch.ones(32, 200, 400, 1), 2 * torch.ones(32, 200, 400, 1)).shape)
+
+ print(ex_dist.predictive_posterior_entropy().shape) #Total
+ print(ex_dist.expected_entropy().shape) #Data
+ print(ex_dist.mutual_information().shape) #Knowledge
+ print(ex_dist.expected_pairwise_kl().shape) #Knowledge
+ print(ex_dist.variance_of_expected().shape) #Knowledge
+ print(ex_dist.expected_variance().shape) #Data
+ print(ex_dist.total_variance().shape) #Total
+ print(ex_dist.predictive_posterior_variance().shape) #Total
+
diff --git a/src/models/__pycache__/simple_model.cpython-37.pyc b/src/models/__pycache__/simple_model.cpython-37.pyc
new file mode 100644
index 0000000..aed213d
Binary files /dev/null and b/src/models/__pycache__/simple_model.cpython-37.pyc differ
diff --git a/src/models/simple_model.py b/src/models/simple_model.py
new file mode 100644
index 0000000..6ccb508
--- /dev/null
+++ b/src/models/simple_model.py
@@ -0,0 +1,81 @@
+import torch
+import torch.nn as nn
+
+
+class GaussianNoise(nn.Module):
+ def __init__(self, mean=0.0, sigma=0.05):
+ super(GaussianNoise, self).__init__()
+ self.mean = mean
+ self.sigma = sigma
+
+ def forward(self, input):
+ if not self.training:
+ return input
+ noise = input.clone().normal_(self.mean, self.sigma)
+ return input + noise
+
+
+class SimpleModel(nn.Module):
+ def __init__(
+ self, input_dim, output_dim, num_units,
+ num_hidden=1, activation=nn.ReLU, isPrior=False,
+ drop_rate=0.0, use_bn=False, noise_level=0.05
+ ):
+ super(SimpleModel, self).__init__()
+
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+ self.num_hidden = num_hidden
+ self.isPrior = isPrior
+ self.use_bn = use_bn
+
+        # dense network with num_hidden hidden layers of num_units units each
+ self.features, curr_dim = [], input_dim
+ self.features.append(GaussianNoise(sigma=noise_level))
+ for _ in range(num_hidden):
+ self.features.append(nn.Linear(curr_dim, num_units))
+ if self.use_bn:
+ self.features.append(nn.BatchNorm1d(num_units))
+ self.features.append(activation())
+ if drop_rate > 0.0:
+ self.features.append(nn.Dropout(drop_rate))
+ curr_dim = num_units
+ self.features = nn.Sequential(*self.features)
+
+ # generate stats of output distribution
+ self.layer_mean = nn.Linear(num_units, output_dim)
+ self.layer_std = nn.Sequential(
+ nn.Linear(num_units, output_dim),
+ nn.Softplus()
+ )
+ if isPrior:
+ self.layer_beta = nn.Sequential(
+ nn.Linear(num_units, 1),
+ nn.Softplus()
+ )
+
+ self._initialize_weights()
+
+ def forward(self, x):
+ x = x.view(-1, self.input_dim)
+ x = self.features(x)
+
+ mean = self.layer_mean(x)
+ std = self.layer_std(x) + 1e-6
+ if self.isPrior:
+            beta = self.layer_beta(x) + 1e-6
+            # beta parameterizes both the belief (kappa) and the degrees of freedom (nu);
+            # nu = beta + output_dim + 1 keeps nu > output_dim + 1, as required
+            # by NormalDiagonalWishart.
+            kappa = beta
+            nu = beta + self.output_dim + 1
+            return mean, std, kappa[..., 0], nu[..., 0]
+ else:
+ return mean, std
+
+ def _initialize_weights(self):
+ """Initialize weights as in
+ `Probabilistic Backpropagation for Scalable Learning of Bayesian Neural Networks`
+ (https://arxiv.org/pdf/1502.05336.pdf), section 3.5
+ """
+ for m in self.modules():
+ if isinstance(m, nn.Linear):
+ nn.init.normal_(m.weight, 0, 1.0 / (m.weight.size(1) + 1))
+ nn.init.constant_(m.bias, 0)
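+
+
+# Shape sketch (illustrative, not part of the original module): with isPrior=True
+# the model maps a batch of inputs to Normal-Wishart parameters.
+#   model = SimpleModel(input_dim=1, output_dim=1, num_units=64, isPrior=True)
+#   mean, scale, kappa, nu = model(torch.randn(8, 1))
+#   # mean, scale: (8, 1); kappa, nu: (8,)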
diff --git a/src/training/__pycache__/ood_trainers.cpython-37.pyc b/src/training/__pycache__/ood_trainers.cpython-37.pyc
new file mode 100644
index 0000000..f589172
Binary files /dev/null and b/src/training/__pycache__/ood_trainers.cpython-37.pyc differ
diff --git a/src/training/__pycache__/trainers.cpython-37.pyc b/src/training/__pycache__/trainers.cpython-37.pyc
new file mode 100644
index 0000000..1bca611
Binary files /dev/null and b/src/training/__pycache__/trainers.cpython-37.pyc differ
diff --git a/src/training/ood_trainers.py b/src/training/ood_trainers.py
new file mode 100644
index 0000000..7c9300d
--- /dev/null
+++ b/src/training/ood_trainers.py
@@ -0,0 +1,292 @@
+from typing import Tuple
+from itertools import cycle
+
+import torch
+from torch.distributions import Distribution
+import torch.nn as nn
+from torch.optim import Adam
+
+from src.training.trainers import DistributionRKLTrainer
+from src.utils.func_utils import reduce_tensor, params_rmse
+
+
+class DistributionRKLTrainerWithOOD(DistributionRKLTrainer):
+ @property
+ def uncertainty_methods(self):
+ return [
+ 'predictive_posterior_entropy', 'expected_entropy',
+ 'mutual_information', 'expected_pairwise_kl',
+ 'variance_of_expected', 'expected_variance',
+ 'total_variance'
+ ]
+
+ def train_step(self, x, y, x_ood):
+ self.optimizer.zero_grad()
+
+ predicted_params = self.model(x)
+ prior_params = self.prior_converter(x)
+
+ ordinary_loss = self.loss_function(
+ predicted_params,
+ prior_params,
+ y
+ )
+
+ self.switch_bn_updates("eval")
+ predicted_ood_params = self.model(x_ood)
+ prior_params = self.prior_converter(x_ood)
+
+ ood_loss = self.loss_function(
+ predicted_ood_params,
+ prior_params
+ )
+ self.switch_bn_updates("train")
+ loss = ordinary_loss + self.loss_params["ood_coeff"] * ood_loss
+ loss.backward()
+ torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
+ self.optimizer.step()
+ return ordinary_loss.item(), self.loss_params["ood_coeff"] * ood_loss.item()
+
+ def loss_function(self, predicted_params, prior_params, targets=None):
+ if targets is None:
+ return self.loss_params["inv_real_beta"] * self.rkl_loss(
+ predicted_params, prior_params, reduction='mean'
+ )
+ else:
+ predicted_dist = self.distribution(*predicted_params)
+ inv_beta = self.loss_params["inv_real_beta"]
+ return -predicted_dist.expected_log_prob(targets).mean() + inv_beta * self.rkl_loss(
+ predicted_params, prior_params, reduction='mean'
+ )
+
+ def eval_step(self, dataloader: torch.utils.data.DataLoader) -> Tuple[float, list]:
+ self.model.eval()
+ acc_eval_loss = 0.0
+ acc_metrics = [0.0 for m in self.test_metrics]
+ with torch.no_grad():
+ for i, (x, y) in enumerate(dataloader):
+ x, y = x.to(self.device), y.to(self.device)
+ predicted_params = self.model(x)
+ prior_params = self.prior_converter(x)
+ acc_eval_loss += self.loss_function(
+ predicted_params,
+ prior_params,
+ y
+ ).item() / len(dataloader)
+ for i, metric in enumerate(self.test_metrics):
+ acc_metrics[i] += metric(
+ predicted_params,
+ y
+ ) / len(dataloader)
+ return acc_eval_loss, acc_metrics
+
+ def train(self, dataloader, oodloader, num_epochs, eval_dataloader=None, log_per=0, verbose=True):
+ with torch.no_grad():
+ self.estimate_avg_mean_var(dataloader)
+
+ trainloss_hist, oodloss_hist, valloss_hist, metrics_hist = [], [], [], []
+
+ for e in range(num_epochs):
+ self.model.train()
+ acc_train_loss = 0.0
+ acc_ood_loss = 0.0
+            # With zip() alone, iteration would stop once the shorter loader is
+            # exhausted. Wrapping the OOD loader in cycle() repeats it, so every
+            # batch of the (typically larger) training loader is seen each epoch.
+ for (x, y), (x_ood,) in zip(dataloader, cycle(oodloader)):
+ x, y, x_ood = x.to(self.device), y.to(self.device), x_ood.to(self.device)
+ c_losses = self.train_step(x, y, x_ood)
+ acc_train_loss += c_losses[0] / len(dataloader)
+ acc_ood_loss += c_losses[1] / len(dataloader)
+
+ trainloss_hist += [acc_train_loss]
+ oodloss_hist += [acc_ood_loss]
+
+ if eval_dataloader and log_per > 0 and self.test_metrics:
+ if e % log_per == 0 or e == (num_epochs - 1):
+ acc_eval_loss, acc_metrics = self.eval_step(eval_dataloader)
+
+ if verbose:
+ print("Epoch %d train loss %.3f ood loss %.3f eval loss %.3f" % (
+ e, acc_train_loss, acc_ood_loss, acc_eval_loss
+ ), 'eval ' + ','.join(m.__name__ + " %.3f" % acc_metrics[i] for i, m in enumerate(self.test_metrics)),
+ flush=True
+ )
+ valloss_hist += [acc_eval_loss]
+ metrics_hist += [acc_metrics]
+
+ if self.scheduler:
+ self.scheduler.step()
+
+ return trainloss_hist, oodloss_hist, valloss_hist, metrics_hist
+
+ def estimate_avg_mean_var(self, dataloader):
+ self.avg_mean = None
+ for _, y in dataloader:
+ if self.avg_mean is None:
+ self.avg_mean = y.mean(dim=0) / len(dataloader)
+ else:
+ self.avg_mean += y.mean(dim=0) / len(dataloader)
+ sum_var = torch.zeros_like(self.avg_mean)
+ num_samples = 0
+ for _, y in dataloader:
+ avg_mean = torch.repeat_interleave(self.avg_mean.unsqueeze(0), repeats=y.size(0), dim=0)
+ sum_var += (y - avg_mean).pow(2).sum(dim=0)
+ num_samples += y.size(0)
+ self.avg_scatter = sum_var / num_samples
+
+ def prior_converter(self, inputs):
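+        # Data-dependent prior (see estimate_avg_mean_var): centred on the average
+        # training target, with the Wishart scale chosen so that the prior expected
+        # precision (nu * L) equals the inverse of the average target scatter.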
+ avg_mean_r = torch.repeat_interleave(
+ self.avg_mean.unsqueeze(0), repeats=inputs.size(0), dim=0
+ ).to(inputs.device)
+ prior_kappa, prior_nu = self.loss_params['prior_beta'],\
+ self.loss_params['prior_beta'] + self.model.output_dim + 1
+ avg_precision_r = torch.repeat_interleave(
+ (1 / (prior_nu * self.avg_scatter.unsqueeze(0))), repeats=inputs.size(0), dim=0
+ ).to(inputs.device)
+
+ all_params = [avg_mean_r, avg_precision_r]
+ return all_params + [prior_kappa, prior_nu]
+
+ def nll_loss(self, predicted_params, targets, reduction='mean'):
+ assert reduction in ['mean', 'sum', 'none']
+ predicted_dist = self.distribution(*predicted_params)
+ batched_loss = -predicted_dist.predictive_posterior_log_prob(targets)
+ assert batched_loss.dim() < 2 or batched_loss.size(-1) == 1
+ return reduce_tensor(batched_loss, reduction)
+
+ def switch_bn_updates(self, mode):
+ if self.model.use_bn:
+ for m in self.model.modules():
+ if isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d):
+ if mode == 'train':
+ m.train()
+ elif mode == 'eval':
+ m.eval()
+
+ def check_loss_params(self, loss_params):
+ for req_key in ['inv_real_beta', 'ood_coeff', 'prior_beta']:
+ if req_key not in loss_params.keys():
+ raise Exception("Rkl loss params dict should contain key", req_key)
+
+
+class DistributionEnsembleToPriorDistiller(DistributionRKLTrainer):
+ def __init__(
+ self, teacher_models: list, *args, **kwargs
+ ):
+ super(DistributionEnsembleToPriorDistiller, self).__init__(*args, **kwargs)
+ self.teacher_models = teacher_models
+ self.num_steps = 1
+ for model in self.teacher_models:
+ model.eval()
+ self.loss_params['temperature'] = self.loss_params['max_temperature']
+
+ @property
+ def uncertainty_methods(self):
+ return [
+ 'predictive_posterior_entropy', 'expected_entropy',
+ 'mutual_information', 'expected_pairwise_kl',
+ 'variance_of_expected', 'expected_variance',
+ 'total_variance'
+ ]
+
+ def train_step(self, x, y):
+ x += torch.empty(x.shape).normal_(
+ mean=0, std=self.loss_params["noise_level"]
+ ).to(x.device)
+
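+        # Temperature annealing (when "max_steps" is provided): hold the temperature
+        # at max_temperature for the first 20% of steps, decay it linearly to 1 over
+        # the next 40% of steps, and keep it at 1 afterwards.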
+ if "max_steps" in self.loss_params.keys():
+ T_0 = self.loss_params["max_temperature"]
+ first_part = float(0.2 * self.loss_params["max_steps"])
+ third_part = float(0.6 * self.loss_params["max_steps"])
+ if self.num_steps < first_part:
+ self.loss_params["temperature"] = T_0
+ elif self.num_steps < third_part:
+ self.loss_params["temperature"] = T_0 - (T_0 - 1) * min(
+ float(self.num_steps - first_part) / float(0.4 * self.loss_params["max_steps"]),
+ 1.0
+ )
+ else:
+ self.loss_params["temperature"] = 1.0
+
+ self.optimizer.zero_grad()
+
+ predicted_params = self.model(x)
+ loss = self.loss_function(
+ predicted_params,
+ x
+ )
+ loss.backward()
+ torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
+
+ self.optimizer.step()
+ self.num_steps += 1
+ return loss.item()
+
+ def eval_step(self, dataloader: torch.utils.data.DataLoader) -> Tuple[float, list]:
+ self.model.eval()
+ acc_eval_loss = 0.0
+ acc_metrics = [0.0 for m in self.test_metrics]
+ with torch.no_grad():
+ for x, y in dataloader:
+ x, y = x.to(self.device), y.to(self.device)
+ predicted_params = self.model(x)
+ acc_eval_loss += self.loss_function(
+ predicted_params,
+ x
+ ).item() / len(dataloader)
+ for i, metric in enumerate(self.test_metrics):
+ acc_metrics[i] += metric(
+ predicted_params,
+ y
+ ) / len(dataloader)
+ return acc_eval_loss, acc_metrics
+
+ def loss_function(self, predicted_params, x):
+ T = self.loss_params["temperature"]
+ with torch.no_grad():
+ all_teachers_means, all_teachers_vars = [], []
+ aggr_teachers_mean = torch.zeros_like(predicted_params[0]).to(self.device)
+ aggr_teachers_var = torch.zeros_like(predicted_params[1]).to(self.device)
+ for i, teacher in enumerate(self.teacher_models):
+ teacher_params = teacher(x)
+ aggr_teachers_mean += teacher_params[0] / len(self.teacher_models)
+ aggr_teachers_var += teacher_params[1].pow(2) / len(self.teacher_models)
+ all_teachers_means.append(teacher_params[0])
+ all_teachers_vars.append(teacher_params[1].pow(2))
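+            # Temperature smoothing: for T > 1 each teacher's mean and variance are
+            # pulled towards the ensemble average; at T = 1 they are left unchanged.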
+ for i, _ in enumerate(self.teacher_models):
+ all_teachers_means[i] = (T - 1) * aggr_teachers_mean / (T + 1) +\
+ 2 * all_teachers_means[i] / (T + 1)
+ all_teachers_vars[i] = (T - 1) * aggr_teachers_var / (T + 1) +\
+ 2 * all_teachers_vars[i] / (T + 1)
+
+ new_nu = (predicted_params[-1] - self.model.output_dim - 1) * T +\
+ self.model.output_dim + 1
+ new_kappa = predicted_params[-2] * T
+ #new_nu = (predicted_params[-1] - self.model.output_dim - 2) * (1 / T) +\
+ # self.model.output_dim + 2
+ #new_kappa = predicted_params[-2] * (1 / T)
+
+ predicted_dist = self.distribution(*[
+ predicted_params[0], predicted_params[1], new_kappa, new_nu
+ ])
+ all_losses = []
+ for i in range(len(self.teacher_models)):
+ all_losses.append(-predicted_dist.log_prob(
+ all_teachers_means[i], 1 / all_teachers_vars[i]
+ ).sum())
+
+ return sum(all_losses) / len(all_losses) / T
+
+ def nll_loss(self, predicted_params, targets, reduction='mean'):
+ assert reduction in ['mean', 'sum', 'none']
+ predicted_dist = self.distribution(*predicted_params)
+ batched_loss = -predicted_dist.predictive_posterior_log_prob(targets)
+ assert batched_loss.dim() < 2 or batched_loss.size(-1) == 1
+ return reduce_tensor(batched_loss, reduction)
+
+ def check_loss_params(self, loss_params):
+ for req_key in ['max_temperature', 'noise_level']:
+ if req_key not in loss_params.keys():
+                raise Exception("Distillation loss params dict should contain key", req_key)
diff --git a/src/training/trainers.py b/src/training/trainers.py
new file mode 100644
index 0000000..bc7db84
--- /dev/null
+++ b/src/training/trainers.py
@@ -0,0 +1,297 @@
+from typing import Tuple
+
+import torch
+from torch.distributions import Distribution
+from torch.distributions.kl import kl_divergence
+from torch.utils.data import SequentialSampler
+
+from sklearn.metrics import mean_squared_error
+
+
+def reduce_tensor(vec: torch.Tensor, reduction: str = 'mean'):
+ """Global reduction of tensor based on str
+
+ Args:
+ vec: torch.FloatTensor
+ reduction: str, one of ['sum', 'mean', 'none'], default 'mean'
+ """
+ assert reduction in ['sum', 'mean', 'none']
+ if reduction == 'mean':
+ return vec.mean()
+ elif reduction == 'sum':
+ return vec.sum()
+ elif reduction == 'none':
+ return vec
+
+
+def params_rmse(predicted_params, targets):
+ assert not torch.isnan(predicted_params[0]).any()
+ return mean_squared_error(predicted_params[0].cpu(), targets.cpu()) ** 0.5
+
+
+
+class DistributionMLETrainer:
+ """This class implements MLE training for a model that outputs parameters
+ of some distribution. Note that both trained model and optimizer instances
+ are created inside it.
+ """
+ def __init__(
+ self, model_params: dict, model: torch.nn.Module,
+ optim_params: dict, distribution=Distribution,
+ optimizer=torch.optim.Adam, scheduler=None, scheduler_params=None,
+ test_metrics=[params_rmse], device='cuda:0'
+ ):
+ self.model = model(**model_params).to(device)
+ self.distribution = distribution
+ self.optimizer = optimizer(self.model.parameters(), **optim_params)
+ self.scheduler = None
+ if scheduler is not None:
+ self.scheduler = scheduler(self.optimizer, **scheduler_params)
+ self.test_metrics = test_metrics
+ self.device = device
+
+ @property
+ def uncertainty_methods(self):
+ return ['entropy']
+
+ def train_step(self, x: torch.FloatTensor, y: torch.FloatTensor) -> float:
+ self.optimizer.zero_grad()
+ predicted_params = self.model(x)
+ loss = self.loss_function(
+ predicted_params,
+ y
+ )
+ loss.backward()
+ torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
+ self.optimizer.step()
+ return loss.item()
+
+ def eval_step(self, dataloader: torch.utils.data.DataLoader) -> Tuple[float, list]:
+ self.model.eval()
+ acc_eval_loss = 0.0
+ acc_metrics = [0.0 for m in self.test_metrics]
+ with torch.no_grad():
+ for x, y in dataloader:
+ x, y = x.to(self.device), y.to(self.device)
+ predicted_params = self.model(x)
+ acc_eval_loss += self.loss_function(
+ predicted_params,
+ y
+ ).item() / len(dataloader)
+ for i, metric in enumerate(self.test_metrics):
+ acc_metrics[i] += metric(
+ predicted_params,
+ y
+ ) / len(dataloader)
+ return acc_eval_loss, acc_metrics
+
+ def train(self, dataloader: torch.utils.data.DataLoader,
+ num_epochs: int, eval_dataloader: torch.utils.data.DataLoader = None,
+            log_per: int = 0, verbose: bool = True
+ ) -> Tuple[list, list, list]:
+ trainloss_hist, valloss_hist, metrics_hist = [], [], []
+
+ for e in range(num_epochs):
+ self.model.train()
+ acc_train_loss = 0.0
+ for x, y in dataloader:
+ x, y = x.to(self.device), y.to(self.device)
+ acc_train_loss += self.train_step(x, y) / len(dataloader)
+
+ trainloss_hist += [acc_train_loss]
+
+ if eval_dataloader and log_per > 0 and self.test_metrics:
+ if e % log_per == 0 or e == (num_epochs - 1):
+ acc_eval_loss, acc_metrics = self.eval_step(eval_dataloader)
+ if verbose:
+ print("Epoch %d train loss %.3f eval loss %.3f" % (
+ e, acc_train_loss, acc_eval_loss
+ ), 'eval ' + ','.join(
+ m.__name__ + " %.3f" % acc_metrics[i]
+ for i, m in enumerate(self.test_metrics)
+ ), flush=True
+ )
+ valloss_hist += [acc_eval_loss]
+ metrics_hist += [acc_metrics]
+
+ if self.scheduler:
+ self.scheduler.step()
+
+ return trainloss_hist, valloss_hist, metrics_hist
+
+ def compute_unsupervised_metric_through_data(
+ self, dataloader: torch.utils.data.DataLoader, metric
+ ) -> torch.FloatTensor:
+ metric_scores = []
+ self.model.eval()
+ with torch.no_grad():
+ for items in dataloader:
+ if isinstance(items, tuple) or isinstance(items, list):
+ predicted_params = self.model(items[0].to(self.device))
+ else:
+ predicted_params = self.model(items.to(self.device))
+ metric_scores += metric(
+ predicted_params
+ ).cpu().tolist()
+ return torch.FloatTensor(metric_scores)
+
+ def loss_function(self, predicted_params, targets):
+ return self.nll_loss(predicted_params, targets, reduction='mean')
+
+ def nll_loss(self, predicted_params, targets, reduction='mean'):
+ assert reduction in ['mean', 'sum', 'none']
+ predicted_dist = self.distribution(*predicted_params)
+ batched_loss = -predicted_dist.log_prob(targets)
+ assert batched_loss.dim() < 2 or batched_loss.size(-1) == 1
+ return reduce_tensor(batched_loss, reduction)
+
+ def get_predicted_params(self, dataloader: torch.utils.data.DataLoader) -> list:
+ all_predicted_params = []
+ self.model.eval()
+ with torch.no_grad():
+ for items in dataloader:
+ if isinstance(items, tuple) or isinstance(items, list):
+ predicted_params = self.model(items[0].to(self.device))
+ else:
+ predicted_params = self.model(items.to(self.device))
+ if len(all_predicted_params) == 0:
+ all_predicted_params = [param.cpu().tolist() for param in predicted_params]
+ else:
+ for i in range(len(all_predicted_params)):
+ all_predicted_params[i] += predicted_params[i].cpu().tolist()
+ return [torch.FloatTensor(param) for param in all_predicted_params]
+
+ def eval_uncertainty(self, dataloader, method: str = 'entropy'):
+ some_metric = lambda params: getattr(self.distribution(*params), method)()
+ return self.compute_unsupervised_metric_through_data(dataloader, some_metric)
+
+ def save_model(self, dir: str):
+ torch.save(self.model.state_dict(), dir + '.ckpt')
+
+ def load_model(self, dir: str):
+ self.model.load_state_dict(torch.load(dir + '.ckpt'))
+
+
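+# Usage sketch (illustrative; SimpleModel and the loaders are assumed to come
+# from src.models.simple_model and src.data.toy_loading):
+#   trainer = DistributionMLETrainer(
+#       model_params={'input_dim': 1, 'output_dim': 1, 'num_units': 64},
+#       model=SimpleModel, optim_params={'lr': 1e-3},
+#       distribution=torch.distributions.Normal,
+#   )
+#   trainer.train(train_loader, num_epochs=100, eval_dataloader=test_loader, log_per=10)
+
+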
+class DistributionRKLTrainer(DistributionMLETrainer):
+ """This class replaces standard MLE loss with Reverse-KL.
+ """
+ def __init__(self, loss_params: dict, *args, **kwargs):
+ super(DistributionRKLTrainer, self).__init__(*args, **kwargs)
+ self.check_loss_params(loss_params)
+ self.loss_params = loss_params
+
+ def loss_function(self, predicted_params, targets):
+ target_params = self.converter(targets)
+ return self.rkl_loss(predicted_params, target_params, reduction='mean')
+
+ def rkl_loss(self, predicted_params, target_params, reduction='mean'):
+ assert reduction in ['mean', 'sum', 'none']
+ predicted_dist = self.distribution(*predicted_params)
+ true_dist = self.distribution(*target_params)
+ batched_loss = kl_divergence(predicted_dist, true_dist)
+ assert batched_loss.dim() < 2 or batched_loss.size(-1) == 1
+ return reduce_tensor(batched_loss, reduction)
+
+ def converter(self, targets):
+ """Extend targets with manually specified params to parametrize target distribution"""
+ return [targets] + self.loss_params["real_params"]
+
+ def check_loss_params(self, loss_params):
+ for req_key in ['target_lambda', 'target_kappa', 'target_nu']:
+ if req_key not in loss_params.keys():
+ raise Exception("Rkl loss params dict should contain key", req_key)
+
+
+class DistributionEnsembleMLETrainer:
+ """This class sequentially trains multiple models and combines their outputs in an
+ ensemble distribution. Besides more accurate predictions, this allows
+ us to decompose uncertainty measures.
+ """
+ def __init__(
+ self, n_models: int, mixture_distribution=Distribution,
+ *args, **kwargs
+ ):
+ self.trainers = [
+ DistributionMLETrainer(*args, **kwargs) for _ in range(n_models)
+ ]
+ self.mixture_distribution = mixture_distribution
+
+ @property
+ def uncertainty_methods(self):
+ return [
+ 'expected_entropy', 'expected_pairwise_kl',
+ 'variance_of_expected', 'expected_variance',
+ 'total_variance'
+ ]
+
+ def train(self, dataloader: torch.utils.data.DataLoader,
+ num_epochs: int, eval_dataloader: torch.utils.data.DataLoader = None,
+            log_per: int = 0, verbose: bool = True
+ ) -> Tuple[list, list, list]:
+ train_hists, val_hists, metrics_hists = [], [], []
+ for i, trainer in enumerate(self.trainers):
+ if verbose:
+ print('-'*20, flush=True)
+ print("Model %d" % i, flush=True)
+ res = trainer.train(dataloader, num_epochs, eval_dataloader, log_per, verbose)
+ train_hists.append(res[0])
+ val_hists.append(res[1])
+ metrics_hists.append(res[2])
+ return train_hists, val_hists, metrics_hists
+
+ def nll_loss(self, predicted_params, targets, reduction='mean'):
+ assert reduction in ['mean', 'sum', 'none']
+ predicted_dist = self.mixture_distribution(*predicted_params)
+ batched_loss = -predicted_dist.log_prob(targets)
+ assert batched_loss.dim() < 2 or batched_loss.size(-1) == 1
+ return reduce_tensor(batched_loss, reduction)
+
+ def get_predicted_params(self, dataloader: torch.utils.data.DataLoader) -> list:
+ if not isinstance(dataloader.sampler, SequentialSampler):
+ print(dataloader.batch_sampler)
+ raise ValueError("To merge predicted params correctly dataloader shouldn't shuffle")
+ all_means, all_stds = [], []
+ for trainer in self.trainers:
+ cmean, cstd = trainer.get_predicted_params(dataloader)
+
+ all_means.append(cmean)
+ all_stds.append(cstd)
+ return all_means, all_stds
+
+ def compute_unsupervised_metric_through_data(
+ self, dataloader: torch.utils.data.DataLoader, metric
+ ) -> torch.FloatTensor:
+ metric_scores = []
+ for trainer in self.trainers:
+ trainer.model.eval()
+ with torch.no_grad():
+ for items in dataloader:
+ all_means, all_stds = [], []
+ if isinstance(items, tuple) or isinstance(items, list):
+ for trainer in self.trainers:
+ cmean, cstd = trainer.model(items[0].to(trainer.device))
+ all_means.append(cmean)
+ all_stds.append(cstd)
+ else:
+ for trainer in self.trainers:
+ cmean, cstd = trainer.model(items.to(trainer.device))
+ all_means.append(cmean)
+ all_stds.append(cstd)
+ if len(all_means[0]) > 1:
+ metric_scores += metric(
+ [all_means, all_stds]
+ ).cpu().tolist()
+ return torch.FloatTensor(metric_scores)
+
+ def save_model(self, dir: str):
+ for i, trainer in enumerate(self.trainers):
+ trainer.save_model(dir + '_' + str(i))
+
+ def load_model(self, dir: str):
+ for i, trainer in enumerate(self.trainers):
+ trainer.load_model(dir + '_' + str(i))
+
+ def eval_uncertainty(self, dataloader, method: str = 'expected_pairwise_kl'):
+ some_metric = lambda params: getattr(self.mixture_distribution(*params), method)()
+ return self.compute_unsupervised_metric_through_data(dataloader, some_metric)
diff --git a/src/utils/__pycache__/func_utils.cpython-37.pyc b/src/utils/__pycache__/func_utils.cpython-37.pyc
new file mode 100644
index 0000000..7a02b2f
Binary files /dev/null and b/src/utils/__pycache__/func_utils.cpython-37.pyc differ
diff --git a/src/utils/func_utils.py b/src/utils/func_utils.py
new file mode 100644
index 0000000..9a7db93
--- /dev/null
+++ b/src/utils/func_utils.py
@@ -0,0 +1,100 @@
+from typing import Union
+import torch
+
+from sklearn.metrics import mean_squared_error
+
+
+def percentile(t: torch.tensor, q: float) -> Union[int, float]:
+ """
+ Return the ``q``-th percentile of the flattened input tensor's data.
+
+ CAUTION:
+ * Needs PyTorch >= 1.1.0, as ``torch.kthvalue()`` is used.
+ * Values are not interpolated, which corresponds to
+ ``numpy.percentile(..., interpolation="nearest")``.
+
+ :param t: Input tensor.
+ :param q: Percentile to compute, which must be between 0 and 100 inclusive.
+ :return: Resulting value (scalar).
+ """
+ # Note that ``kthvalue()`` works one-based, i.e. the first sorted value
+ # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,
+ # so that ``round()`` returns an integer, even if q is a np.float32.
+ k = 1 + round(.01 * float(q) * (t.numel() - 1))
+ result = t.view(-1).kthvalue(k).values.item()
+ return result
+
+
+def mvdigamma(vec: torch.FloatTensor, p: int, reduction: str = 'sum'):
+    """Batched multivariate digamma function,
+    psi_p(a) = sum_{i=1..p} digamma(a + (1 - i) / 2), applied elementwise.
+
+    Args:
+        vec: torch.FloatTensor of shape (bs, ...), input to apply the function to
+        p: int, dimensionality
+        reduction: str, one of ['sum', 'mean'], default 'sum'
+    Returns:
+        Tensor with the same shape as vec, where the mvdigamma function is
+        computed for each position independently
+    """
+ assert reduction in ['sum', 'mean']
+
+ increasing_numbers = torch.arange(
+ 1, p + 1, dtype=torch.float, requires_grad=False
+ )
+ output = torch.digamma(
+ vec.unsqueeze(-1) + 0.5 * (1 - increasing_numbers.to(vec.device))
+ )
+
+ if reduction == 'sum':
+ return output.sum(axis=-1)
+ elif reduction == 'mean':
+ return output.mean(axis=-1)
+
+
+def reduce_tensor(vec: torch.Tensor, reduction: str = 'mean'):
+ """Global reduction of tensor based on str
+
+ Args:
+ vec: torch.FloatTensor
+ reduction: str, one of ['sum', 'mean', 'none'], default 'mean'
+ """
+ assert reduction in ['sum', 'mean', 'none']
+ if reduction == 'mean':
+ return vec.mean()
+ elif reduction == 'sum':
+ return vec.sum()
+ elif reduction == 'none':
+ return vec
+
+
+def params_rmse(predicted_params, targets):
+ assert not torch.isnan(predicted_params[0]).any()
+ return mean_squared_error(predicted_params[0].cpu(), targets.cpu()) ** 0.5
+
+
+def rel_error(value1, value2):
+ value1_norm = value1.norm(p=2, dim=-1)
+ value2_norm = value2.norm(p=2, dim=-1)
+ diff_norm = (value1 - value2).norm(p=2, dim=-1)
+ return diff_norm / (value1_norm.pow(0.5) * value2_norm.pow(0.5))
+
+
+if __name__ == '__main__':
+ from scipy.special import digamma
+
+ for k in range(1000):
+ rand_nu = torch.randn(512).exp() + 1e-5
+ digammas_scipy = torch.zeros(512)
+ digammas_torch = mvdigamma(rand_nu, 1)
+
+ for k in range(512):
+ digammas_scipy[k] = digamma(rand_nu[k].item())
+
+ if (rel_error(digammas_scipy, digammas_torch) > 1e-6).any():
+        raise Exception("Digamma functions of torch and scipy don't match")
+
+ if ((rand_nu.log() - 1 / (2 * rand_nu) - digammas_torch) < 0.0).any():
+        raise Exception("Upper bound inequality is not satisfied")
+
+ if ((rand_nu.log() - 1 / (rand_nu) - digammas_torch) > 0.0).any():
+        raise Exception("Lower bound inequality is not satisfied")
diff --git a/uncertainty_example.ipynb b/uncertainty_example.ipynb
new file mode 100644
index 0000000..1dd7a0c
--- /dev/null
+++ b/uncertainty_example.ipynb
@@ -0,0 +1,1618 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Structure of this notebook:\n",
+ "#### 1) Uncertainty and deep ensembles for classification\n",
+ "#### 2) Uncertainty and deep ensembles for regression\n",
+ "#### 3) Ensemble distribution distillation and Prior networks for regression"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Deep ensembles for classification"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Why ensembles?\n",
+ "\n",
+    "#### Assume that we have an ensemble of different probabilistic models that predict $y$ given features $x^*$: \n",
+ "#### $$ \\{ P(y|x^*, \\theta_m) \\}_{m=1..M}, \\theta_m \\sim P(\\theta|D),$$\n",
+    "#### where $P(\\theta|D)$ is the probability of obtaining such a model given the dataset $D$, the optimization procedure and the model architecture.\n",
+    "#### In deep ensembles we train each model of the ensemble with a different random initialization.\n",
+    "#### Each model $P(y|x^*, \\theta_m)$ captures a different estimate of data uncertainty: it has its own view and its own local optimum for the task.\n",
+ "\n",
+    "#### Here $H$ denotes entropy; for a discrete distribution with class probabilities $p_i$:\n",
+ "$$H(p) = -\\sum_{i=1..N} p_i \\cdot \\log p_i$$\n",
+ "\n",
+    "#### From the ensemble we can compute the *Total uncertainty*:\n",
+ "#### $$H[\\mathbb{E}_{\\theta \\sim P(\\theta|D)} [P(y|x^*, \\theta)]] \\simeq H[\\frac{1}{M} \\sum_{m=1}^M [P(y|x^*, \\theta_m)]]$$\n",
+ "#### and *Expected data (aleatoric) uncertainty*:\n",
+ "#### $$\\mathbb{E}_{\\theta \\sim P(\\theta|D)} [H[P(y|x^*, \\theta)]] \\simeq \\frac{1}{M} \\sum_{m=1}^M [H[P(y|x^*, \\theta_m)]]$$\n",
+ "#### *Knowledge (epistemic) uncertainty* is the difference between *Total uncertainty* and *Expected Data uncertainty*:\n",
+ "#### $$\\mathcal{I}(y, \\theta| x^*, D) = H[\\mathbb{E}_{\\theta \\sim P(\\theta|D)} [P(y|x^*, \\theta)]] - \\mathbb{E}_{\\theta \\sim P(\\theta|D)} [H[P(y|x^*, \\theta)]]$$"
+ ]
+ },
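+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch (added for clarity, not part of the original notebook):\n",
+    "# computing the three uncertainty measures above from a toy ensemble of\n",
+    "# categorical predictions; `probs` stands in for the M per-model predictive\n",
+    "# distributions P(y|x*, theta_m).\n",
+    "import torch\n",
+    "\n",
+    "probs = torch.softmax(torch.randn(5, 3), dim=-1)         # M = 5 models, 3 classes\n",
+    "total = -(probs.mean(0) * probs.mean(0).log()).sum()     # H[E_theta P(y|x*, theta)]\n",
+    "expected_data = -(probs * probs.log()).sum(-1).mean()    # E_theta H[P(y|x*, theta)]\n",
+    "knowledge = total - expected_data                        # mutual information\n",
+    "print(total.item(), expected_data.item(), knowledge.item())"
+   ]
+  },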
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "**To visualize the behaviour of the ensemble in different cases, let's consider 3-class classification and show it on a triangular simplex: each corner of the triangle is a class and each point is the prediction of one model in the ensemble. The closer a point is to a corner, the higher that model's predicted probability of the corresponding class.**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "![Ensemble predictions on the 3-class probability simplex](images/triangular_simplex.png)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "**There are different ways to generate ensembles; you can find more about them in the video on \"Ensemble Generation\".**\n",
+ "\n",
+ "**Here we will use the simplest, yet the most effective way to generate ensembles for uncertainty estimation: we will train different models with different random seeds.**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "**For simplicity, we will demonstrate all concepts on easy tasks, where you can train a model quickly and play with it. You can find large-scale experiments in the scientific papers introduced in our track.**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "1.6.0 10.2\n"
+ ]
+ }
+ ],
+ "source": [
+ "import torch\n",
+ "import torchvision\n",
+ "import torchvision.transforms as transforms\n",
+ "import matplotlib.pyplot as plt\n",
+ "import torch.nn as nn\n",
+ "import seaborn as sns\n",
+ "import sklearn\n",
+ "\n",
+ "print(torch.__version__, torch.version.cuda)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**MNIST dataset**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "n_epochs = 3\n",
+ "batch_size_train = 64\n",
+ "batch_size_test = 200\n",
+ "learning_rate = 0.01\n",
+ "momentum = 0.5\n",
+ "num_networks = 5 # Ensemble size\n",
+ "\n",
+ "random_seed = 1\n",
+ "torch.manual_seed(random_seed)\n",
+ "device='cuda:0'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "train_loader = torch.utils.data.DataLoader(\n",
+ " torchvision.datasets.MNIST('./data/', train=True, download=True,\n",
+ " transform=torchvision.transforms.Compose([\n",
+ " torchvision.transforms.ToTensor(),\n",
+ " torchvision.transforms.Normalize(\n",
+ " (0.1307,), (0.3081,))\n",
+ " ])),\n",
+ " batch_size=batch_size_train, shuffle=True)\n",
+ "\n",
+ "test_loader = torch.utils.data.DataLoader(\n",
+ " torchvision.datasets.MNIST('./data/', train=False, download=True,\n",
+ " transform=torchvision.transforms.Compose([\n",
+ " torchvision.transforms.ToTensor(),\n",
+ " torchvision.transforms.Normalize(\n",
+ " (0.1307,), (0.3081,))\n",
+ " ])),\n",
+ " batch_size=batch_size_test, shuffle=True)\n",
+ "\n",
+ "classes = tuple(str(i) for i in range(10))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n"
+ ]
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXYAAAB5CAYAAAAtfwoEAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAAqHElEQVR4nO2daXBc13mmn9ON3jc00GgAxM4F4AJzMyUzlkxTlGVrcUQ7dlxSyWOr5IrLjqcmmUrFcSY/pjK/PDVTM+PJzERRxXEcW5YlS7LFqGTLlCyJWkyKlCgCFEFSBIl9RwONRi/o7cwP9L1pkADRABoAu3GeKhSA28s9d3vvud/5vvcIKSUKhUKhKB4M690AhUKhUOQXJewKhUJRZChhVygUiiJDCbtCoVAUGUrYFQqFoshQwq5QKBRFxoqEXQhxrxDikhDiihDie/lqlEKhUCiWj1huHrsQwghcBu4B+oDTwMNSygv5a55CoVAolkrJCj57O3BFSnkVQAjxc+AosKCwCyFUNZRCoVAsnTEpZUWub15JKKYG6M36vy+zbA5CiG8KIc4IIc6sYF0KhUKxkeleyptX0mPPCSnlE8AToHrsCoVCsRaspMfeD9Rl/V+bWaZQKBSKdWQlwn4a2CaEaBJCmIGHgGP5aZZCoVAolsuyQzFSyqQQ4t8DLwNG4J+klB/mrWUKhUKhWBbLTndc1spUjF2hUCiWw3tSygO5vllVnioUCkWRoYRdoVAoigwl7AqFQlFkKGFXKBSKIkMJu0KhUBQZStgVCoWiyFDCrlAoFEWGEnaFQqEoMlbdBEyhUCjWC5PJhNlsRgihL0ulUsTjcVKp1Dq2bHVRwq5QKIqSkpISbr/9dg4dOoTVatWX9/f389vf/paurq71a9wqo4RdoVAUJUajkdtvv53vfOc7lJaW6svff/99Ojo6lLArFPlECIHNZsPtdlNSUoLBYJjzqHw9kUiEyclJkskka+ltdCsjhMBgMFBSUoLT6cRisWAwGDAajTl9Pp1OE4/HSafTRKNRwuFwUe5bk8mEw+HA4XDoy2w2W877qVBRwq5YM4QQGI1GhBDs3LmT+++/H5/Ph9VqxWazLSjup06d4rnnnmN0dJRUKkU6nV7jlt9aCCGwWq1YrVbKy8s5fPgw27Ztw2Kx4PF4MJlMi35HMBikr6+PcDhMe3s7J0+eJBqNrkHrFWuBEnbFmqL1Kuvq6rjnnntoaGjA6XTidrsXFHaTycTx48cJBAJIKTe8sAOYzWbsdjs+n4/bb7+dT37ykzgcDiorK7FYLIt+fmRkhA8//JBAIEAkEuG9995Twl5EbFhht9vteijAbDZjNpsxGo3Y7XbMZvOSvmtqaor+/n4ikQipVIpEIrFKrS4MhBC4XC699+h2u3E4HAgh9NDLjh07cLvdWCwWTCaTHlq4HiklFRUV7N+/n8rKSnp6eujp6SnqjIZs3G43dXV1OBwODAaD/uNwOLDZbPj9fqqqqrDb7VitVv31xTCbzZSWlmIwGKipqWHr1q1MTk4SCASYnJxc/Q1bR5xOJ62trSQSCcbGxuju7iYWi613s/LKhhR2IQSbNm1iz549OJ1OfD4f5eXl2O12Nm/ejM/nW9L3nTt3jp/97Gdcu3aNcDhMMBjc0L1Kg8HAli1b2LdvH6Wlpezdu5ctW7YghNB/SktLqa6uxmKx6OGZhdi9ezd/+Zd/STAY5Gc/+xk//elPiUQia7hF60dTUxOPPfYYW7duxWw2Y7PZ9Kceo9GI2WzG7/fjcrkwGo05hWFgVty2bt2qd0IsFguBQIA333yT06dPF/X5W1tby7e+9S2CwSCvvfYajz/+OH19fevdrLyyIYT9etEwGAw4nU6qq6vxeDxs2rSJqqoqXC4Xu3btorq6+qZCcz2pVIqysjKGh4dJJBIYDAaklEU5GDUf1+8ro9GIx+Ohvr4en8/Hnj17aG1tzaknOd93l5WV4fV6iUQinDhxYlnfU2ho+9Tj8bBz5052796N1WrF4XDkZeBPe0pNp9NUV1ezZcsWPB4P7e3tGAyGohZ2p9PJrl27kFLS398/JxWyWChaYTebzZhMJv0g1tTU6K8JIairq6O5uRm73U5paSkejwebzYbNZlvyuvx+P4cPH6a5uZlIJML09DTRaJT29nYuXbpUdBeJ0WikrKwMl8uFy+WioaEBj8cz5/Vt27axY8cOnE4npaWlS7pRZiOlZHh4mM7OTiYmJujs7Cz6MIzL5WL//v3U19fT0tJCVVUVVqtVD1nlEyEEHo+HhoYG3G43TU1NNDY2Eo1GCQQCKu5eoBSlsGtZA06nk7q6Or72ta9x+PDhOReFyWSakyKmhQNyfZTNpr6+nkceeYREIkE6nSadTjM5OckPfvADrly5UnTCbjKZqK+vp7Gxkfr6eh544AG2bds25z0Wi0UPs+QymHczurq6eO655xgcHKSjo6PoxzDKy8t56KGHuO+++7Barbjdbr16Mt/CDuDz+XC73YRCIbq6uhgZGWFiYoILFy4oYS9QFhV2IcQ/AZ8HRqSUrZllZcDTQCPQBXxFSjmxes3MDW0g1Gg04nK5cLvdeL1eqqurqa+v1y8KKSXJZJKZmRk9XKL1ApPJ5JLXazAY9KwO7Uahxe6dTiczMzPMzMwUfE9TK8/WnnLKy8upqKigpqaG+vr6VVtvIpFgenqa6elpZmZmVm096432lOnxeKiqqppzzq4WWmfGZDKRTqdxOp36OVtSUvj9vkQiQTQa1Qfprx/P0a7ZYguf5nLk/hn4P8C/ZC37HvCqlPL7QojvZf7/q/w3L3eEEOzatYtDhw7pWRg2mw2v18vmzZvnvFdKyblz53jttdcIhUIrXndFRQXbt2/H4/Hg9/upqanBZrNx5MgR3G43g4ODvPLKK1y6dGnF61ovtCq+Q4cO4XQ6qaqqory8XBf41aSuro7777+fQCDASy+9RFdX17JuwLcyFouFQ4cOcfDgQfx+P83NzWveBu1J1+PxEI/HC17YU6kUFy5c4Be/+AU+n4/9+/fT3Nw8R9itVisVFRV6xyEUChWFuC965KSUJ4QQjdctPgoczvz9Y+B1bgFh37lzJ4899piebWEymTAYDDeEV6SUtLe38/jjjzM0NLTidbe0tPCHf/iH1NbWsmPHDiorK7FarRw+fJg777yTjo4Orl69WtDCXlJSwoEDB/TybK2Xo6UwriZ1dXVUV1cTDofp7u7m5ZdfXtX1rQeasH/729/GZrMtOeU2HxgMBj30E41GC17Y0+k0HR0dRCIRqqqqKC0tvSFkqAl7JBJBCMH09PTGEPYFqJRSDmb+HgIqF3qjEOKbwDeXuZ4F0URF62FYrVaqqqpwu904nU798TKZTOpFGBqpVIqhoSFCoVBe0uYikQiRSIRoNEo8HkdKiRBCjzPb7faCv0hg/vLsxUgmk0xOTjI9PT1nudVqxeVyUVJSMmeMYz5isRjBYJBQKFQUpe+aFYDBYMBms+kDzFrobiljEpo1gOZYGA6H5zzNZJ+HRqMRm8224DiSEAK
73U5ZWRmxWGxdbi75RAu5xmIxYrHYvE952eHT1RrDWA9WrDZSSimEWPBKk1I+ATwBcLP3LQWtQMNisbB161YefPBBGhoaaGxsxOfzYTab9ZS48fFxnn76ad59993sNtHZ2ZmXMAzMxvGCwSAOh4NIJFJ0g6UrIRQK8fzzz/Pmm2/O2S+tra3cfffdejjH6/UueFF1dHTwq1/9iuHhYc6dO0c8Hl+r5q8KVqsVv9+P3W5nz549HDp0iLKyMlpbW5fcAZiZmaG3t5eJiQl6e3t56623GB0d1V83m81s375dT2e8WTqvyWSipaWF8vJyrly5MueaURQWyxX2YSFEtZRyUAhRDYzks1GLYTAYsFgsOBwOGhoauO+++9i9e/e8J2soFOLNN9/k+eefn7M8n72+VCqlpznGYrGC71Hmk2g0yqlTp3jqqafmCPvdd9+txzu1KsiF6Ovr4+WXX6a7u5tIJFLw8XWz2UxZWRkej4e9e/fypS99ifLy8mX1FpPJJGNjYwwMDNDW1saxY8fmuBbabDYOHTpEJBKhsrKS+vp6qqur5/0uo9HIpk2b2LRpE0ajEbfbvdxNVKwzyxX2Y8DXge9nfr+QtxblgMViYcuWLdTW1tLS0oLT6UQIQTqdJplMkkwmGRoaYnh4mJ6eHsbGxlZVbG02GzU1NdTV1VFeXl6UznHpdJr+/n5Onjypp9+ZTCbdu0X7SSQSc/Z1MBjEaDSyfft2PfRgMpnYtWsX5eXlC4YetEfoRCJBKBRiZmZGDzkU4o1T64yYTCb8fj8tLS34fD5qa2vnPGHmQjqd1sNSk5OTXLlyRbdaiEajc/aP5t44OTmJ3W6/aapoOp1mamqKUCjE4OCgSnUsYHJJd3yK2YFSnxCiD/jPzAr6M0KIbwDdwFdWs5HX4/V6+eIXv8hnPvMZXC6X3gOZmZkhFAoRCoX45S9/yYsvvsjU1BQ9PT2r2p6Kigo+/elPs3PnTlwu14rztm9Fkskkr7/+OleuXNFDCV6vl1QqRTQa1dPKpqam5vSobTYbu3bt4tFHH51TzOTxeHTDKm2QO5tYLEZfXx9TU1P09fURDAYJh8MFG+YymUz6AN6OHTt4+OGHaWpqwuv1Yrfbl/RdyWSSa9eucfnyZYaHh3nllVe4ePEi4XCYiYm5WcepVIqxsTGuXr1KMpkkHA4v+L2JRIKLFy9y4cIF+vr65oR0FIVFLlkxDy/w0t15bktOaAOmTU1N7N+/f05ueiqVYmZmhnA4TGdnJ6dOnVrVvGdt3Xa7nU2bNtHY2Ki/pvWatB5tIfYys5FSMjg4yODgICaTibq6Ovx+P8lkkunpaX3gbnx8XBd2zQ5gz549bN++Ha/Xy/bt2xdMj8zeZ1pPXRt4jcfjBR2CMRgM2O12/Ya2bds2Wlpacv589vmTSqUIBoMMDg4yMDDA1atXuXz58oKf0wafp6enF+2xT05O0tfXx9DQUNEZYy1EtodRsVAwqRpGo1Evd66traWycm4ijpSS7u5u3nnnHUZHR/noo49WpSBIC0G4XC5aW1uprq6mtbUVr9c7533xeJy2tjYuXbpEb29vUZkMaaEALfylZRxoPjkWi4WWlhZ27tyJ1+vl4x//ODU1NfqA90JoNgyamdrAwADT09N0dHQUZFhACIHX69UtKw4cOEBjY6Nevr9UJicnGR0dZXp6mra2Nt57771F3Ri1YzUyMoLL5Vq0o6MV6RRLsU4kEmF8fByTyTTvjaqqqopDhw4xMjLCyZMnGRkZKfhCQiggYTebzRw4cIDPf/7zlJWVzVt0dP78ef7hH/5BF4R89/C0mX8cDgeNjY08+uij3HnnndhsthsG/6LRKC+99BI/+clP9BmAioVUKsXExATBYBBAD49oaWNWq5W77rqLP/mTP8Hj8eBwOHRL2ZtlfUxNTfGv//qvHDt2TH/yisfjzMzM3JAuWQhog5Hbtm2jqqqKBx98kH379mE2m3E6nUv6LiklQ0NDvP/++wQCAV599VXefvtt4vH4TW966XSaQCBAMBjEarUumt6bPWZS6Egp9ZTmdDo9bxhqy5Yt+P1+Pcy32k/5a0XBCLvm8a05Ms5n1hUOhxkaGspL0VE2RqNRz7d2OBx4PJ55rQqy0cSvp6enKL1NtPxfrSRdmx1JsxyoqKigrq5uST3TVCpFIBCgt7eXRCJBLBYr6N6TlhdeXl6Oz+ejoqICv9+/pEf+ZDKph6GCwSDj4+OMj48TCASYmJjIaf9oCQUzMzOLCrZ28y0pKSmK0EQqldJz/K/fV9kzUTkcDpxOZ9E4hxaMsBsMBjweDzU1NTidzmW5MC4HzQly586denrlpk2bKC8vp6GhYU3acKthtVrZs2cPW7ZswWaz4fP5sNvtehVqSUkJt91225ILXLI9fJLJZMH3GktKSmhubuazn/2s3hFYKt3d3Rw/fpyBgQFGRkbo6+sjEonQ09OT91CJ0WjE7/ezbds23URPUZgUjLALIXC73boPy1rdWYUQ1NfXc+TIEXw+H62trTQ3N+uGY8XQq1kqNpuNffv2ceTIEbxeL83NzTcMiGqVv0slkUjk1LMsBDRhv+eee7Db7ctKg+3p6eGnP/0pZ8+e1UMkWqJAvveR0WikoqIC+LciQEVhUjDCDuj+JAtdICsd2S4pKcHtdusTKxuNRgwGA5s2bcLn81FWVobb7V72RVrouN1uvUq0srJSL7JxuVx5EQGj0YjP56OhoYFYLEYgECjIeKdmEVBaWkpZWRlWq3VJTy+JRIKJiQkikQgDAwNMTk4u2/oi21LA7XYverPVZmmyWq0b8hwvFgpK2HNBE/flPKZ6vV4eeOABPvaxj2E2m3G5XHpBSV1dHRaLRTfA2miUlJTwqU99iqNHj1JaWkpTUxNVVVV6TD0feDwe/uiP/oh9+/bx0Ucf8dRTTxWccZoQgv379/PHf/zH+P1+du7cuWSbgLGxMZ555hneffddhoaGGBgYWHZ7tKeGzZs36zNaLYRmP22xWJicnCzKeoyNQtEJOzAnt30pOBwODhw4wGc/+1l90Gujhluux2g0smPHDr785S/j8XhWZZ/YbDYOHjzIJz7xCU6dOsWrr75akMK+efNmjh49Sk1NzbL2U7YNxkrj6CUlJXpKrmaStxBa1pfNZsupd6+4dSkYYU+lUnR3d/P73/8ej8dDY2PjHH8NIQSVlZXcdtttesWcVuiiPdZmP45mO7ppVFdXU11djc1m02dXEkLo1gTZ5fI2m43GxkbKysrm3Ei0bI5QKFTwZlXzoWXDXI8W900kEqRSKaanp/Uq1EgksmCxi8FgoKamhpqaGr1nW4jFIi6Xi7q6OlwuF5s3b9bPn1xJJpP09fUxODhIT08P4+PjeR0czb5OFiKdTusumspSoLApGGGfmZnhN7/5DefOnaO+vp5vfetbHDlyRH9dCMFtt91GVVXVHEENBoOcPHmSzs5O/H4/e/bswev1YrVasdvtcy4+i8XCpk2b9HBLSUkJ6XSakydP8sMf/pBAIKAXbjQ0NPCnf/qnfOpTn5rTzqmpKYaGhhgfHy
cYDBZFkUeuxGIxxsfHicViXLhwgfb2dqanp+ns7GRwcHDefWGxWHj44Yf56le/WtDWxg0NDTz22GO0tLRQU1Oz5AKkWCzGr3/9a5599lmCweAcI6+1QrMUOH/+PP39/cpSoIApmCsplUrR29tLb28vY2NjjI6Okk6n55QD+/1+/H7/nM+NjY0xNjbGzMwMtbW17N69W7dMdblcN/SqrhefVCrF8PAw7733HsPDw3omwsTExA2+HFJK4vG4PhNLsfXYFyteSSQSujf98PAwV65cYXJykvPnz3Pt2rV5P2Oz2bjjjjvmzccupF672+1mx44d7Nu376ae59lkn2vJZJKenh5Onz5NNBrNe/5+Lk9B6XRat/8dHh5WPfYCpmCEPZvp6Wneeust4vE4VVVV7N+/f8FBIYvFQlNTEyaTibKyMrxerz5DjTbIOjMzQywWY3p6mosXLzI4OKh/XqtGi0QiG6r3fT3aNGPPPPMMTqdTrybVvGI0E7BgMMjMzAxXr17l6tWrup3xUrHZbNTV1dHc3EwoFGJsbOyWK/Qym81UV1frM/OUlpZitVr1gq1c0GwCJicnGRsby3sao1b9qtle3OxJQrMfGB4e1jtDisKkIIU9EAjw85//nGPHjvHJT36SysrKBYXdbrezd+9edu3ahdFo1GeS0Xow2sms9VR+9KMf8fbbb88R8XA4XDRzIS6XZDLJW2+9RXt7O2azWS/Sikaj9Pf3MzU1pcfZtZtlPB7XZ/hZKg6Hgx07dmAwGOjq6iIUCt1ywq45V7a0tLBlyxYqKyt1C+lchF1KyfDwMO+//z5jY2P09fWRSCTyKuwmk4mtW7dy+PBhvcpyobalUinGx8fp6uoiGAzmZXYxxfpQkMKeTCb10motz3eh2ZC0i0x7NE4kEvogqBZamJqaIhgMEggEGBgYuKnN7/VTaW0kNEtkbUb7mZkZIpEIvb29TE1N5XVdRqNRD5fdTIzWk2zHRqfTidlszin3O51O6z1zzRFzfHw8r0+FWgGdw+HA5XLpaYzXo92MNf/7aDRKOBxelXCQYu0oSGHPpqenh5/85Ce8+uqr876u+btcf1LHYjHC4TCpVEo/kScnJ28q6gaDAa/Xq1sb5Ct/u9DQbGO1m+RqPLJrBWKaR8+tKOzLJRQK0d3dTSgU4uzZs7zxxhsEAgF6enry0lsXQtDa2sodd9yBz+djz549C95wpJR0dXXR0dFBIBDggw8+oL+/X58nVFGYFLyw9/b28uSTTy6YWmY2m6moqMDlcunLpJQEg0HGxsZIJpN6L0mbAWghNGGvq6vb0MKu+XZrWT+rEaLSfGdMJlPRVUCGQiE6OjoYHh7m3Xff5Y033iAYDOYtvm4wGNi5cydf//rX8fv9lJaW3lTYu7u7eeWVVxgdHaWtrY3+/v6icXjcqBS8sGt+4AuRSCSwWq03nKTa1GJLedzUjMiqq6vx+/1YrdYb3qNlhOT70fpWY6WCnu0EOV+5vfYkFQwGb5ju7VZBK9fPtiXOhXg8TiAQYHh4mMnJSX0KwJViNpv1kIvP58PtdushopsRi8X0LC8tBHMr7u+VkB1yklLq9iTFSsEL+2JoYYPrB4K0gb2lYDabue222/jCF76Ax+Ohrq5uzuvpdJoLFy7w5JNPMjIywrVr11SvZwE8Hg/19fW43W4qKytv6FGGQiHa29s5ffo0U1NTt2RYwGw2U1dXp8/fmqvjaCAQ4O233+bChQuMj4/nLZRVW1vLPffcQ3V1NXv37sXn8+FwOG5aHyClZHx8nA8//FDPzik2UYfZazMSiTAxMaHfjJfqPlpI5DLnaR3wL0AlIIEnpJQ/EEKUAU8DjUAX8BUp5cRC37NeaJP55iMn12QysWXLFj796U/rMfvs2K+Ukr6+Pk6cOMHw8PCK11fMOBwONm3aRFlZGaWlpTfE0KPRKN3d3Vy8eHGdWrg4JSUllJeXU19fv+jsUNmEQiEuXbrE2bNn89qesrIyDh48qE/soXkd3QwpJVNTU/T29hZ1QZKWqaVNqDHf03YxkUuPPQn8hZTyfSGEC3hPCHEceBR4VUr5fSHE94DvAX+1ek29dVgsna0Yezz5Riu99/v9+Hy+eWPAt/p+1EIxdrt9yRYC+do2i8Wie8A0Nzfj8/nweDx6JtFC5+nExATd3d1MTU1x7dq1Wy6VVLEycpnMehAYzPwdEkJ0ADXAUeBw5m0/Bl5ngwi7YuU0NDRw9OhRGhoa8Hq9BWk4ZTQa9cmpjUbjumxDaWkp999/P3v37qWqqordu3fr+/Nmg86XL1/m8ccf56OPPmJoaGjeaeMUhcuSYuxCiEZgH3AKqMyIPsAQs6Ga+T7zTeCbK2ijYh24PsSU7+92uVw0NTWxZcuWvH73WqLVR+SSZ5+9D/O1P7Wp3err62ltbcXj8VBeXp6TN/7k5CTt7e20tbXpefWK4iFnYRdCOIHngD+XUk5dd+FLIcS8Z6uU8gngicx33NrP1hsYg8FAaWkpLpcLp9NJU1MTXq+Xnp4ezp49m5cCJI/Hw/bt2/H5fOzbt6/g00Xj8Ti9vb20t7fjdDqprq5ecJvi8bheTNfZ2bmiqk6TycSOHTvYunUrlZWV7Nixg4qKCux2+5KM1LSURjXAX3zkdBYIIUzMivqTUsrnM4uHhRDVUspBIUQ1MLJajVSsPiUlJdTU1FBfX099fT1Hjx5l+/btvPzyy/T09ORF2P1+P0ePHuVjH/sY1dXVS3ZAvNWIxWJ8+OGHmM1mamtrcTgcCwp7JBKhra2Ny5cvc+XKlRXtT6vVyl133cVDDz2E0+mksrISt9t909nFrkerulY99eIkl6wYAfwQ6JBS/o+sl44BXwe+n/n9wqq0ULEmaHNclpWV4fP5dJH3+Xwrih0LITCbzZhMJjweD36/n5qaGkpLS2/oXWq+Mpqh2K0uOloKXTAYxOv13rS9mvNnJBIhHo/r4ZjF7Cm0Qq3s110uFxUVFdTW1mK32/XUvVzcG7WJwgth/+abbCfYYqpkno9ceux3AP8OaBdCfJBZ9p+YFfRnhBDfALqBr6xKCxVrgsViYffu3dx9992UlZXpkxqvFKfTyZEjR9i7dy9+v5+9e/dSWVmJzWabN3f9d7/7HefOnaO3t5e+vr68tOFWwGq1smPHDsrLy3G5XLzzzjsAVFZW0tTUtGCqpNfrZfPmzXMqpy0WC5/4xCf0dMZce+nj4+McP36cixcv0tnZWdTpjddjMBiw2Wx4PJ6cPX0KmVyyYt4CFrq93Z3f5ijWC03YH3jgAUwmU94mvXA4HHzuc5/jkUce0S0CtJmpru81TU9P8/LLL/Pkk0+STCaLys/eZrOxfft2mpubSaVSugtkZWUlBw4cmCPc2TQ2NnLXXXdRVVU1Z7l2jHKZGUkjEAjwwgsv8OKLL+qzXW0UtGn/3G530XkPzUfRV54qbo7b7dZT9rxeLxaLRRf1XLI3DAaD3mvUKvqye0OVlZV6psZ8vaRsd82BgQHGx8cJh8O3fA77UtFCKjD7FFNTU8PExAQ1NTVUVlYumMlSVlaGy+XKK
dNFQ9t3Whn91NQUoVCI3t5eAoHAhrXj1QaKNYfWYhZ3JewbmJKSEu68806OHj1KeXk5u3fvXrJ/htVqpa6uDrfbzdatW7njjjsoLS3VX7fZbOzZs2fB700mk7z++uu8+OKLjI+P09bWVnSifj2bN2/m29/+NhMTE3i9XiorKxccx3A6nQv25m9GMpkkkUgwPT3NSy+9xGuvvUYgEKCjo2OlzS9IlKWAYsNgMBjYvn07X/rSl/B6vcvqwZjNZn1Kwn379vHFL37xhrDBzb43mUzy4Ycf8otf/GLDTGbi9/u59957kVKuWq9RcyoNh8OcOXOGp59+ekPPiKQsBRQbCi3WvVCPury8nP379y84mKpZA3i9Xmpra29aWq/NiZpIJAiFQoyOjure5Nn2yYWClJJEIkEsFmNmZibnfPClxMUXY2ZmhrGxMX2GqXA4rPfW4/E4U1NTDAwMbLgMmPnIHtcp5jAMKGFXLEJrayvf/e53F3RXLCkp0QtjXC7XTcMG0WiUq1evMjExQXt7O8ePH2d0dJTBwcGC7E2mUil9Ptby8nKSyeSat2Fqaoo33niDjo4OxsfHuXLliv7kk06nSSaTStiZm+pYzHa9GkrYl8BGyH/NRghBeXk55eXly/6O7IG8RCKhT3DS1dXFmTNnCtoFU8tNz+6xZz91rNa5kr1PZ2Zm6O/v5/LlywwNDdHW1sbk5OSqrLcY2CjXrxL2HPB6vfj9fl3kNsrJkQ+i0SgffPABnZ2dTE9P09vbSygU4qOPPsqLlfJ6kkgkGBkZIZ1OY7FYuHr1KjAbniorK8tbyuj1pFIpent7GRgYYGhoiIsXL9LV1cXU1FRRpYjmk+yb4UZACfsiCCGorq7mtttuw+fzUVtbq4R9CYRCIX75y1/y7LPP6nOkahWQhe4oGI/H6e7upq+vj1gsRktLC1NTUzQ0NOByuVZN2OPxOOfOnePEiROMjY1x5swZuru7SaVSSthvghaeuv7JqhhRwp4DVqtV763bbLaiEvZ4PE44HMZsNus57EvZPi2Om20olX3RBINBhoeH8zZR862EFgqB2eKqQCBAaWkpHo+HUChEKpVakn/LYmjeLuFwWJ9aLxAIzDtDmGIu12fFmM3mOWMisVhsjtVDoaOEfRG0HvvBgwfx+/3U1tYWzeBLKpXi9OnT/N3f/R0+n48jR46wf//+JQl7MBicM63awMDAnIHQ6elp2tvbi+aCWYjx8XHeeecdLl68iNfr5Xe/+52e419TUzPnnMkeq1nK3LGDg4N0d3cTDoe5dOkSnZ2dRKNRQqHQqmxTMRGJRDhx4gSjo6O61XL2E1UymeTUqVNF88SjhH0RsoVdm1ChWHrsqVSKM2fO0NbWpleI7t27d0k3rqmpKU6fPs3Fixfp7e29weJXGzQtdmEPBAL8/ve/17MuDAaD7uny8Y9/fI6IZFc+ak84i+0fKSVtbW288847TE9Pk0ql9Emni+1JaDUIh8OcOHFC9+iZ7xrWQoXFgBL2HMi+WOc7IWKxGMFgkGg0yuTkZEGlliWTSZLJJKFQiOHhYbq6upYUG+7t7WVkZIRAIMDExAShUGhDhgW0G1g2yWSSyclJxsbG5oRjslPuNFHPRdgnJiaYnp7ekPs3HxSTcC+GEvYc0Ao9bDYbNpvthqq1rq4uXnzxRXp7ezl//nxBXnjT09O88MILnD9/fklPJOFwWJ87MxwOF2Q++mqRTCa5evUqwWDwhqegpc5QpQl7sYQKFKuLEvYc0Pyro9HovJ4eo6OjepFIMBgsyIsvFovx7rvvcvr06SV/ttjDLMslnU4zMjLCyIiag0axtihhXwQpJSMjI3zwwQeUlZVht9tvmCWnvb2dQCBANBot+HhyIbddoVDMooR9EdLpNGfPnmVoaEi3p51vgoj+/n59Vho1mKVQKNYTsZY9NDWZtUKhUCyL96SUB3J9c3EkZCsUCoVCRwm7QqFQFBmLCrsQwiqEeFcIcU4I8aEQ4m8zy5uEEKeEEFeEEE8LIYp3OhKFQqEoIHLpsc8AR6SUe4C9wL1CiIPAfwX+p5RyKzABfGPVWqlQKBSKnFlU2OUs05l/TZkfCRwBns0s/zHwhdVooEKhUCiWRk4xdiGEUQjxATACHAc6gUkppWaP1gfULPDZbwohzgghzuShvQqFQqFYhJyEXUqZklLuBWqB24Htua5ASvmElPLAUlJ1FAqFQrF8lpQVI6WcBF4D/gAoFUJoBU61QH9+m6ZQKBSK5ZBLVkyFEKI087cNuAfoYFbgv5x529eBF1apjQqFQqFYAotWngohdjM7OGpk9kbwjJTyvwghNgM/B8qAs8BXpZQ3tfYTQowCYWAsD22/FfGhtq0QUdtWmGykbWuQUlbk+uE1tRQAEEKcKdZ4u9q2wkRtW2Gitm1hVOWpQqFQFBlK2BUKhaLIWA9hf2Id1rlWqG0rTNS2FSZq2xZgzWPsCoVCoVhdVChGoVAoigwl7AqFQlFkrKmwCyHuFUJcylj9fm8t151vhBB1QojXhBAXMnbGf5ZZXiaEOC6E+Cjz27vebV0OGX+gs0KIFzP/F4VNsxCiVAjxrBDiohCiQwjxB0V0zP5j5lw8L4R4KmO5XZDHTQjxT0KIESHE+axl8x4nMcv/zmxjmxBi//q1fHEW2Lb/ljkn24QQv9SKQjOv/XVm2y4JIT6XyzrWTNiFEEbg/wL3ATuBh4UQO9dq/atAEvgLKeVO4CDwncz2fA94VUq5DXg1838h8mfMVhhrFItN8w+A30gptwN7mN3Ggj9mQoga4D8AB6SUrcwWFD5E4R63fwbuvW7ZQsfpPmBb5uebwN+vURuXyz9z47YdB1qllLuBy8BfA2Q05SFgV+Yz/y+jpTdlLXvstwNXpJRXpZRxZqtWj67h+vOKlHJQSvl+5u8QswJRw+w2/TjztoK0MxZC1AIPAP+Y+V9QBDbNQggPcAj4IYCUMp7xPyr4Y5ahBLBlPJzswCAFetyklCeAwHWLFzpOR4F/yViMn2TWx6p6TRq6DObbNinlb7Pcck8y678Fs9v2cynljJTyGnCFWS29KWsp7DVAb9b/C1r9FhpCiEZgH3AKqJRSDmZeGgIq16tdK+B/Ad8F0pn/y8nRpvkWpwkYBX6UCTP9oxDCQREcMyllP/DfgR5mBT0IvEdxHDeNhY5TsWnLY8CvM38va9vU4OkKEUI4geeAP5dSTmW/JmdzSQsqn1QI8XlgREr53nq3ZRUoAfYDfy+l3Mesb9GcsEshHjOATLz5KLM3r02Agxsf94uGQj1OiyGE+Btmw7xPruR71lLY+4G6rP8L3upXCGFiVtSflFI+n1k8rD0GZn6PrFf7lskdwINCiC5mw2VHmI1LF4NNcx/QJ6U8lfn/WWaFvtCPGcBngGtSylEpZQJ4ntljWQzHTWOh41QU2iKEeBT4PPCI/LcCo2Vt21oK+2lgW2aU3szsgMCxNVx/XsnEnX8IdEgp/0fWS8eYtTGGArQzllL+tZSyVkrZyOwx+p2U8hGKwKZZSjkE9AohWjKL7gYuUODHLEMPcFAIYc+c
m9q2Ffxxy2Kh43QM+FomO+YgEMwK2RQEQoh7mQ1/PiiljGS9dAx4SAhhEUI0MTtA/O6iXyilXLMf4H5mR3w7gb9Zy3WvwrbcyeyjYBvwQebnfmbj0a8CHwGvAGXr3dYVbONh4MXM35szJ9QV4BeAZb3bt8xt2gucyRy3XwHeYjlmwN8CF4HzwE8AS6EeN+ApZscKEsw+aX1joeMECGYz7jqBdmYzg9Z9G5a4bVeYjaVrWvJ41vv/JrNtl4D7clmHshRQKBSKIkMNnioUCkWRoYRdoVAoigwl7AqFQlFkKGFXKBSKIkMJu0KhUBQZStgVCoWiyFDCrlAoFEXG/wf8KNMW8RAa1QAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ "