Enable python 3.11 in CI pipelines (#263)
clonker authored Nov 16, 2022
1 parent b54e7c5 commit 6eab3e6
Showing 14 changed files with 130 additions and 97 deletions.
6 changes: 4 additions & 2 deletions deeptime/base_torch.py
@@ -1,8 +1,10 @@
 from typing import Callable, Union, List, Optional

 import numpy as np
-import torch

+from deeptime.util.platform import try_import
+
+torch = try_import("torch")

 class DLEstimatorMixin:
     r""" Estimator subclass which offers some deep-learning estimators commonly used functionality.
@@ -52,7 +54,7 @@ def dtype(self, value):
         self._dtype = value

     @property
-    def optimizer(self) -> Optional[torch.optim.Optimizer]:
+    def optimizer(self) -> Optional["torch.optim.Optimizer"]:
         r""" The optimizer that is used.

         :getter: Gets the currently configured optimizer.
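
The pattern this file establishes recurs throughout the commit: the torch binding goes through `try_import`, and torch types in annotations are quoted so they are stored as strings rather than evaluated at import time. This lets the module be imported on interpreters without torch installed, presumably so the suite could load under Python 3.11 before torch wheels for it existed. A minimal sketch of the idea, assuming a deeptime installation; the class, method, and error message below are illustrative, not part of the library:

    from typing import Optional

    from deeptime.util.platform import try_import

    torch = try_import("torch")  # module object, or None if torch is missing


    class Example:
        # The quoted annotation is never evaluated, so it is harmless even
        # when `torch` is None; only calling the method requires torch.
        def configure_optimizer(self, params) -> Optional["torch.optim.Optimizer"]:
            if torch is None:
                raise RuntimeError("this functionality requires PyTorch")
            return torch.optim.Adam(params, lr=3e-4)
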
5 changes: 4 additions & 1 deletion deeptime/decomposition/deep/_kvadnet.py
@@ -1,4 +1,3 @@
-import torch
 from . import _vampnet as vnet
 from ...kernels import Kernel, GaussianKernel, is_torch_kernel
 from ...util.torch import multi_dot
@@ -12,6 +11,8 @@ def whiten(data, epsilon=1e-6, mode='regularize'):


 def gramian(y, kernel):
+    import torch
+
     with torch.no_grad():
         if is_torch_kernel(kernel):
             g_yy = kernel.gram(y)
@@ -22,6 +23,8 @@ def gramian(y, kernel):


 def kvad_score(chi_x, y, kernel: Kernel = GaussianKernel(1.), epsilon=1e-6, mode='regularize'):
+    import torch
+
     G = gramian(y, kernel)

     N = y.shape[0]
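
Here the torch imports move into the function bodies instead: the module imports cleanly without torch, and the missing dependency only surfaces when `gramian` or `kvad_score` is actually called. A simplified sketch of the effect (the function below is illustrative, not deeptime API):

    def gram_matrix(y):
        import torch  # deferred: fails at call time, not at module import time
        with torch.no_grad():
            return y @ y.t()  # plain linear kernel, for illustration only
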
33 changes: 19 additions & 14 deletions deeptime/decomposition/deep/_tae.py
@@ -2,14 +2,12 @@

 import numpy as np

-import torch
-import torch.nn as nn
-from torch.utils.data import DataLoader

 from ...base import Transformer, Model, EstimatorTransformer
 from ...base_torch import DLEstimatorMixin
+from ...util.platform import try_import
 from ...util.torch import map_data, MLP

+torch = try_import("torch")

 class TAEModel(Model, Transformer):
     r""" Model produced by time-lagged autoencoders. Contains the encoder, decoder, and can transform data to
@@ -53,7 +51,7 @@ def decoder(self):
         """
         return self._decoder

-    def _encode(self, x: torch.Tensor):
+    def _encode(self, x: "torch.Tensor"):
         return self._encoder(x)

     def transform(self, data, **kwargs):
@@ -99,7 +97,10 @@ class TAE(EstimatorTransformer, DLEstimatorMixin):
     """
     _MUTABLE_INPUT_DATA = True

-    def __init__(self, encoder: nn.Module, decoder: nn.Module, optimizer='Adam', learning_rate=3e-4, device='cpu'):
+    def __init__(self, encoder: "torch.nn.Module", decoder: "torch.nn.Module",
+                 optimizer='Adam', learning_rate=3e-4, device='cpu'):
+        import torch.nn as nn
+
         super().__init__()
         self.device = device
         self._encoder = encoder.to(self.device)
@@ -110,7 +111,7 @@ def __init__(self, encoder: nn.Module, decoder: nn.Module, optimizer='Adam', lea
         self._train_losses = []
         self._val_losses = []

-    def evaluate_loss(self, x: torch.Tensor, y: torch.Tensor):
+    def evaluate_loss(self, x: "torch.Tensor", y: "torch.Tensor"):
         r""" Evaluates the loss based on input tensors.

         Parameters
@@ -144,7 +145,8 @@ def validation_losses(self) -> np.ndarray:
         """
         return np.array(self._val_losses)

-    def fit(self, data_loader: DataLoader, n_epochs: int = 5, validation_loader: Optional[DataLoader] = None, **kwargs):
+    def fit(self, data_loader: "torch.utils.data.DataLoader", n_epochs: int = 5,
+            validation_loader: Optional["torch.utils.data.DataLoader"] = None, **kwargs):
         r""" Fits the encoder and decoder based on data. Note that a call to fit does not reset the weights in the
         networks that are currently in :attr:`encoder` and :attr:`decoder`.
@@ -215,7 +217,7 @@ def fetch_model(self) -> TAEModel:
         return TAEModel(deepcopy(self._encoder), deepcopy(self._decoder), device=self.device, dtype=self.dtype)


-def _reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
+def _reparameterize(mu, logvar):
     std = torch.exp(0.5 * logvar)
     eps = torch.randn_like(std)
     return eps * std + mu
@@ -229,7 +231,7 @@ class TVAEModel(TAEModel):
     --------
     TAEModel
     """
-    def _encode(self, x: torch.Tensor):
+    def _encode(self, x: "torch.Tensor"):
         return _reparameterize(*self.encoder(x))


@@ -245,7 +247,10 @@ class TVAEEncoder(MLP):
         The nonlinearity to use. Callable must produce a `torch.nn.Module` which implements the nonlinear operation.
     """

-    def __init__(self, units: List[int], nonlinearity=nn.ELU):
+    def __init__(self, units: List[int], nonlinearity=None):
+        import torch.nn as nn
+        if nonlinearity is None:
+            nonlinearity = nn.ELU
         super().__init__(units[:-1], nonlinearity=nonlinearity, initial_batchnorm=False,
                          output_nonlinearity=nonlinearity)
         lat_in = units[-2]
@@ -267,12 +272,12 @@ class TVAE(TAE):
     TAE
     TVAEModel
     """
-    def __init__(self, encoder: nn.Module, decoder: nn.Module, optimizer='Adam', learning_rate: float = 5e-4,
-                 beta: float = 1.):
+    def __init__(self, encoder: "torch.nn.Module", decoder: "torch.nn.Module", optimizer='Adam',
+                 learning_rate: float = 5e-4, beta: float = 1.):
         super().__init__(encoder, decoder, optimizer=optimizer, learning_rate=learning_rate)
         self._beta = beta

-    def evaluate_loss(self, x: torch.Tensor, y: torch.Tensor):
+    def evaluate_loss(self, x: "torch.Tensor", y: "torch.Tensor"):
         r""" Evaluates the reconstruction loss and latent regularization loss, returns the sum.

         Parameters
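
The `TVAEEncoder` change above is subtler than the annotation edits: default argument values, unlike quoted annotations, are evaluated when the `def` statement runs, so `nonlinearity=nn.ELU` would require torch at import time. The same reasoning explains why `_reparameterize` loses its unquoted `torch.Tensor` annotations. A minimal sketch of the sentinel-default workaround (the class below is illustrative):

    class Encoder:
        def __init__(self, nonlinearity=None):
            import torch.nn as nn  # resolved only at construction time
            if nonlinearity is None:  # sentinel replaces the eager nn.ELU default
                nonlinearity = nn.ELU
            self.activation = nonlinearity()
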
45 changes: 23 additions & 22 deletions deeptime/decomposition/deep/_vampnet.py
@@ -1,16 +1,18 @@
 from typing import Optional, Union, Callable, Tuple

 import numpy as np
-import torch
-import torch.nn as nn

 from ...base import Model, Transformer, EstimatorTransformer
+from ...util.platform import try_import
+
+torch = try_import('torch')
+
 from ...base_torch import DLEstimatorMixin
 from ...util.torch import map_data, eigh, multi_dot, disable_TF32


 def symeig_reg(mat, epsilon: float = 1e-6, mode='regularize', eigenvectors=True) \
-        -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
+        -> Tuple["torch.Tensor", Optional["torch.Tensor"]]:
     r""" Solves an eigenvector/eigenvalue decomposition for a Hermitian matrix, also if it is rank deficient.

     Parameters
@@ -95,8 +97,8 @@ def sym_inverse(mat, epsilon: float = 1e-6, return_sqrt=False, mode='regularize'
 sym_inverse.valid_modes = ('trunc', 'regularize', 'clamp')


-def koopman_matrix(x: torch.Tensor, y: torch.Tensor, epsilon: float = 1e-6, mode: str = 'trunc',
-                   c_xx: Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = None) -> torch.Tensor:
+def koopman_matrix(x: "torch.Tensor", y: "torch.Tensor", epsilon: float = 1e-6, mode: str = 'trunc',
+                   c_xx: Optional[Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]] = None) -> "torch.Tensor":
     r""" Computes the Koopman matrix

     .. math:: K = C_{00}^{-1/2}C_{0t}C_{tt}^{-1/2}
@@ -130,7 +132,7 @@ def koopman_matrix(x: torch.Tensor, y: torch.Tensor, epsilon: float = 1e-6, mode
     return multi_dot([c00_sqrt_inv, c0t, ctt_sqrt_inv]).t()


-def covariances(x: torch.Tensor, y: torch.Tensor, remove_mean: bool = True):
+def covariances(x: "torch.Tensor", y: "torch.Tensor", remove_mean: bool = True):
     """Computes instantaneous and time-lagged covariance matrices.

     Parameters
@@ -156,7 +158,6 @@ def covariances(x: torch.Tensor, y: torch.Tensor, remove_mean: bool = True):
     deeptime.covariance.Covariance : Estimator yielding these kinds of covariance matrices based on raw numpy arrays
         using an online estimation procedure.
     """
-
     assert x.shape == y.shape, "x and y must be of same shape"
     batch_size = x.shape[0]
@@ -178,7 +179,7 @@ def covariances(x: torch.Tensor, y: torch.Tensor, remove_mean: bool = True):
 valid_score_methods = ('VAMP1', 'VAMP2', 'VAMPE')


-def vamp_score(data: torch.Tensor, data_lagged: torch.Tensor, method='VAMP2', epsilon: float = 1e-6, mode='trunc'):
+def vamp_score(data: "torch.Tensor", data_lagged: "torch.Tensor", method='VAMP2', epsilon: float = 1e-6, mode='trunc'):
     r"""Computes the VAMP score based on data and corresponding time-shifted data.

     Parameters
@@ -234,7 +235,7 @@ def vamp_score(data: torch.Tensor, data_lagged: torch.Tensor, method='VAMP2', ep
     return 1 + out


-def vampnet_loss(data: torch.Tensor, data_lagged: torch.Tensor, method='VAMP2', epsilon: float = 1e-6,
+def vampnet_loss(data: "torch.Tensor", data_lagged: "torch.Tensor", method='VAMP2', epsilon: float = 1e-6,
                  mode: str = 'trunc'):
     r"""Loss function that can be used to train VAMPNets. It evaluates as :math:`-\mathrm{score}`. The score
     is implemented in :meth:`score`."""
@@ -262,7 +263,7 @@ class VAMPNetModel(Transformer, Model):
     VAMPNet : The corresponding estimator.
     """

-    def __init__(self, lobe: nn.Module, lobe_timelagged: Optional[nn.Module] = None,
+    def __init__(self, lobe: "torch.nn.Module", lobe_timelagged: Optional["torch.nn.Module"] = None,
                  dtype=np.float32, device=None):
         super().__init__()
         self._lobe = lobe
@@ -278,7 +279,7 @@ def __init__(self, lobe: nn.Module, lobe_timelagged: Optional[nn.Module] = None,
         self._device = device

     @property
-    def lobe(self) -> nn.Module:
+    def lobe(self) -> "torch.nn.Module":
         r""" The instantaneous lobe.

         Returns
@@ -288,7 +289,7 @@ def lobe(self) -> nn.Module:
         return self._lobe

     @property
-    def lobe_timelagged(self) -> nn.Module:
+    def lobe_timelagged(self) -> "torch.nn.Module":
         r""" The timelagged lobe. Might be equal to :attr:`lobe`.

         Returns
@@ -370,7 +371,7 @@ class VAMPNet(EstimatorTransformer, DLEstimatorMixin):
     """
     _MUTABLE_INPUT_DATA = True

-    def __init__(self, lobe: nn.Module, lobe_timelagged: Optional[nn.Module] = None,
+    def __init__(self, lobe: "torch.nn.Module", lobe_timelagged: Optional["torch.nn.Module"] = None,
                  device=None, optimizer: Union[str, Callable] = 'Adam', learning_rate: float = 5e-4,
                  score_method: str = 'VAMP2', score_mode: str = 'regularize', epsilon: float = 1e-6,
                  dtype=np.float32):
@@ -437,7 +438,7 @@ def score_method(self, value: str):
         self._score_method = value

     @property
-    def lobe(self) -> nn.Module:
+    def lobe(self) -> "torch.nn.Module":
         r""" The instantaneous lobe of the VAMPNet.

         :getter: Gets the instantaneous lobe.
@@ -447,7 +448,7 @@ def lobe(self) -> nn.Module:
         return self._lobe

     @lobe.setter
-    def lobe(self, value: nn.Module):
+    def lobe(self, value: "torch.nn.Module"):
         self._lobe = value
         if self.dtype == np.float32:
             self._lobe = self._lobe.float()
@@ -456,7 +457,7 @@ def lobe(self, value: nn.Module):
         self._lobe = self._lobe.to(device=self.device)

     @property
-    def lobe_timelagged(self) -> nn.Module:
+    def lobe_timelagged(self) -> "torch.nn.Module":
         r""" The timelagged lobe of the VAMPNet.

         :getter: Gets the timelagged lobe. Can be the same as the instantaneous lobe.
@@ -466,7 +467,7 @@ def lobe_timelagged(self) -> nn.Module:
         return self._lobe_timelagged

     @lobe_timelagged.setter
-    def lobe_timelagged(self, value: Optional[nn.Module]):
+    def lobe_timelagged(self, value: Optional["torch.nn.Module"]):
         if value is None:
             value = self.lobe
         else:
@@ -477,7 +478,7 @@ def lobe_timelagged(self, value: Optional[nn.Module]):
         self._lobe_timelagged = value
         self._lobe_timelagged = self._lobe_timelagged.to(device=self.device)

-    def partial_fit(self, data, train_score_callback: Callable[[int, torch.Tensor], None] = None):
+    def partial_fit(self, data, train_score_callback: Callable[[int, "torch.Tensor"], None] = None):
         r""" Performs a partial fit on data. This does not perform any batching.

         Parameters
@@ -530,7 +531,7 @@ def partial_fit(self, data, train_score_callback: Callable[[int, torch.Tensor],

         return self

-    def validate(self, validation_data: Tuple[torch.Tensor]) -> torch.Tensor:
+    def validate(self, validation_data: Tuple["torch.Tensor"]) -> "torch.Tensor":
         r""" Evaluates the currently set lobe(s) on validation data and returns the value of the configured score.

         Parameters
@@ -553,9 +554,9 @@ def validate(self, validation_data: Tuple[torch.Tensor]) -> torch.Tensor:
         score_value = vamp_score(val, val_t, method=self.score_method, mode=self.score_mode, epsilon=self.epsilon)
         return score_value

-    def fit(self, data_loader: torch.utils.data.DataLoader, n_epochs=1, validation_loader=None,
-            train_score_callback: Callable[[int, torch.Tensor], None] = None,
-            validation_score_callback: Callable[[int, torch.Tensor], None] = None,
+    def fit(self, data_loader: "torch.utils.data.DataLoader", n_epochs=1, validation_loader=None,
+            train_score_callback: Callable[[int, "torch.Tensor"], None] = None,
+            validation_score_callback: Callable[[int, "torch.Tensor"], None] = None,
             progress=None, **kwargs):
         r""" Fits a VampNet on data.
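
As a reference point for the annotation-only changes above, the formula that `koopman_matrix` implements, :math:`K = C_{00}^{-1/2}C_{0t}C_{tt}^{-1/2}`, can be written out directly. The sketch below assumes full-rank covariances and skips the regularization that `sym_inverse` applies; all names and the (n - 1) normalization are illustrative and may differ from the `covariances` convention above:

    import torch

    x = torch.randn(1000, 3)             # instantaneous features
    y = torch.randn(1000, 3)             # time-lagged features
    x, y = x - x.mean(0), y - y.mean(0)  # the remove_mean=True branch
    n = x.shape[0]
    c00 = x.t() @ x / (n - 1)
    c0t = x.t() @ y / (n - 1)
    ctt = y.t() @ y / (n - 1)

    def inv_sqrt(c):
        # inverse matrix square root via a symmetric eigendecomposition
        w, v = torch.linalg.eigh(c)
        return v @ torch.diag(w.rsqrt()) @ v.t()

    K = (inv_sqrt(c00) @ c0t @ inv_sqrt(ctt)).t()
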
9 changes: 5 additions & 4 deletions deeptime/kernels/_kernels_torch.py
@@ -1,11 +1,12 @@
 from typing import Union

 import numpy as np
-import torch

 from . import GaussianKernel
+from ..util.platform import try_import
+
+torch = try_import("torch")

-TensorOrArray = Union[np.ndarray, torch.Tensor]
+TensorOrArray = Union[np.ndarray, "torch.Tensor"]


 class TorchGaussianKernel(GaussianKernel):
@@ -37,7 +38,7 @@ def cdist(x1, x2):
         ).add_(x1_norm).clamp_min_(1e-16)
         return res

-    def apply_torch(self, data_1: torch.Tensor, data_2: torch.Tensor):
+    def apply_torch(self, data_1: "torch.Tensor", data_2: "torch.Tensor"):
         distance_matrix = TorchGaussianKernel.cdist(data_1, data_2)
         return torch.exp(-distance_matrix / (2. * self.sigma ** 2))
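
For context, `apply_torch` evaluates the Gaussian kernel :math:`k(x, y) = \exp(-\|x - y\|^2 / (2\sigma^2))` on all pairs, where the static `cdist` above already yields squared distances. An equivalent sketch with stock torch calls (`torch.cdist` returns plain distances, hence the extra square; the function name is illustrative):

    import torch

    def gaussian_gram(a, b, sigma=1.0):
        d2 = torch.cdist(a, b) ** 2  # squared pairwise Euclidean distances
        return torch.exp(-d2 / (2.0 * sigma ** 2))
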
27 changes: 18 additions & 9 deletions deeptime/util/platform.py
@@ -1,6 +1,20 @@
 import warnings


+def try_import(modname: str):
+    r""" Tries to import the given module and returns `None` if it is not available or could not be imported
+    for another reason. """
+    try:
+        import importlib
+        return importlib.import_module(modname)
+    except ImportError:
+        pass
+    except BaseException as e:
+        warnings.warn(f"There was a problem importing {modname}, treating it as unavailable. Stacktrace: {e}",
+                      RuntimeWarning)
+    return None
+
+
 def module_available(modname: str) -> bool:
     r"""Checks whether a module is installed and available for import by the current interpreter.
@@ -14,15 +28,7 @@ def module_available(modname: str) -> bool:
     available: bool
         Whether the module is available.
     """
-    try:
-        __import__(modname)
-        return True
-    except ImportError:
-        return False
-    except BaseException as e:
-        warnings.warn(f"There was a problem importing {modname}, treating it as unavailable. Stacktrace: {e}",
-                      RuntimeWarning)
-        return False
+    return try_import(modname) is not None


def handle_progress_bar(progress):
Expand All @@ -46,14 +52,17 @@ def __init__(self, x=None, **_):
self.n = 0

def __enter__(self): return self

def __exit__(self, exc_type, exc_val, exc_tb): return False

def __iter__(self):
for x in self._x:
yield x

def update(self, *_): pass

def close(self): pass

def set_description(self, *_): pass

return progress
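
Taken together: `try_import` returns the module object or `None`, and `module_available` becomes a thin wrapper over it. A short usage sketch (assumes a deeptime checkout containing this commit):

    from deeptime.util.platform import module_available, try_import

    torch = try_import("torch")  # module object, or None when unavailable
    if torch is not None:
        print(torch.__version__)
    assert module_available("torch") == (torch is not None)
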
