Merge pull request #18 from Vivswan/master
v1.0.0 final
Vivswan authored Mar 7, 2023
2 parents 34f0a20 + 2f9f3bf commit 8ed52ea
Showing 47 changed files with 249 additions and 179 deletions.
1 change: 1 addition & 0 deletions .flake8
@@ -6,6 +6,7 @@ extend-ignore =
D100, # Missing docstring in public module
D104, # Missing docstring in public package
D202, # No blank lines allowed after function docstring
D210, # No whitespaces allowed surrounding docstring text
D401, # First line should be in imperative mood
R504, # unnecessary variable assignment before return statement
R505, # unnecessary else after return statement
3 changes: 0 additions & 3 deletions .gitignore
@@ -3,9 +3,6 @@ __pycache__/
*.py[cod]
*$py.class

_run_files/
_crc_slurm/
_results/
_data/
.idea
.idea/**
3 changes: 2 additions & 1 deletion analogvnn/__init__.py
@@ -1,11 +1,12 @@
"""AnalogVNN: A fully modular framework for modeling and optimizing analog/photonic neural networks."""

import sys

if sys.version_info[:2] >= (3, 8):
from importlib import metadata
else:
import importlib_metadata as metadata # pragma: no cover


__package__ = 'analogvnn'
__author__ = 'Vivswan Shah (vivswanshah@pitt.edu)'

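The module docstring gains a one-line description, and the version-gated import keeps the stdlib/backport split for `importlib.metadata`. A minimal sketch of how such a guard is typically used, assuming the rest of the file (cut off in this hunk) reads the installed package version:

```python
# Hedged sketch, not the file's actual continuation: the guard picks the stdlib
# importlib.metadata on Python 3.8+ and the importlib_metadata backport otherwise,
# typically to look up the installed distribution's version.
import sys

if sys.version_info[:2] >= (3, 8):
    from importlib import metadata
else:
    import importlib_metadata as metadata  # backport package

try:
    __version__ = metadata.version('analogvnn')
except metadata.PackageNotFoundError:
    # Running from a source checkout where the package is not installed.
    __version__ = 'unknown'
```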
25 changes: 12 additions & 13 deletions analogvnn/backward/BackwardFunction.py
@@ -1,12 +1,11 @@
from __future__ import annotations

from abc import ABC
from typing import Callable

from torch import nn, Tensor

from analogvnn.backward.BackwardModule import BackwardModule
from analogvnn.utils.common_types import TENSORS
from analogvnn.utils.common_types import TENSORS, TENSOR_CALLABLE

__all__ = ['BackwardFunction']

@@ -15,47 +14,47 @@ class BackwardFunction(BackwardModule, ABC):
"""The backward module that uses a function to compute the backward gradient.
Attributes:
_backward_function (Callable): The function used to compute the backward gradient.
_backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient.
"""

_backward_function: Callable
_backward_function: TENSOR_CALLABLE

def __init__(self, backward_function: Callable, layer: nn.Module = None):
def __init__(self, backward_function: TENSOR_CALLABLE, layer: nn.Module = None):
"""Initializes the backward module.
Args:
backward_function (Callable): The function used to compute the backward gradient.
backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient.
layer (nn.Module): The layer that this backward module is associated with.
"""

super(BackwardFunction, self).__init__(layer)
super().__init__(layer)
self._backward_function = backward_function

@property
def backward_function(self) -> Callable:
def backward_function(self) -> TENSOR_CALLABLE:
"""The function used to compute the backward gradient.
Returns:
Callable: The function used to compute the backward gradient.
TENSOR_CALLABLE: The function used to compute the backward gradient.
"""

return self._backward_function

@backward_function.setter
def backward_function(self, backward_function: Callable):
def backward_function(self, backward_function: TENSOR_CALLABLE):
"""Sets the function used to compute the backward gradient with.
Args:
backward_function (Callable): The function used to compute the backward gradient with.
backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient with.
"""

self.set_backward_function(backward_function)

def set_backward_function(self, backward_function: Callable) -> BackwardFunction:
def set_backward_function(self, backward_function: TENSOR_CALLABLE) -> BackwardFunction:
"""Sets the function used to compute the backward gradient with.
Args:
backward_function (Callable): The function used to compute the backward gradient with.
backward_function (TENSOR_CALLABLE): The function used to compute the backward gradient with.
Returns:
BackwardFunction: self.
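Since this hunk only swaps the `Callable` annotations for the `TENSOR_CALLABLE` alias, here is a brief usage sketch; it assumes `TENSOR_CALLABLE` is essentially a callable over tensors (its definition in `analogvnn.utils.common_types` is not part of this diff):

```python
# Sketch of attaching a custom gradient function via BackwardFunction; treat the
# standalone instantiation as illustrative rather than the library's canonical usage.
from torch import Tensor

from analogvnn.backward.BackwardFunction import BackwardFunction


def straight_through(grad_output: Tensor) -> Tensor:
    # Pass the incoming gradient through unchanged.
    return grad_output


backward = BackwardFunction(backward_function=straight_through)
backward.backward_function = straight_through  # same effect as set_backward_function(...)
```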
8 changes: 4 additions & 4 deletions analogvnn/backward/BackwardModule.py
@@ -1,7 +1,7 @@
from __future__ import annotations

import abc
from typing import Callable, Any, Optional, Sequence, Tuple, Type
from typing import Callable, Any, Optional, Tuple, Type

import torch
from torch import nn, Tensor, autograd
@@ -69,7 +69,7 @@ def backward(ctx: Any, *grad_outputs: Tensor) -> Tuple[None, None, TENSORS]:
backward_module: BackwardModule = ctx.backward_module
results = backward_module._call_impl_backward(*grad_outputs)

if isinstance(results, Sequence):
if isinstance(results, (tuple, list)):
return (None, None, *results)

return None, None, results
@@ -81,7 +81,7 @@ def __init__(self, layer: nn.Module = None):
layer (nn.Module): The layer for which the backward gradient is computed.
"""

super(BackwardModule, self).__init__()
super().__init__()
self._layer = None
self._set_autograd_backward()
if not isinstance(self, nn.Module):
@@ -285,4 +285,4 @@ def __getattr__(self, name: str) -> Any:
return super(BackwardModule, self).__getattr__(name)
if not str(name).startswith('__') and self._layer is not None and hasattr(self._layer, name):
return getattr(self._layer, name)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
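The switch from `isinstance(results, Sequence)` to `isinstance(results, (tuple, list))` narrows what gets splatted into the autograd return tuple; strings, for instance, also register as `Sequence` and would otherwise be unpacked element by element:

```python
# Why the narrower isinstance check matters: several types register as Sequence
# even though they should be forwarded as a single backward result.
from collections.abc import Sequence

print(isinstance('grad', Sequence))           # True  -> the old check would unpack it character by character
print(isinstance((1.0, 2.0), Sequence))       # True
print(isinstance('grad', (tuple, list)))      # False -> treated as one result
print(isinstance([1.0, 2.0], (tuple, list)))  # True  -> still unpacked as expected
```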
1 change: 1 addition & 0 deletions analogvnn/fn/__init__.py
@@ -0,0 +1 @@
"""Additional functions for analogvnn."""
13 changes: 6 additions & 7 deletions analogvnn/fn/dirac_delta.py
@@ -2,19 +2,18 @@

from analogvnn.utils.common_types import TENSOR_OPERABLE

__all__ = ['dirac_delta']
__all__ = ['gaussian_dirac_delta']


def dirac_delta(x: TENSOR_OPERABLE, a: TENSOR_OPERABLE = 0.001) -> TENSOR_OPERABLE:
"""`dirac_delta` takes `x` and returns the Dirac delta function of `x` with standard deviation of `a`.
def gaussian_dirac_delta(x: TENSOR_OPERABLE, std: TENSOR_OPERABLE = 0.001) -> TENSOR_OPERABLE:
"""Gaussian Dirac Delta function with standard deviation `std`.
Args:
x (TENSOR_OPERABLE): Tensor
a (TENSOR_OPERABLE): standard deviation.
std (TENSOR_OPERABLE): standard deviation.
Returns:
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, but with values equal to the Dirac delta function
of x.
TENSOR_OPERABLE: TENSOR_OPERABLE with the same shape as x, with values of the Gaussian Dirac Delta function.
"""

return 1 / (np.abs(a) * np.sqrt(np.pi)) * np.exp(-((x / a) ** 2))
return 1 / (np.abs(std) * np.sqrt(np.pi)) * np.exp(-((x / std) ** 2))
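A quick numeric check of the renamed `gaussian_dirac_delta`, reusing the formula shown above: as `std` shrinks, the Gaussian's peak grows while its integral stays close to 1, which is the property the delta approximation relies on.

```python
# Standalone sketch reproducing the formula from the diff and checking its normalization.
import numpy as np


def gaussian_dirac_delta(x, std=0.001):
    return 1 / (np.abs(std) * np.sqrt(np.pi)) * np.exp(-((x / std) ** 2))


x = np.linspace(-1, 1, 200_001)
dx = x[1] - x[0]
for std in (0.1, 0.01, 0.001):
    area = float(np.sum(gaussian_dirac_delta(x, std)) * dx)
    print(f'std={std}: peak={gaussian_dirac_delta(0.0, std):.1f}, area={area:.4f}')  # area stays ~1.0
```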
2 changes: 1 addition & 1 deletion analogvnn/graph/AccumulateGrad.py
@@ -30,7 +30,7 @@ def __init__(self, module: Union[nn.Module, Callable]):
module (Union[nn.Module, Callable]): Module from which to accumulate gradients.
"""

super(AccumulateGrad, self).__init__()
super().__init__()
self.input_output_connections = {}
self.module = module

5 changes: 4 additions & 1 deletion analogvnn/graph/AcyclicDirectedGraph.py
@@ -50,7 +50,7 @@ def __init__(self, graph_state: ModelGraphState = None):
NotImplementedError: If allow_loops is True, since this is not implemented yet.
"""

super(AcyclicDirectedGraph, self).__init__()
super().__init__()
self.graph = nx.MultiDiGraph()
self.graph_state = graph_state
self._is_static = False
@@ -448,3 +448,6 @@ def render(self, *args, real_label: bool = False, **kwargs) -> str:
"""

return to_graphviz_digraph(self.graph, real_label=real_label).render(*args, **kwargs)

save = render
"""Alias for render."""
6 changes: 3 additions & 3 deletions analogvnn/graph/ArgsKwargs.py
@@ -39,7 +39,7 @@ def __init__(self, args=None, kwargs=None):
kwargs: The keyword arguments.
"""

super(ArgsKwargs, self).__init__()
super().__init__()
if args is None:
args = []
if kwargs is None:
@@ -48,7 +48,7 @@ def __init__(self, args=None, kwargs=None):
if isinstance(args, tuple):
args = list(args)

if not isinstance(args, List):
if not isinstance(args, list):
args = [args]

self.args = args
@@ -79,7 +79,7 @@ def to_args_kwargs_object(cls, outputs: ArgsKwargsInput) -> ArgsKwargs:
pass
elif isinstance(outputs, dict):
outputs = cls(kwargs=outputs)
elif isinstance(outputs, tuple) and len(outputs) == 2 and isinstance(outputs[1], dict):
elif isinstance(outputs, (tuple, list)) and len(outputs) == 2 and isinstance(outputs[1], dict):
outputs = cls(args=outputs[0], kwargs=outputs[1])
else:
outputs = cls(args=outputs)
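With this change, `to_args_kwargs_object` also unpacks a two-element list whose second item is a dict, not just a tuple. A small sketch of the normalization, inferred from the branches visible in this hunk:

```python
# Illustrative only; behaviour inferred from the branches shown in this diff.
from analogvnn.graph.ArgsKwargs import ArgsKwargs

print(ArgsKwargs.to_args_kwargs_object({'lr': 0.1}).kwargs)          # {'lr': 0.1}
print(ArgsKwargs.to_args_kwargs_object([(1, 2), {'lr': 0.1}]).args)  # [1, 2] (tuple args are stored as a list)
print(ArgsKwargs.to_args_kwargs_object(3.14).args)                   # [3.14] (bare values are wrapped)
```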
4 changes: 2 additions & 2 deletions analogvnn/graph/ForwardGraph.py
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Dict, Sequence
from typing import Dict

import torch
from torch import Tensor
@@ -69,7 +69,7 @@ def calculate(
ArgsKwargsOutput: Output of the graph
"""

if not isinstance(inputs, Sequence):
if not isinstance(inputs, (tuple, list)):
inputs = (inputs,)

if not self.graph_state.use_autograd_graph and is_training:
2 changes: 1 addition & 1 deletion analogvnn/graph/ModelGraph.py
@@ -26,7 +26,7 @@ def __init__(self, use_autograd_graph: bool = False, allow_loops: bool = False):
allow_loops: If True, the graph is allowed to contain loops.
"""

super(ModelGraph, self).__init__(use_autograd_graph, allow_loops)
super().__init__(use_autograd_graph, allow_loops)
self.forward_graph = ForwardGraph(self)
self.backward_graph = BackwardGraph(self)

2 changes: 1 addition & 1 deletion analogvnn/graph/ModelGraphState.py
@@ -46,7 +46,7 @@ def __init__(self, use_autograd_graph: bool = False, allow_loops=False):
allow_loops: If True, the graph is allowed to contain loops.
"""

super(ModelGraphState, self).__init__()
super().__init__()
self.allow_loops = allow_loops
self.use_autograd_graph = use_autograd_graph

8 changes: 1 addition & 7 deletions analogvnn/graph/to_graph_viz_digraph.py
@@ -23,19 +23,14 @@ def to_graphviz_digraph(from_graph: networkx.DiGraph, real_label: bool = False)
graphviz.Digraph: the converted graph.
Raises:
ImportError: if pygraphviz (https://pygraphviz.github.io/) is not available.
ImportError: if graphviz (https://pygraphviz.github.io/) is not available.
"""

try:
# noinspection PyPackageRequirements
import pygraphviz # noqa: F401
except ImportError as e:
raise ImportError('requires pygraphviz: https://pygraphviz.github.io/') from e
try:
from graphviz import Digraph
except ImportError as e:
raise ImportError('requires graphviz: https://pygraphviz.github.io/') from e

strict = networkx.number_of_selfloops(from_graph) == 0 and not from_graph.is_multigraph()
node_attr = {
'style': 'filled',
@@ -50,7 +45,6 @@ def to_graphviz_digraph(from_graph: networkx.DiGraph, real_label: bool = False)
name=from_graph.name,
strict=strict,
node_attr=node_attr,
graph_attr={'size': '12,12'},
format='svg'
)

7 changes: 3 additions & 4 deletions analogvnn/nn/Linear.py
@@ -31,20 +31,19 @@ def forward(self, x: Tensor):

return y

def backward(self, grad_output: Optional[Tensor], weight: Optional[Tensor] = None) -> Optional[Tensor]:
def backward(self, grad_output: Optional[Tensor]) -> Optional[Tensor]:
"""Backward pass of the linear layer.
Args:
grad_output (Optional[Tensor]): The gradient of the output.
weight (Optional[Tensor]): The weight of the layer.
Returns:
Optional[Tensor]: The gradient of the input.
"""

grad_output = to_matrix(grad_output)

weight = to_matrix(self.weight if weight is None else weight)
weight = to_matrix(self.weight)
grad_input = grad_output @ weight

self.set_grad_of(self.weight, torch.mm(grad_output.t(), self.inputs))
@@ -77,7 +76,7 @@ def __init__(self, in_features: int, out_features: int, bias: bool = True):
bias (bool): True if the layer has a bias.
"""

super(Linear, self).__init__()
super().__init__()
self.in_features = in_features
self.out_features = out_features

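The simplified `backward` above now always differentiates against the layer's own weight. As a reminder of the gradient algebra it implements, a standalone sketch with plain tensors (not the analogvnn layer itself):

```python
# For y = x @ W.T (nn.Linear convention, W of shape (out_features, in_features)):
#   dL/dx = grad_output @ W    -> what `grad_input = grad_output @ weight` computes
#   dL/dW = grad_output.T @ x  -> what set_grad_of(self.weight, mm(grad_output.t(), inputs)) stores
import torch

x = torch.randn(5, 3)            # (batch, in_features)
weight = torch.randn(2, 3)       # (out_features, in_features)
grad_output = torch.randn(5, 2)  # gradient from the next layer, (batch, out_features)

grad_input = grad_output @ weight           # shape (5, 3)
grad_weight = torch.mm(grad_output.t(), x)  # shape (2, 3)
print(grad_input.shape, grad_weight.shape)
```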
4 changes: 2 additions & 2 deletions analogvnn/nn/activation/ELU.py
@@ -28,7 +28,7 @@ def __init__(self, alpha: float = 1.0507, scale_factor: float = 1.):
scale_factor (float): the scale factor parameter.
"""

super(SELU, self).__init__()
super().__init__()
self.alpha = nn.Parameter(torch.tensor(alpha), requires_grad=False)
self.scale_factor = nn.Parameter(torch.tensor(scale_factor), requires_grad=False)

@@ -103,4 +103,4 @@ def __init__(self, alpha: float = 1.0507):
alpha (float): the alpha parameter.
"""

super(ELU, self).__init__(alpha=alpha, scale_factor=1.)
super().__init__(alpha=alpha, scale_factor=1.)
2 changes: 1 addition & 1 deletion analogvnn/nn/activation/Identity.py
@@ -23,7 +23,7 @@ def __init__(self, name=None):
name (str): the name of the activation function.
"""

super(Identity, self).__init__()
super().__init__()
self.name = name

def extra_repr(self) -> str:
6 changes: 3 additions & 3 deletions analogvnn/nn/activation/ReLU.py
@@ -28,7 +28,7 @@ def __init__(self, alpha: float):
alpha (float): the slope of the negative part of the activation function.
"""

super(PReLU, self).__init__()
super().__init__()
self.alpha = nn.Parameter(torch.tensor(alpha), requires_grad=False)
self._zero = nn.Parameter(torch.tensor(0), requires_grad=False)

@@ -95,7 +95,7 @@ class ReLU(PReLU):
def __init__(self):
"""Initialize the rectified linear unit (ReLU) activation function."""

super(ReLU, self).__init__(alpha=0)
super().__init__(alpha=0)

@staticmethod
def initialise(tensor: Tensor) -> Tensor:
@@ -134,4 +134,4 @@ class LeakyReLU(PReLU):
def __init__(self):
"""Initialize the leaky rectified linear unit (LeakyReLU) activation function."""

super(LeakyReLU, self).__init__(alpha=0.01)
super().__init__(alpha=0.01)
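This file keeps the pattern that `ReLU` and `LeakyReLU` are just `PReLU` with fixed slopes (alpha=0 and alpha=0.01). The forward pass is not part of this hunk; as a reminder, the standard piecewise form those slopes feed into is `f(x) = max(0, x) + alpha * min(0, x)`:

```python
# Standalone sketch of the standard PReLU piecewise definition; the analogvnn
# forward implementation itself is not shown in this diff.
import torch


def prelu(x: torch.Tensor, alpha: float) -> torch.Tensor:
    zero = torch.zeros_like(x)
    return torch.maximum(x, zero) + alpha * torch.minimum(x, zero)


x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
print(prelu(x, alpha=0.0))   # ReLU:      tensor([0.0000, 0.0000, 0.0000, 1.5000])
print(prelu(x, alpha=0.01))  # LeakyReLU: tensor([-0.0200, -0.0050, 0.0000, 1.5000])
```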
4 changes: 2 additions & 2 deletions analogvnn/nn/module/Layer.py
@@ -79,7 +79,7 @@ class Layer(nn.Module):

def __init__(self):
"""Initializes the layer."""
super(Layer, self).__init__()
super().__init__()
self._inputs = None
self._outputs = None
self._backward_module = None
@@ -95,7 +95,7 @@ def __call__(self, *inputs, **kwargs):
"""

self._forward_wrapper(self.forward)
outputs = super(Layer, self).__call__(*inputs, **kwargs)
outputs = super().__call__(*inputs, **kwargs)
if self.training:
self._inputs = ArgsKwargs(args=inputs, kwargs=kwargs)
self._outputs = outputs