Skip to content

Commit

Permalink
[pre-commit.ci] pre-commit autoupdate (#1332)
Browse files Browse the repository at this point in the history
* [pre-commit.ci] pre-commit autoupdate

updates:
- [github.com/psf/black-pre-commit-mirror: 23.12.1 → 24.1.1](psf/black-pre-commit-mirror@23.12.1...24.1.1)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Fabian Hoppe <112093564+mrfh92@users.noreply.github.com>
Co-authored-by: Michael Tarnawa <m.tarnawa@fz-juelich.de>
  • Loading branch information
3 people authored Feb 5, 2024
1 parent b7d5a8f commit 3b03a1d
Show file tree
Hide file tree
Showing 33 changed files with 92 additions and 66 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ repos:
- id: check-added-large-files
- id: check-toml
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 23.12.1
rev: 24.1.1
hooks:
- id: black
- repo: https://github.com/PyCQA/flake8
Expand Down
1 change: 1 addition & 0 deletions heat/classification/kneighborsclassifier.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Implements the k-nearest neighbors (kNN) classifier"""

from typing import Callable

import heat as ht
Expand Down
1 change: 1 addition & 0 deletions heat/cluster/kmeans.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Module Implementing the Kmeans Algorithm
"""

from typing import Optional, Union, TypeVar

import heat as ht
Expand Down
1 change: 1 addition & 0 deletions heat/cluster/kmedians.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Module Implementing the Kmedians Algorithm
"""

import heat as ht
from heat.cluster._kcluster import _KCluster
from heat.core.dndarray import DNDarray
Expand Down
1 change: 1 addition & 0 deletions heat/cluster/kmedoids.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Module Implementing the Kmedoids Algorithm
"""

import heat as ht
from heat.cluster._kcluster import _KCluster
from heat.core.dndarray import DNDarray
Expand Down
1 change: 1 addition & 0 deletions heat/core/communication.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Module implementing the communication layer of HeAT
"""

from __future__ import annotations

import numpy as np
Expand Down
1 change: 1 addition & 0 deletions heat/core/dndarray.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Provides HeAT's core data structure, the DNDarray, a distributed n-dimensional array"""

from __future__ import annotations

import math
Expand Down
14 changes: 8 additions & 6 deletions heat/core/factories.py
Original file line number Diff line number Diff line change
Expand Up @@ -318,9 +318,11 @@ def array(
try:
obj = torch.tensor(
obj,
device=device.torch_device
if device is not None
else devices.get_device().torch_device,
device=(
device.torch_device
if device is not None
else devices.get_device().torch_device
),
)
except RuntimeError:
raise TypeError(f"invalid data of type {type(obj)}")
Expand All @@ -341,9 +343,9 @@ def array(
try:
obj = torch.as_tensor(
obj,
device=device.torch_device
if device is not None
else devices.get_device().torch_device,
device=(
device.torch_device if device is not None else devices.get_device().torch_device
),
)
except RuntimeError:
raise TypeError(f"invalid data of type {type(obj)}")
Expand Down
1 change: 1 addition & 0 deletions heat/core/io.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Enables parallel I/O with data on disk."""

from __future__ import annotations

import os.path
Expand Down
1 change: 1 addition & 0 deletions heat/core/linalg/basics.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Basic linear algebra operations on distributed ``DNDarray``
"""

import itertools
import numpy as np
import torch
Expand Down
1 change: 1 addition & 0 deletions heat/core/linalg/qr.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
QR decomposition of (distributed) 2-D ``DNDarray``s.
"""

import collections
import torch
from typing import Type, Callable, Dict, Any, TypeVar, Union, Tuple
Expand Down
1 change: 1 addition & 0 deletions heat/core/linalg/solver.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Collection of solvers for systems of linear equations.
"""

import heat as ht
from ..dndarray import DNDarray
from ..sanitation import sanitize_out
Expand Down
1 change: 1 addition & 0 deletions heat/core/linalg/svd.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
file for future "full" SVD implementation
"""

from typing import Tuple
from ..dndarray import DNDarray

Expand Down
1 change: 1 addition & 0 deletions heat/core/linalg/svdtools.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
distributed hierarchical SVD
"""

import numpy as np
import collections
import torch
Expand Down
22 changes: 11 additions & 11 deletions heat/core/logical.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,10 +161,10 @@ def allclose(
return bool(_local_allclose.item())


DNDarray.allclose: Callable[
[DNDarray, DNDarray, float, float, bool], bool
] = lambda self, other, rtol=1e-05, atol=1e-08, equal_nan=False: allclose(
self, other, rtol, atol, equal_nan
DNDarray.allclose: Callable[[DNDarray, DNDarray, float, float, bool], bool] = (
lambda self, other, rtol=1e-05, atol=1e-08, equal_nan=False: allclose(
self, other, rtol, atol, equal_nan
)
)
DNDarray.allclose.__doc__ = all.__doc__

Expand Down Expand Up @@ -219,9 +219,9 @@ def local_any(t, *args, **kwargs):
)


DNDarray.any: Callable[
[DNDarray, Optional[int], Optional[DNDarray], bool], DNDarray
] = lambda self, axis=None, out=None, keepdims=False: any(self, axis, out, keepdims)
DNDarray.any: Callable[[DNDarray, Optional[int], Optional[DNDarray], bool], DNDarray] = (
lambda self, axis=None, out=None, keepdims=False: any(self, axis, out, keepdims)
)
DNDarray.any.__doc__ = any.__doc__


Expand Down Expand Up @@ -376,10 +376,10 @@ def isposinf(x: DNDarray, out: Optional[DNDarray] = None):
return _operations.__local_op(torch.isposinf, x, out, no_cast=True)


DNDarray.isclose: Callable[
[DNDarray, DNDarray, float, float, bool], DNDarray
] = lambda self, other, rtol=1e-05, atol=1e-08, equal_nan=False: isclose(
self, other, rtol, atol, equal_nan
DNDarray.isclose: Callable[[DNDarray, DNDarray, float, float, bool], DNDarray] = (
lambda self, other, rtol=1e-05, atol=1e-08, equal_nan=False: isclose(
self, other, rtol, atol, equal_nan
)
)
DNDarray.isclose.__doc__ = isclose.__doc__

Expand Down
15 changes: 8 additions & 7 deletions heat/core/manipulations.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Manipulation operations for (potentially distributed) `DNDarray`s.
"""

from __future__ import annotations

import numpy as np
Expand Down Expand Up @@ -3010,9 +3011,9 @@ def squeeze(x: DNDarray, axis: Union[int, Tuple[int, ...]] = None) -> DNDarray:
)


DNDarray.squeeze: Callable[
[DNDarray, Union[int, Tuple[int, ...]]], DNDarray
] = lambda self, axis=None: squeeze(self, axis)
DNDarray.squeeze: Callable[[DNDarray, Union[int, Tuple[int, ...]]], DNDarray] = (
lambda self, axis=None: squeeze(self, axis)
)
DNDarray.squeeze.__doc__ = squeeze.__doc__


Expand Down Expand Up @@ -3404,10 +3405,10 @@ def unique(
return return_value


DNDarray.unique: Callable[
[DNDarray, bool, bool, int], Tuple[DNDarray, torch.tensor]
] = lambda self, sorted=False, return_inverse=False, axis=None: unique(
self, sorted, return_inverse, axis
DNDarray.unique: Callable[[DNDarray, bool, bool, int], Tuple[DNDarray, torch.tensor]] = (
lambda self, sorted=False, return_inverse=False, axis=None: unique(
self, sorted, return_inverse, axis
)
)
DNDarray.unique.__doc__ = unique.__doc__

Expand Down
1 change: 1 addition & 0 deletions heat/core/random.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Provides parallel random number generators (pRNG)"""

from __future__ import annotations

import time
Expand Down
1 change: 1 addition & 0 deletions heat/core/relational.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Functions for relational operations, i.e. equal/not equal...
"""

from __future__ import annotations

import torch
Expand Down
18 changes: 9 additions & 9 deletions heat/core/rounding.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,9 @@ def abs(
return absolute_values


DNDarray.abs: Callable[
[DNDarray, Optional[DNDarray], Optional[datatype]], DNDarray
] = lambda self, out=None, dtype=None: abs(self, out, dtype)
DNDarray.abs: Callable[[DNDarray, Optional[DNDarray], Optional[datatype]], DNDarray] = (
lambda self, out=None, dtype=None: abs(self, out, dtype)
)
DNDarray.abs.__doc__ = abs.__doc__


Expand All @@ -87,9 +87,9 @@ def absolute(
return abs(x, out, dtype)


DNDarray.absolute: Callable[
[DNDarray, Optional[DNDarray], Optional[datatype]], DNDarray
] = lambda self, out=None, dtype=None: absolute(self, out, dtype)
DNDarray.absolute: Callable[[DNDarray, Optional[DNDarray], Optional[datatype]], DNDarray] = (
lambda self, out=None, dtype=None: absolute(self, out, dtype)
)
DNDarray.absolute.__doc__ = absolute.__doc__


Expand Down Expand Up @@ -330,9 +330,9 @@ def round(
return rounded_values


DNDarray.round: Callable[
[DNDarray, int, Optional[DNDarray], Optional[datatype]], DNDarray
] = lambda self, decimals=0, out=None, dtype=None: round(self, decimals, out, dtype)
DNDarray.round: Callable[[DNDarray, int, Optional[DNDarray], Optional[datatype]], DNDarray] = (
lambda self, decimals=0, out=None, dtype=None: round(self, decimals, out, dtype)
)
DNDarray.round.__doc__ = round.__doc__


Expand Down
1 change: 1 addition & 0 deletions heat/core/sanitation.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Collection of validation/sanitation routines.
"""

from __future__ import annotations

import numpy as np
Expand Down
55 changes: 28 additions & 27 deletions heat/core/statistics.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Distributed statistical operations.
"""

import numpy as np
import torch
from typing import Any, Callable, Union, Tuple, List, Optional
Expand Down Expand Up @@ -108,9 +109,9 @@ def local_argmax(*args, **kwargs):
)


DNDarray.argmax: Callable[
[DNDarray, int, DNDarray, object], DNDarray
] = lambda self, axis=None, out=None, **kwargs: argmax(self, axis, out, **kwargs)
DNDarray.argmax: Callable[[DNDarray, int, DNDarray, object], DNDarray] = (
lambda self, axis=None, out=None, **kwargs: argmax(self, axis, out, **kwargs)
)
DNDarray.argmax.__doc__ = argmax.__doc__


Expand Down Expand Up @@ -180,9 +181,9 @@ def local_argmin(*args, **kwargs):
)


DNDarray.argmin: Callable[
[DNDarray, int, DNDarray, object], DNDarray
] = lambda self, axis=None, out=None, **kwargs: argmin(self, axis, out, **kwargs)
DNDarray.argmin: Callable[[DNDarray, int, DNDarray, object], DNDarray] = (
lambda self, axis=None, out=None, **kwargs: argmin(self, axis, out, **kwargs)
)
DNDarray.argmin.__doc__ = argmin.__doc__


Expand Down Expand Up @@ -773,9 +774,9 @@ def kurtosis(
return __moment_w_axis(__torch_kurtosis, x, axis, None, unbiased, Fischer)


DNDarray.kurtosis: Callable[
[DNDarray, int, bool, bool], DNDarray
] = lambda x, axis=None, unbiased=True, Fischer=True: kurtosis(x, axis, unbiased, Fischer)
DNDarray.kurtosis: Callable[[DNDarray, int, bool, bool], DNDarray] = (
lambda x, axis=None, unbiased=True, Fischer=True: kurtosis(x, axis, unbiased, Fischer)
)
DNDarray.kurtosis.__doc__ = average.__doc__


Expand Down Expand Up @@ -832,9 +833,9 @@ def local_max(*args, **kwargs):
)


DNDarray.max: Callable[
[DNDarray, Union[int, Tuple[int, ...]], DNDarray, bool], DNDarray
] = lambda x, axis=None, out=None, keepdims=None: max(x, axis, out, keepdims)
DNDarray.max: Callable[[DNDarray, Union[int, Tuple[int, ...]], DNDarray, bool], DNDarray] = (
lambda x, axis=None, out=None, keepdims=None: max(x, axis, out, keepdims)
)
DNDarray.max.__doc__ = max.__doc__


Expand Down Expand Up @@ -1035,9 +1036,9 @@ def median(x: DNDarray, axis: Optional[int] = None, keepdims: bool = False) -> D
return percentile(x, q=50, axis=axis, keepdims=keepdims)


DNDarray.median: Callable[
[DNDarray, int, bool], DNDarray
] = lambda x, axis=None, keepdims=False: median(x, axis, keepdims)
DNDarray.median: Callable[[DNDarray, int, bool], DNDarray] = (
lambda x, axis=None, keepdims=False: median(x, axis, keepdims)
)
DNDarray.mean.__doc__ = mean.__doc__


Expand Down Expand Up @@ -1164,9 +1165,9 @@ def local_min(*args, **kwargs):
)


DNDarray.min: Callable[
[DNDarray, Union[int, Tuple[int, ...]], DNDarray, bool], DNDarray
] = lambda self, axis=None, out=None, keepdims=None: min(self, axis, out, keepdims)
DNDarray.min: Callable[[DNDarray, Union[int, Tuple[int, ...]], DNDarray, bool], DNDarray] = (
lambda self, axis=None, out=None, keepdims=None: min(self, axis, out, keepdims)
)
DNDarray.min.__doc__ = min.__doc__


Expand Down Expand Up @@ -1709,9 +1710,9 @@ def skew(x: DNDarray, axis: int = None, unbiased: bool = True) -> DNDarray:
return __moment_w_axis(__torch_skew, x, axis, None, unbiased)


DNDarray.skew: Callable[
[DNDarray, int, bool], DNDarray
] = lambda self, axis=None, unbiased=True: skew(self, axis, unbiased)
DNDarray.skew: Callable[[DNDarray, int, bool], DNDarray] = (
lambda self, axis=None, unbiased=True: skew(self, axis, unbiased)
)
DNDarray.skew.__doc__ = skew.__doc__


Expand Down Expand Up @@ -1772,9 +1773,9 @@ def std(
return exponential.sqrt(var(x, axis, ddof, **kwargs), out=None)


DNDarray.std: Callable[
[DNDarray, Union[int, Tuple[int], List[int]], int, object], DNDarray
] = lambda self, axis=None, ddof=0, **kwargs: std(self, axis, ddof, **kwargs)
DNDarray.std: Callable[[DNDarray, Union[int, Tuple[int], List[int]], int, object], DNDarray] = (
lambda self, axis=None, ddof=0, **kwargs: std(self, axis, ddof, **kwargs)
)
DNDarray.std.__doc__ = std.__doc__


Expand Down Expand Up @@ -1988,7 +1989,7 @@ def reduce_vars_elementwise(output_shape_i: torch.Tensor) -> DNDarray:
return __moment_w_axis(torch.var, x, axis, reduce_vars_elementwise, unbiased)


DNDarray.var: Callable[
[DNDarray, Union[int, Tuple[int], List[int]], int, object], DNDarray
] = lambda self, axis=None, ddof=0, **kwargs: var(self, axis, ddof, **kwargs)
DNDarray.var: Callable[[DNDarray, Union[int, Tuple[int], List[int]], int, object], DNDarray] = (
lambda self, axis=None, ddof=0, **kwargs: var(self, axis, ddof, **kwargs)
)
DNDarray.var.__doc__ = var.__doc__
1 change: 0 additions & 1 deletion heat/core/tiling.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
Tiling functions/classes. With these classes you can address blocks of data in a DNDarray
"""


from __future__ import annotations
import itertools
import torch
Expand Down
Loading

0 comments on commit 3b03a1d

Please sign in to comment.