diff --git a/adaptive/learner/average_learner.py b/adaptive/learner/average_learner.py
index ef27a2ef4..2705eb04e 100644
--- a/adaptive/learner/average_learner.py
+++ b/adaptive/learner/average_learner.py
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
 from math import sqrt
+from numbers import Integral as Int
+from numbers import Real
 from typing import Callable
 
 import cloudpickle
@@ -8,7 +10,7 @@
 
 from adaptive.learner.base_learner import BaseLearner
 from adaptive.notebook_integration import ensure_holoviews
-from adaptive.types import Float, Int, Real
+from adaptive.types import Float
 from adaptive.utils import (
     assign_defaults,
     cache_latest,
diff --git a/adaptive/learner/average_learner1D.py b/adaptive/learner/average_learner1D.py
index ef99e50a5..aa4be9d71 100644
--- a/adaptive/learner/average_learner1D.py
+++ b/adaptive/learner/average_learner1D.py
@@ -5,6 +5,8 @@
 from collections import defaultdict
 from copy import deepcopy
 from math import hypot
+from numbers import Integral as Int
+from numbers import Real
 from typing import Callable, DefaultDict, Iterable, List, Sequence, Tuple
 
 import numpy as np
@@ -14,7 +16,6 @@
 
 from adaptive.learner.learner1D import Learner1D, _get_intervals
 from adaptive.notebook_integration import ensure_holoviews
-from adaptive.types import Int, Real
 from adaptive.utils import assign_defaults, partial_function_from_dataframe
 
 try:
@@ -576,10 +577,10 @@ def tell_many_at_point(self, x: Real, seed_y_mapping: dict[int, Real]) -> None:
             self._update_interpolated_loss_in_interval(*interval)
         self._oldscale = deepcopy(self._scale)
 
-    def _get_data(self) -> dict[Real, Real]:
+    def _get_data(self) -> dict[Real, dict[Int, Real]]:
         return self._data_samples
 
-    def _set_data(self, data: dict[Real, Real]) -> None:
+    def _set_data(self, data: dict[Real, dict[Int, Real]]) -> None:
         if data:
             for x, samples in data.items():
                 self.tell_many_at_point(x, samples)
diff --git a/adaptive/learner/balancing_learner.py b/adaptive/learner/balancing_learner.py
index 0215b3af6..593331792 100644
--- a/adaptive/learner/balancing_learner.py
+++ b/adaptive/learner/balancing_learner.py
@@ -1,11 +1,13 @@
 from __future__ import annotations
 
 import itertools
+import numbers
 from collections import defaultdict
 from collections.abc import Iterable
 from contextlib import suppress
 from functools import partial
 from operator import itemgetter
+from typing import Any, Callable, Dict, Sequence, Tuple, Union
 
 import numpy as np
 
@@ -13,20 +15,33 @@
 from adaptive.notebook_integration import ensure_holoviews
 from adaptive.utils import cache_latest, named_product, restore
 
+try:
+    from typing import Literal, TypeAlias
+except ImportError:
+    from typing_extensions import Literal, TypeAlias
+
 try:
     import pandas
 
     with_pandas = True
-
 except ModuleNotFoundError:
     with_pandas = False
 
 
-def dispatch(child_functions, arg):
+def dispatch(child_functions: list[Callable], arg: Any) -> Any:
     index, x = arg
     return child_functions[index](x)
 
 
+STRATEGY_TYPE: TypeAlias = Literal["loss_improvements", "loss", "npoints", "cycle"]
+
+CDIMS_TYPE: TypeAlias = Union[
+    Sequence[Dict[str, Any]],
+    Tuple[Sequence[str], Sequence[Tuple[Any, ...]]],
+    None,
+]
+
+
 class BalancingLearner(BaseLearner):
     r"""Choose the optimal points from a set of learners.
 
@@ -78,13 +93,19 @@ class BalancingLearner(BaseLearner):
     behave in an undefined way. Change the `strategy` in that case.
""" - def __init__(self, learners, *, cdims=None, strategy="loss_improvements"): + def __init__( + self, + learners: list[BaseLearner], + *, + cdims: CDIMS_TYPE = None, + strategy: STRATEGY_TYPE = "loss_improvements", + ) -> None: self.learners = learners # Naively we would make 'function' a method, but this causes problems # when using executors from 'concurrent.futures' because we have to # pickle the whole learner. - self.function = partial(dispatch, [l.function for l in self.learners]) + self.function = partial(dispatch, [l.function for l in self.learners]) # type: ignore self._ask_cache = {} self._loss = {} @@ -96,7 +117,7 @@ def __init__(self, learners, *, cdims=None, strategy="loss_improvements"): "A BalacingLearner can handle only one type" " of learners." ) - self.strategy = strategy + self.strategy: STRATEGY_TYPE = strategy def new(self) -> BalancingLearner: """Create a new `BalancingLearner` with the same parameters.""" @@ -107,21 +128,21 @@ def new(self) -> BalancingLearner: ) @property - def data(self): + def data(self) -> dict[tuple[int, Any], Any]: data = {} for i, l in enumerate(self.learners): data.update({(i, p): v for p, v in l.data.items()}) return data @property - def pending_points(self): + def pending_points(self) -> set[tuple[int, Any]]: pending_points = set() for i, l in enumerate(self.learners): pending_points.update({(i, p) for p in l.pending_points}) return pending_points @property - def npoints(self): + def npoints(self) -> int: return sum(l.npoints for l in self.learners) @property @@ -134,7 +155,7 @@ def nsamples(self): ) @property - def strategy(self): + def strategy(self) -> STRATEGY_TYPE: """Can be either 'loss_improvements' (default), 'loss', 'npoints', or 'cycle'. The points that the `BalancingLearner` choses can be either based on: the best 'loss_improvements', the smallest total 'loss' of @@ -145,7 +166,7 @@ def strategy(self): return self._strategy @strategy.setter - def strategy(self, strategy): + def strategy(self, strategy: STRATEGY_TYPE) -> None: self._strategy = strategy if strategy == "loss_improvements": self._ask_and_tell = self._ask_and_tell_based_on_loss_improvements @@ -162,7 +183,9 @@ def strategy(self, strategy): ' strategy="npoints", or strategy="cycle" is implemented.' 
) - def _ask_and_tell_based_on_loss_improvements(self, n): + def _ask_and_tell_based_on_loss_improvements( + self, n: int + ) -> tuple[list[tuple[int, Any]], list[float]]: selected = [] # tuples ((learner_index, point), loss_improvement) total_points = [l.npoints + len(l.pending_points) for l in self.learners] for _ in range(n): @@ -185,7 +208,9 @@ def _ask_and_tell_based_on_loss_improvements(self, n): points, loss_improvements = map(list, zip(*selected)) return points, loss_improvements - def _ask_and_tell_based_on_loss(self, n): + def _ask_and_tell_based_on_loss( + self, n: int + ) -> tuple[list[tuple[int, Any]], list[float]]: selected = [] # tuples ((learner_index, point), loss_improvement) total_points = [l.npoints + len(l.pending_points) for l in self.learners] for _ in range(n): @@ -206,7 +231,9 @@ def _ask_and_tell_based_on_loss(self, n): points, loss_improvements = map(list, zip(*selected)) return points, loss_improvements - def _ask_and_tell_based_on_npoints(self, n): + def _ask_and_tell_based_on_npoints( + self, n: numbers.Integral + ) -> tuple[list[tuple[numbers.Integral, Any]], list[float]]: selected = [] # tuples ((learner_index, point), loss_improvement) total_points = [l.npoints + len(l.pending_points) for l in self.learners] for _ in range(n): @@ -222,7 +249,9 @@ def _ask_and_tell_based_on_npoints(self, n): points, loss_improvements = map(list, zip(*selected)) return points, loss_improvements - def _ask_and_tell_based_on_cycle(self, n): + def _ask_and_tell_based_on_cycle( + self, n: int + ) -> tuple[list[tuple[numbers.Integral, Any]], list[float]]: points, loss_improvements = [], [] for _ in range(n): index = next(self._cycle) @@ -233,7 +262,9 @@ def _ask_and_tell_based_on_cycle(self, n): return points, loss_improvements - def ask(self, n, tell_pending=True): + def ask( + self, n: int, tell_pending: bool = True + ) -> tuple[list[tuple[numbers.Integral, Any]], list[float]]: """Chose points for learners.""" if n == 0: return [], [] @@ -244,20 +275,20 @@ def ask(self, n, tell_pending=True): else: return self._ask_and_tell(n) - def tell(self, x, y): + def tell(self, x: tuple[numbers.Integral, Any], y: Any) -> None: index, x = x self._ask_cache.pop(index, None) self._loss.pop(index, None) self._pending_loss.pop(index, None) self.learners[index].tell(x, y) - def tell_pending(self, x): + def tell_pending(self, x: tuple[numbers.Integral, Any]) -> None: index, x = x self._ask_cache.pop(index, None) self._loss.pop(index, None) self.learners[index].tell_pending(x) - def _losses(self, real=True): + def _losses(self, real: bool = True) -> list[float]: losses = [] loss_dict = self._loss if real else self._pending_loss @@ -269,11 +300,16 @@ def _losses(self, real=True): return losses @cache_latest - def loss(self, real=True): + def loss(self, real: bool = True) -> float: losses = self._losses(real) return max(losses) - def plot(self, cdims=None, plotter=None, dynamic=True): + def plot( + self, + cdims: CDIMS_TYPE = None, + plotter: Callable[[BaseLearner], Any] | None = None, + dynamic: bool = True, + ): """Returns a DynamicMap with sliders. 
 
         Parameters
@@ -346,13 +382,19 @@ def plot_function(*args):
         vals = {d.name: d.values for d in dm.dimensions() if d.values}
         return hv.HoloMap(dm.select(**vals))
 
-    def remove_unfinished(self):
+    def remove_unfinished(self) -> None:
         """Remove uncomputed data from the learners."""
         for learner in self.learners:
             learner.remove_unfinished()
 
     @classmethod
-    def from_product(cls, f, learner_type, learner_kwargs, combos):
+    def from_product(
+        cls,
+        f,
+        learner_type: type[BaseLearner],
+        learner_kwargs: dict[str, Any],
+        combos: dict[str, Sequence[Any]],
+    ) -> BalancingLearner:
         """Create a `BalancingLearner` with learners of all combinations of
         named variables’ values. The `cdims` will be set correctly, so calling
         `learner.plot` will be a `holoviews.core.HoloMap` with the correct labels.
@@ -448,7 +490,11 @@ def load_dataframe(
         for i, gr in df.groupby(index_name):
             self.learners[i].load_dataframe(gr, **kwargs)
 
-    def save(self, fname, compress=True):
+    def save(
+        self,
+        fname: Callable[[BaseLearner], str] | Sequence[str],
+        compress: bool = True,
+    ) -> None:
         """Save the data of the child learners into pickle files
         in a directory.
 
@@ -486,7 +532,11 @@ def save(self, fname, compress=True):
             for l in self.learners:
                 l.save(fname(l), compress=compress)
 
-    def load(self, fname, compress=True):
+    def load(
+        self,
+        fname: Callable[[BaseLearner], str] | Sequence[str],
+        compress: bool = True,
+    ) -> None:
         """Load the data of the child learners from pickle files
         in a directory.
 
@@ -510,20 +560,20 @@ def load(self, fname, compress=True):
             for l in self.learners:
                 l.load(fname(l), compress=compress)
 
-    def _get_data(self):
+    def _get_data(self) -> list[Any]:
         return [l._get_data() for l in self.learners]
 
-    def _set_data(self, data):
+    def _set_data(self, data: list[Any]):
         for l, _data in zip(self.learners, data):
             l._set_data(_data)
 
-    def __getstate__(self):
+    def __getstate__(self) -> tuple[list[BaseLearner], CDIMS_TYPE, STRATEGY_TYPE]:
         return (
             self.learners,
             self._cdims_default,
             self.strategy,
         )
 
-    def __setstate__(self, state):
+    def __setstate__(self, state: tuple[list[BaseLearner], CDIMS_TYPE, STRATEGY_TYPE]):
         learners, cdims, strategy = state
         self.__init__(learners, cdims=cdims, strategy=strategy)
diff --git a/adaptive/learner/data_saver.py b/adaptive/learner/data_saver.py
index 8da281d7f..074718e9b 100644
--- a/adaptive/learner/data_saver.py
+++ b/adaptive/learner/data_saver.py
@@ -2,6 +2,7 @@
 
 import functools
 from collections import OrderedDict
+from typing import Any, Callable
 
 from adaptive.learner.base_learner import BaseLearner
 from adaptive.utils import copy_docstring_from
@@ -39,7 +40,7 @@ class DataSaver:
     >>> learner = DataSaver(_learner, arg_picker=itemgetter('y'))
     """
 
-    def __init__(self, learner, arg_picker):
+    def __init__(self, learner: BaseLearner, arg_picker: Callable) -> None:
         self.learner = learner
         self.extra_data = OrderedDict()
         self.function = learner.function
@@ -49,21 +50,21 @@ def new(self) -> DataSaver:
         """Return a new `DataSaver` with the same `arg_picker` and `learner`."""
         return DataSaver(self.learner.new(), self.arg_picker)
 
-    def __getattr__(self, attr):
+    def __getattr__(self, attr: str) -> Any:
         return getattr(self.learner, attr)
 
     @copy_docstring_from(BaseLearner.tell)
-    def tell(self, x, result):
+    def tell(self, x: Any, result: Any) -> None:
         y = self.arg_picker(result)
         self.extra_data[x] = result
         self.learner.tell(x, y)
 
     @copy_docstring_from(BaseLearner.tell_pending)
-    def tell_pending(self, x):
+    def tell_pending(self, x: Any) -> None:
         self.learner.tell_pending(x)
 
     def to_dataframe(
-        self, extra_data_name: str = "extra_data", **kwargs
+        self, extra_data_name: str = "extra_data", **kwargs: Any
     ) -> pandas.DataFrame:
         """Return the data as a concatenated `pandas.DataFrame` from child learners.
@@ -98,7 +99,7 @@ def load_dataframe(
         extra_data_name: str = "extra_data",
         input_names: tuple[str] = (),
         **kwargs,
-    ):
+    ) -> None:
         """Load the data from a `pandas.DataFrame` into the learner.
 
         Parameters
@@ -122,33 +123,36 @@ def load_dataframe(
             key = _to_key(x[:-1])
             self.extra_data[key] = x[-1]
 
-    def _get_data(self):
+    def _get_data(self) -> tuple[Any, OrderedDict]:
         return self.learner._get_data(), self.extra_data
 
-    def _set_data(self, data):
+    def _set_data(
+        self,
+        data: tuple[Any, OrderedDict],
+    ) -> None:
         learner_data, self.extra_data = data
         self.learner._set_data(learner_data)
 
-    def __getstate__(self):
+    def __getstate__(self) -> tuple[BaseLearner, Callable, OrderedDict]:
         return (
             self.learner,
             self.arg_picker,
             self.extra_data,
         )
 
-    def __setstate__(self, state):
+    def __setstate__(self, state: tuple[BaseLearner, Callable, OrderedDict]) -> None:
         learner, arg_picker, extra_data = state
         self.__init__(learner, arg_picker)
         self.extra_data = extra_data
 
     @copy_docstring_from(BaseLearner.save)
-    def save(self, fname, compress=True):
+    def save(self, fname, compress=True) -> None:
         # We copy this method because the 'DataSaver' is not a
         # subclass of the 'BaseLearner'.
         BaseLearner.save(self, fname, compress)
 
     @copy_docstring_from(BaseLearner.load)
-    def load(self, fname, compress=True):
+    def load(self, fname, compress=True) -> None:
         # We copy this method because the 'DataSaver' is not a
         # subclass of the 'BaseLearner'.
         BaseLearner.load(self, fname, compress)
diff --git a/adaptive/learner/integrator_coeffs.py b/adaptive/learner/integrator_coeffs.py
index 9ccc54be1..711f30b76 100644
--- a/adaptive/learner/integrator_coeffs.py
+++ b/adaptive/learner/integrator_coeffs.py
@@ -1,4 +1,5 @@
 # Based on an adaptive quadrature algorithm by Pedro Gonnet
+from __future__ import annotations
 
 from collections import defaultdict
 from fractions import Fraction
@@ -8,7 +9,7 @@
 import scipy.linalg
 
 
-def legendre(n):
+def legendre(n: int) -> list[list[Fraction]]:
     """Return the first n Legendre polynomials.
 
     The polynomials have *standard* normalization, i.e.
@@ -29,7 +30,7 @@ def legendre(n):
     return result
 
 
-def newton(n):
+def newton(n: int) -> np.ndarray:
     """Compute the monomial coefficients of the Newton polynomial
     over the nodes of the n-point Clenshaw-Curtis quadrature rule.
     """
@@ -86,7 +87,7 @@ def newton(n):
     return cf
 
 
-def scalar_product(a, b):
+def scalar_product(a: list[Fraction], b: list[Fraction]) -> Fraction:
     """Compute the polynomial scalar product int_-1^1 dx a(x) b(x).
 
     The args must be sequences of polynomial coefficients. This
@@ -107,7 +108,7 @@ def scalar_product(a, b):
     return 2 * sum(c[i] / (i + 1) for i in range(0, lc, 2))
 
 
-def calc_bdef(ns):
+def calc_bdef(ns: tuple[int, int, int, int]) -> list[np.ndarray]:
     """Calculate the decompositions of Newton polynomials (over the nodes
     of the n-point Clenshaw-Curtis quadrature rule) in terms of
    Legendre polynomials.
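A note on a pattern these hunks depend on: `integrator_coeffs.py` gains `from __future__ import annotations` (PEP 563) precisely so that built-in generics such as `list[list[Fraction]]` and `tuple[int, int, int, int]` remain valid annotations on Python 3.7/3.8. A minimal sketch of why the future-import matters; `first_legendre` is a hypothetical toy, not code from this diff:

```python
# Postponed evaluation of annotations (PEP 563): annotations are stored as
# strings and never evaluated at runtime, so `list[list[Fraction]]` parses
# fine even on Python 3.7/3.8, where subscripting `list` outside annotations
# raises "TypeError: 'type' object is not subscriptable".
from __future__ import annotations

from fractions import Fraction


def first_legendre(n: int) -> list[list[Fraction]]:
    # Hypothetical helper mirroring the seed values of `legendre` above:
    # the first min(n, 2) Legendre polynomials as Fraction coefficients.
    result = [[Fraction(1)], [Fraction(0), Fraction(1)]]
    return result[:n]


print(first_legendre(2))  # [[Fraction(1, 1)], [Fraction(0, 1), Fraction(1, 1)]]
```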
@@ -133,7 +134,7 @@ def calc_bdef(ns):
     return result
 
 
-def calc_V(x, n):
+def calc_V(x: np.ndarray, n: int) -> np.ndarray:
     V = [np.ones(x.shape), x.copy()]
     for i in range(2, n):
         V.append((2 * i - 1) / i * x * V[-1] - (i - 1) / i * V[-2])
diff --git a/adaptive/learner/integrator_learner.py b/adaptive/learner/integrator_learner.py
index da9bd7ffc..f0712778d 100644
--- a/adaptive/learner/integrator_learner.py
+++ b/adaptive/learner/integrator_learner.py
@@ -5,6 +5,7 @@
 from collections import defaultdict
 from math import sqrt
 from operator import attrgetter
+from typing import TYPE_CHECKING, Callable
 
 import cloudpickle
 import numpy as np
@@ -25,7 +26,7 @@
     with_pandas = False
 
 
-def _downdate(c, nans, depth):
+def _downdate(c: np.ndarray, nans: list[int], depth: int) -> np.ndarray:
     # This is algorithm 5 from the thesis of Pedro Gonnet.
     b = coeff.b_def[depth].copy()
     m = coeff.ns[depth] - 1
@@ -45,7 +46,7 @@ def _downdate(c, nans, depth):
     return c
 
 
-def _zero_nans(fx):
+def _zero_nans(fx: np.ndarray) -> list[int]:
     """Caution: this function modifies fx."""
     nans = []
     for i in range(len(fx)):
@@ -55,7 +56,7 @@ def _zero_nans(fx):
     return nans
 
 
-def _calc_coeffs(fx, depth):
+def _calc_coeffs(fx: np.ndarray, depth: int) -> np.ndarray:
     """Caution: this function modifies fx."""
     nans = _zero_nans(fx)
     c_new = coeff.V_inv[depth] @ fx
@@ -135,19 +136,24 @@ class _Interval:
         "removed",
     ]
 
-    def __init__(self, a, b, depth, rdepth):
-        self.children = []
-        self.data = {}
+    def __init__(self, a: int | float, b: int | float, depth: int, rdepth: int) -> None:
+        self.children: list[_Interval] = []
+        self.data: dict[float, float] = {}
         self.a = a
         self.b = b
         self.depth = depth
         self.rdepth = rdepth
-        self.done_leaves = set()
-        self.depth_complete = None
+        self.done_leaves: set[_Interval] = set()
+        self.depth_complete: int | None = None
         self.removed = False
+        if TYPE_CHECKING:
+            self.ndiv: int
+            self.parent: _Interval | None
+            self.err: float
+            self.c: np.ndarray
 
     @classmethod
-    def make_first(cls, a, b, depth=2):
+    def make_first(cls, a: int, b: int, depth: int = 2) -> _Interval:
         ival = _Interval(a, b, depth, rdepth=1)
         ival.ndiv = 0
         ival.parent = None
@@ -155,7 +161,7 @@ def make_first(cls, a, b, depth=2):
         return ival
 
     @property
-    def T(self):
+    def T(self) -> np.ndarray:
         """Get the correct shift matrix.
 
         Should only be called on children of a split interval.
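The `_Interval.__init__` hunk above declares the late-bound attributes (`ndiv`, `parent`, `err`, `c`) under `if TYPE_CHECKING:`, so a type checker learns about them while nothing executes or allocates at runtime; this also plays well with `__slots__`. A minimal sketch of the same trick, using a hypothetical `Node` class that is not part of this diff:

```python
from __future__ import annotations

from typing import TYPE_CHECKING


class Node:
    # __slots__ means instances get no __dict__, mirroring _Interval above,
    # so the late attributes must be declared without runtime assignments.
    __slots__ = ["value", "parent"]

    def __init__(self, value: float) -> None:
        self.value = value
        if TYPE_CHECKING:
            # Never executed (TYPE_CHECKING is False at runtime); mypy still
            # records that `parent` exists and is an Optional[Node].
            self.parent: Node | None


root = Node(1.0)
root.parent = None  # bound after construction, the way make_first() sets ndiv/parent
```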
@@ -166,24 +172,24 @@ def T(self):
         assert left != right
         return coeff.T_left if left else coeff.T_right
 
-    def refinement_complete(self, depth):
+    def refinement_complete(self, depth: int) -> bool:
         """The interval has all the y-values to calculate the integral."""
         if len(self.data) < coeff.ns[depth]:
             return False
         return all(p in self.data for p in self.points(depth))
 
-    def points(self, depth=None):
+    def points(self, depth: int | None = None) -> np.ndarray:
         if depth is None:
             depth = self.depth
         a = self.a
         b = self.b
         return (a + b) / 2 + (b - a) * coeff.xi[depth] / 2
 
-    def refine(self):
+    def refine(self) -> _Interval:
         self.depth += 1
         return self
 
-    def split(self):
+    def split(self) -> list[_Interval]:
         points = self.points()
         m = points[len(points) // 2]
         ivals = [
@@ -198,10 +204,10 @@ def split(self):
 
         return ivals
 
-    def calc_igral(self):
+    def calc_igral(self) -> None:
         self.igral = (self.b - self.a) * self.c[0] / sqrt(2)
 
-    def update_heuristic_err(self, value):
+    def update_heuristic_err(self, value: float) -> None:
         """Sets the error of an interval using a heuristic (half the error
         of the parent) when the actual error cannot be calculated due to its
         parents not being finished yet. This error is propagated down to its
@@ -214,7 +220,7 @@ def update_heuristic_err(self, value):
                 continue
             child.update_heuristic_err(value / 2)
 
-    def calc_err(self, c_old):
+    def calc_err(self, c_old: np.ndarray) -> float:
         c_new = self.c
         c_diff = np.zeros(max(len(c_old), len(c_new)))
         c_diff[: len(c_old)] = c_old
@@ -226,9 +232,9 @@ def calc_err(self, c_old):
             child.update_heuristic_err(self.err / 2)
         return c_diff
 
-    def calc_ndiv(self):
+    def calc_ndiv(self) -> None:
         div = self.parent.c00 and self.c00 / self.parent.c00 > 2
-        self.ndiv += div
+        self.ndiv += int(div)
 
         if self.ndiv > coeff.ndiv_max and 2 * self.ndiv > self.rdepth:
             raise DivergentIntegralError
@@ -237,7 +243,7 @@ def calc_ndiv(self):
             for child in self.children:
                 child.update_ndiv_recursively()
 
-    def update_ndiv_recursively(self):
+    def update_ndiv_recursively(self) -> None:
         self.ndiv += 1
         if self.ndiv > coeff.ndiv_max and 2 * self.ndiv > self.rdepth:
             raise DivergentIntegralError
@@ -245,7 +251,7 @@ def update_ndiv_recursively(self):
         for child in self.children:
             child.update_ndiv_recursively()
 
-    def complete_process(self, depth):
+    def complete_process(self, depth: int) -> tuple[bool, bool] | tuple[bool, np.bool_]:
         """Calculate the integral contribution and error from this
         interval, and update the done leaves of all ancestor intervals."""
         assert self.depth_complete is None or self.depth_complete == depth - 1
@@ -322,7 +328,7 @@ def complete_process(self, depth):
 
         return force_split, remove
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         lst = [
             f"(a, b)=({self.a:.5f}, {self.b:.5f})",
             f"depth={self.depth}",
@@ -334,7 +340,7 @@ def __repr__(self):
 
 
 class IntegratorLearner(BaseLearner):
-    def __init__(self, function, bounds, tol):
+    def __init__(self, function: Callable, bounds: tuple[int, int], tol: float) -> None:
         """
         Parameters
         ----------
@@ -368,16 +374,18 @@ def __init__(self, function, bounds, tol):
         plot : hv.Scatter
             Plots all the points that are evaluated.
         """
-        self.function = function
+        self.function = function  # type: ignore
         self.bounds = bounds
         self.tol = tol
         self.max_ivals = 1000
-        self.priority_split = []
+        self.priority_split: list[_Interval] = []
         self.data = {}
         self.pending_points = set()
-        self._stack = []
-        self.x_mapping = defaultdict(lambda: SortedSet([], key=attrgetter("rdepth")))
-        self.ivals = set()
+        self._stack: list[float] = []
+        self.x_mapping: dict[float, SortedSet] = defaultdict(
+            lambda: SortedSet([], key=attrgetter("rdepth"))
+        )
+        self.ivals: set[_Interval] = set()
         ival = _Interval.make_first(*self.bounds)
         self.add_ival(ival)
         self.first_ival = ival
@@ -387,10 +395,10 @@ def new(self) -> IntegratorLearner:
         return IntegratorLearner(self.function, self.bounds, self.tol)
 
     @property
-    def approximating_intervals(self):
+    def approximating_intervals(self) -> set[_Interval]:
         return self.first_ival.done_leaves
 
-    def tell(self, point, value):
+    def tell(self, point: float, value: float) -> None:
         if point not in self.x_mapping:
             raise ValueError(f"Point {point} doesn't belong to any interval")
         self.data[point] = value
@@ -426,7 +434,7 @@ def tell(self, point, value):
     def tell_pending(self):
         pass
 
-    def propagate_removed(self, ival):
+    def propagate_removed(self, ival: _Interval) -> None:
         def _propagate_removed_down(ival):
             ival.removed = True
             self.ivals.discard(ival)
@@ -436,7 +444,7 @@ def _propagate_removed_down(ival):
 
         _propagate_removed_down(ival)
 
-    def add_ival(self, ival):
+    def add_ival(self, ival: _Interval) -> None:
         for x in ival.points():
             # Update the mappings
             self.x_mapping[x].add(ival)
@@ -447,7 +455,7 @@ def add_ival(self, ival):
                 self._stack.append(x)
         self.ivals.add(ival)
 
-    def ask(self, n, tell_pending=True):
+    def ask(self, n: int, tell_pending: bool = True) -> tuple[list[float], list[float]]:
         """Choose points for learners."""
         if not tell_pending:
             with restore(self):
@@ -455,7 +463,7 @@ def ask(self, n, tell_pending=True):
         else:
             return self._ask_and_tell_pending(n)
 
-    def _ask_and_tell_pending(self, n):
+    def _ask_and_tell_pending(self, n: int) -> tuple[list[float], list[float]]:
         points, loss_improvements = self.pop_from_stack(n)
         n_left = n - len(points)
         while n_left > 0:
@@ -471,7 +479,7 @@ def _ask_and_tell_pending(self, n):
 
         return points, loss_improvements
 
-    def pop_from_stack(self, n):
+    def pop_from_stack(self, n: int) -> tuple[list[float], list[float]]:
         points = self._stack[:n]
         self._stack = self._stack[n:]
         loss_improvements = [
@@ -482,7 +490,7 @@ def pop_from_stack(self, n):
     def remove_unfinished(self):
         pass
 
-    def _fill_stack(self):
+    def _fill_stack(self) -> list[float]:
         # XXX: to-do if all the ivals have err=inf, take the interval
         # with the lowest rdepth and no children.
         force_split = bool(self.priority_split)
@@ -518,16 +526,16 @@ def _fill_stack(self):
         return self._stack
 
     @property
-    def npoints(self):
+    def npoints(self) -> int:
         """Number of evaluated points."""
         return len(self.data)
 
     @property
-    def igral(self):
+    def igral(self) -> float:
         return sum(i.igral for i in self.approximating_intervals)
 
     @property
-    def err(self):
+    def err(self) -> float:
         if self.approximating_intervals:
             err = sum(i.err for i in self.approximating_intervals)
             if err > sys.float_info.max:
diff --git a/adaptive/learner/learner1D.py b/adaptive/learner/learner1D.py
index 3af731e29..9987da468 100644
--- a/adaptive/learner/learner1D.py
+++ b/adaptive/learner/learner1D.py
@@ -4,6 +4,8 @@
 import itertools
 import math
 from copy import copy, deepcopy
+from numbers import Integral as Int
+from numbers import Real
 from typing import Any, Callable, Dict, List, Sequence, Tuple, Union
 
 import cloudpickle
@@ -15,7 +17,7 @@
 from adaptive.learner.learnerND import volume
 from adaptive.learner.triangulation import simplex_volume_in_embedding
 from adaptive.notebook_integration import ensure_holoviews
-from adaptive.types import Float, Int, Real
+from adaptive.types import Float
 from adaptive.utils import (
     assign_defaults,
     cache_latest,
diff --git a/adaptive/learner/learner2D.py b/adaptive/learner/learner2D.py
index 6c5be845f..385f5b7d5 100644
--- a/adaptive/learner/learner2D.py
+++ b/adaptive/learner/learner2D.py
@@ -5,14 +5,17 @@
 from collections import OrderedDict
 from copy import copy
 from math import sqrt
+from typing import Callable, Iterable
 
 import cloudpickle
 import numpy as np
 from scipy import interpolate
+from scipy.interpolate.interpnd import LinearNDInterpolator
 
 from adaptive.learner.base_learner import BaseLearner
 from adaptive.learner.triangulation import simplex_volume_in_embedding
 from adaptive.notebook_integration import ensure_holoviews
+from adaptive.types import Bool, Float, Real
 from adaptive.utils import (
     assign_defaults,
     cache_latest,
@@ -30,7 +33,7 @@
 # Learner2D and helper functions.
 
 
-def deviations(ip):
+def deviations(ip: LinearNDInterpolator) -> list[np.ndarray]:
     """Returns the deviation of the linear estimate.
 
     Is useful when defining custom loss functions.
@@ -68,7 +71,7 @@ def deviation(p, v, g):
     return devs
 
 
-def areas(ip):
+def areas(ip: LinearNDInterpolator) -> np.ndarray:
     """Returns the area per triangle of the triangulation inside
     a `LinearNDInterpolator` instance.
 
@@ -89,7 +92,7 @@ def areas(ip):
     return areas
 
 
-def uniform_loss(ip):
+def uniform_loss(ip: LinearNDInterpolator) -> np.ndarray:
     """Loss function that samples the domain uniformly.
 
     Works with `~adaptive.Learner2D` only.
@@ -120,7 +123,9 @@ def uniform_loss(ip):
     return np.sqrt(areas(ip))
 
 
-def resolution_loss_function(min_distance=0, max_distance=1):
+def resolution_loss_function(
+    min_distance: float = 0, max_distance: float = 1
+) -> Callable[[LinearNDInterpolator], np.ndarray]:
     """Loss function that is similar to the `default_loss` function, but you
     can set the maximum and minimum size of a triangle.
 
@@ -159,7 +164,7 @@ def resolution_loss(ip):
     return resolution_loss
 
 
-def minimize_triangle_surface_loss(ip):
+def minimize_triangle_surface_loss(ip: LinearNDInterpolator) -> np.ndarray:
     """Loss function that is similar to the distance loss function in the
     `~adaptive.Learner1D`. The loss is the area spanned by the 3D
     vectors of the vertices.
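The `learner2D.py` hunks above pin down the contract for custom losses: a loss function receives the learner's `LinearNDInterpolator` and returns one value per triangle, i.e. `Callable[[LinearNDInterpolator], np.ndarray]`. A sketch of a user-defined loss written against that contract; `area_loss` is illustrative (it simply reproduces `uniform_loss`), while `areas`, `Learner2D`, and `loss_per_triangle` are the real APIs annotated above:

```python
import numpy as np

from adaptive import Learner2D
from adaptive.learner.learner2D import areas


def area_loss(ip) -> np.ndarray:
    # One loss value per triangle of the interpolation; larger triangles
    # get a larger loss, so sampling stays uniform over the domain.
    return np.sqrt(areas(ip))


learner = Learner2D(
    lambda xy: xy[0] * xy[1],
    bounds=((-1, 1), (-1, 1)),
    loss_per_triangle=area_loss,
)
```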
@@ -205,7 +210,7 @@ def _get_vectors(points):
     return np.linalg.norm(np.cross(a, b) / 2, axis=1)
 
 
-def default_loss(ip):
+def default_loss(ip: LinearNDInterpolator) -> np.ndarray:
     """Loss function that combines `deviations` and `areas` of the triangles.
 
     Works with `~adaptive.Learner2D` only.
@@ -225,7 +230,7 @@ def default_loss(ip):
     return losses
 
 
-def choose_point_in_triangle(triangle, max_badness):
+def choose_point_in_triangle(triangle: np.ndarray, max_badness: int) -> np.ndarray:
     """Choose a new point inside a triangle.
 
     If the ratio of the longest edge of the triangle squared
@@ -364,7 +369,12 @@ class Learner2D(BaseLearner):
         over each triangle.
     """
 
-    def __init__(self, function, bounds, loss_per_triangle=None):
+    def __init__(
+        self,
+        function: Callable,
+        bounds: tuple[tuple[Real, Real], tuple[Real, Real]],
+        loss_per_triangle: Callable | None = None,
+    ) -> None:
         self.ndim = len(bounds)
         self._vdim = None
         self.loss_per_triangle = loss_per_triangle or default_loss
@@ -379,7 +389,7 @@ def __init__(self, function, bounds, loss_per_triangle=None):
         self._bounds_points = list(itertools.product(*bounds))
         self._stack.update({p: np.inf for p in self._bounds_points})
-        self.function = function
+        self.function = function  # type: ignore
         self._ip = self._ip_combined = None
 
         self.stack_size = 10
@@ -388,7 +398,7 @@ def new(self) -> Learner2D:
         return Learner2D(self.function, self.bounds, self.loss_per_triangle)
 
     @property
-    def xy_scale(self):
+    def xy_scale(self) -> np.ndarray:
         xy_scale = self._xy_scale
         if self.aspect_ratio == 1:
             return xy_scale
@@ -486,21 +496,21 @@ def load_dataframe(
                 self.function, df, function_prefix
             )
 
-    def _scale(self, points):
+    def _scale(self, points: list[tuple[float, float]] | np.ndarray) -> np.ndarray:
         points = np.asarray(points, dtype=float)
         return (points - self.xy_mean) / self.xy_scale
 
-    def _unscale(self, points):
+    def _unscale(self, points: np.ndarray) -> np.ndarray:
         points = np.asarray(points, dtype=float)
         return points * self.xy_scale + self.xy_mean
 
     @property
-    def npoints(self):
+    def npoints(self) -> int:
         """Number of evaluated points."""
         return len(self.data)
 
     @property
-    def vdim(self):
+    def vdim(self) -> int:
         """Length of the output of ``learner.function``.
 
         If the output is unsized (when it's a scalar)
         then `vdim = 1`.
@@ -516,12 +526,14 @@ def vdim(self):
         return self._vdim or 1
 
     @property
-    def bounds_are_done(self):
+    def bounds_are_done(self) -> bool:
         return not any(
             (p in self.pending_points or p in self._stack) for p in self._bounds_points
         )
 
-    def interpolated_on_grid(self, n=None):
+    def interpolated_on_grid(
+        self, n: int | None = None
+    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
         """Get the interpolated data on a grid.
 
         Parameters
@@ -553,7 +565,7 @@ def interpolated_on_grid(self, n=None):
         xs, ys = self._unscale(np.vstack([xs, ys]).T).T
         return xs, ys, zs
 
-    def _data_in_bounds(self):
+    def _data_in_bounds(self) -> tuple[np.ndarray, np.ndarray]:
         if self.data:
             points = np.array(list(self.data.keys()))
             values = np.array(list(self.data.values()), dtype=float)
@@ -562,7 +574,7 @@ def _data_in_bounds(self):
             return points[inds], values[inds].reshape(-1, self.vdim)
         return np.zeros((0, 2)), np.zeros((0, self.vdim), dtype=float)
 
-    def _data_interp(self):
+    def _data_interp(self) -> tuple[np.ndarray | list[tuple[float, float]], np.ndarray]:
         if self.pending_points:
             points = list(self.pending_points)
             if self.bounds_are_done:
@@ -575,7 +587,7 @@ def _data_interp(self):
             return points, values
         return np.zeros((0, 2)), np.zeros((0, self.vdim), dtype=float)
 
-    def _data_combined(self):
+    def _data_combined(self) -> tuple[np.ndarray, np.ndarray]:
         points, values = self._data_in_bounds()
         if not self.pending_points:
             return points, values
@@ -584,7 +596,7 @@ def _data_combined(self):
         values_combined = np.vstack([values, values_interp])
         return points_combined, values_combined
 
-    def ip(self):
+    def ip(self) -> LinearNDInterpolator:
         """Deprecated, use `self.interpolator(scaled=True)`"""
         warnings.warn(
             "`learner.ip()` is deprecated, use `learner.interpolator(scaled=True)`."
@@ -593,7 +605,7 @@ def ip(self):
         )
         return self.interpolator(scaled=True)
 
-    def interpolator(self, *, scaled=False):
+    def interpolator(self, *, scaled: bool = False) -> LinearNDInterpolator:
         """A `scipy.interpolate.LinearNDInterpolator` instance
         containing the learner's data.
 
@@ -624,7 +636,7 @@ def interpolator(self, *, scaled=False):
             points, values = self._data_in_bounds()
             return interpolate.LinearNDInterpolator(points, values)
 
-    def _interpolator_combined(self):
+    def _interpolator_combined(self) -> LinearNDInterpolator:
         """A `scipy.interpolate.LinearNDInterpolator` instance
         containing the learner's data *and* interpolated data of
         the `pending_points`."""
@@ -634,12 +646,12 @@ def _interpolator_combined(self):
             self._ip_combined = interpolate.LinearNDInterpolator(points, values)
         return self._ip_combined
 
-    def inside_bounds(self, xy):
+    def inside_bounds(self, xy: tuple[float, float]) -> Bool:
         x, y = xy
         (xmin, xmax), (ymin, ymax) = self.bounds
         return xmin <= x <= xmax and ymin <= y <= ymax
 
-    def tell(self, point, value):
+    def tell(self, point: tuple[float, float], value: float | Iterable[float]) -> None:
         point = tuple(point)
         self.data[point] = value
         if not self.inside_bounds(point):
@@ -648,7 +660,7 @@ def tell(self, point, value):
         self._ip = None
         self._stack.pop(point, None)
 
-    def tell_pending(self, point):
+    def tell_pending(self, point: tuple[float, float]) -> None:
         point = tuple(point)
         if not self.inside_bounds(point):
             return
@@ -656,7 +668,9 @@ def tell_pending(self, point):
         self._ip_combined = None
         self._stack.pop(point, None)
 
-    def _fill_stack(self, stack_till=1):
+    def _fill_stack(
+        self, stack_till: int = 1
+    ) -> tuple[list[tuple[float, float]], list[float]]:
         if len(self.data) + len(self.pending_points) < self.ndim + 1:
             raise ValueError("too few points...")
 
@@ -695,7 +709,9 @@ def _fill_stack(self, stack_till=1):
 
         return points_new, losses_new
 
-    def ask(self, n, tell_pending=True):
+    def ask(
+        self, n: int, tell_pending: bool = True
+    ) -> tuple[list[tuple[float, float] | np.ndarray], list[float]]:
         # Even if tell_pending is False we add the point such that _fill_stack
         # will return new points, later we remove these points if needed.
         points = list(self._stack.keys())
@@ -726,14 +742,14 @@ def ask(self, n, tell_pending=True):
         return points[:n], loss_improvements[:n]
 
     @cache_latest
-    def loss(self, real=True):
+    def loss(self, real: bool = True) -> float:
         if not self.bounds_are_done:
             return np.inf
         ip = self.interpolator(scaled=True) if real else self._interpolator_combined()
         losses = self.loss_per_triangle(ip)
         return losses.max()
 
-    def remove_unfinished(self):
+    def remove_unfinished(self) -> None:
         self.pending_points = set()
         for p in self._bounds_points:
             if p not in self.data:
@@ -807,10 +823,10 @@ def plot(self, n=None, tri_alpha=0):
 
         return im.opts(style=im_opts) * tris.opts(style=tri_opts, **no_hover)
 
-    def _get_data(self):
+    def _get_data(self) -> dict[tuple[float, float], Float | np.ndarray]:
         return self.data
 
-    def _set_data(self, data):
+    def _set_data(self, data: dict[tuple[float, float], Float | np.ndarray]) -> None:
         self.data = data
         # Remove points from stack if they already exist
         for point in copy(self._stack):
diff --git a/adaptive/learner/sequence_learner.py b/adaptive/learner/sequence_learner.py
index b01e2cd14..2b3515801 100644
--- a/adaptive/learner/sequence_learner.py
+++ b/adaptive/learner/sequence_learner.py
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
 from copy import copy
+from numbers import Integral as Int
+from typing import Any, Tuple
 
 import cloudpickle
 from sortedcontainers import SortedDict, SortedSet
@@ -16,6 +18,14 @@
 except ModuleNotFoundError:
     with_pandas = False
 
+try:
+    from typing import TypeAlias
+except ImportError:
+    from typing_extensions import TypeAlias
+
+
+PointType: TypeAlias = Tuple[Int, Any]
+
 
 class _IgnoreFirstArgument:
     """Remove the first argument from the call signature.
@@ -30,7 +40,7 @@ class _IgnoreFirstArgument:
     def __init__(self, function):
         self.function = function
 
-    def __call__(self, index_point, *args, **kwargs):
+    def __call__(self, index_point: PointType, *args, **kwargs):
         index, point = index_point
         return self.function(point, *args, **kwargs)
 
@@ -71,7 +81,9 @@ class SequenceLearner(BaseLearner):
     def __init__(self, function, sequence):
         self._original_function = function
         self.function = _IgnoreFirstArgument(function)
-        self._to_do_indices = SortedSet({i for i, _ in enumerate(sequence)})
+        # prefer range(len(...)) over enumerate to avoid slowdowns
+        # when passing lazy sequences
+        self._to_do_indices = SortedSet(range(len(sequence)))
         self._ntotal = len(sequence)
         self.sequence = copy(sequence)
         self.data = SortedDict()
@@ -81,7 +93,9 @@ def new(self) -> SequenceLearner:
         """Return a new `~adaptive.SequenceLearner` without the data."""
         return SequenceLearner(self._original_function, self.sequence)
 
-    def ask(self, n, tell_pending=True):
+    def ask(
+        self, n: int, tell_pending: bool = True
+    ) -> tuple[list[PointType], list[float]]:
         indices = []
         points = []
         loss_improvements = []
@@ -99,40 +113,40 @@ def ask(self, n, tell_pending=True):
 
         return points, loss_improvements
 
-    def loss(self, real=True):
+    def loss(self, real: bool = True) -> float:
         if not (self._to_do_indices or self.pending_points):
-            return 0
+            return 0.0
         else:
             npoints = self.npoints + (0 if real else len(self.pending_points))
             return (self._ntotal - npoints) / self._ntotal
 
-    def remove_unfinished(self):
+    def remove_unfinished(self) -> None:
         for i in self.pending_points:
             self._to_do_indices.add(i)
         self.pending_points = set()
 
-    def tell(self, point, value):
+    def tell(self, point: PointType, value: Any) -> None:
         index, point = point
         self.data[index] = value
         self.pending_points.discard(index)
         self._to_do_indices.discard(index)
 
-    def tell_pending(self, point):
+    def tell_pending(self, point: PointType) -> None:
         index, point = point
         self.pending_points.add(index)
         self._to_do_indices.discard(index)
 
-    def done(self):
+    def done(self) -> bool:
         return not self._to_do_indices and not self.pending_points
 
-    def result(self):
+    def result(self) -> list[Any]:
         """Get the function values in the same order as ``sequence``."""
         if not self.done():
             raise Exception("Learner is not yet complete.")
         return list(self.data.values())
 
     @property
-    def npoints(self):
+    def npoints(self) -> int:
         return len(self.data)
 
     def to_dataframe(
@@ -213,16 +227,18 @@ def load_dataframe(
         y_name : str, optional
             The ``y_name`` used in ``to_dataframe``, by default "y"
         """
-        self.tell_many(df[[index_name, x_name]].values, df[y_name].values)
+        indices = df[index_name].values
+        xs = df[x_name].values
+        self.tell_many(zip(indices, xs), df[y_name].values)
         if with_default_function_args:
             self.function = partial_function_from_dataframe(
                 self._original_function, df, function_prefix
             )
 
-    def _get_data(self):
+    def _get_data(self) -> dict[int, Any]:
         return self.data
 
-    def _set_data(self, data):
+    def _set_data(self, data: dict[int, Any]) -> None:
         if data:
             indices, values = zip(*data.items())
             # the points aren't used by tell, so we can safely pass None
diff --git a/adaptive/notebook_integration.py b/adaptive/notebook_integration.py
index 426c04541..60329110e 100644
--- a/adaptive/notebook_integration.py
+++ b/adaptive/notebook_integration.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import asyncio
 import datetime
 import importlib
@@ -76,7 +78,7 @@ def ensure_plotly():
     raise RuntimeError("plotly is not installed; plotting is disabled.")
 
 
-def in_ipynb():
+def in_ipynb() -> bool:
     try:
         # If we are running in IPython, then `get_ipython()` is always a global
         return get_ipython().__class__.__name__ == "ZMQInteractiveShell"
diff --git a/adaptive/tests/algorithm_4.py b/adaptive/tests/algorithm_4.py
index fb401e866..4566c0fa1 100644
--- a/adaptive/tests/algorithm_4.py
+++ b/adaptive/tests/algorithm_4.py
@@ -2,7 +2,8 @@
 # Copyright 2017 Christoph Groth
 
 from collections import defaultdict
-from fractions import Fraction as Frac
+from fractions import Fraction
+from typing import Callable, List, Tuple, Union
 
 import numpy as np
 from numpy.testing import assert_allclose
@@ -11,7 +12,7 @@
 eps = np.spacing(1)
 
 
-def legendre(n):
+def legendre(n: int) -> List[List[Fraction]]:
     """Return the first n Legendre polynomials.
 
     The polynomials have *standard* normalization, i.e.
@@ -19,12 +20,12 @@ def legendre(n):
     The return value is a list of list of fraction.Fraction instances.
     """
-    result = [[Frac(1)], [Frac(0), Frac(1)]]
+    result = [[Fraction(1)], [Fraction(0), Fraction(1)]]
     if n <= 2:
         return result[:n]
     for i in range(2, n):
         # Use Bonnet's recursion formula.
-        new = (i + 1) * [Frac(0)]
+        new = (i + 1) * [Fraction(0)]
         new[1:] = (r * (2 * i - 1) for r in result[-1])
         new[:-2] = (n - r * (i - 1) for n, r in zip(new[:-2], result[-2]))
         new[:] = (n / i for n in new)
@@ -32,7 +33,7 @@ def legendre(n):
     return result
 
 
-def newton(n):
+def newton(n: int) -> np.ndarray:
     """Compute the monomial coefficients of the Newton polynomial
     over the nodes of the n-point Clenshaw-Curtis quadrature rule.
     """
@@ -89,7 +90,7 @@ def newton(n):
     return cf
 
 
-def scalar_product(a, b):
+def scalar_product(a: List[Fraction], b: List[Fraction]) -> Fraction:
     """Compute the polynomial scalar product int_-1^1 dx a(x) b(x).
 
     The args must be sequences of polynomial coefficients. This
@@ -110,7 +111,7 @@ def scalar_product(a, b):
     return 2 * sum(c[i] / (i + 1) for i in range(0, lc, 2))
 
 
-def calc_bdef(ns):
+def calc_bdef(ns: Tuple[int, int, int, int]) -> List[np.ndarray]:
     """Calculate the decompositions of Newton polynomials (over the nodes
     of the n-point Clenshaw-Curtis quadrature rule) in terms of
     Legendre polynomials.
@@ -123,7 +124,7 @@ def calc_bdef(ns):
     result = []
     for n in ns:
         poly = []
-        a = list(map(Frac, newton(n)))
+        a = list(map(Fraction, newton(n)))
         for b in legs[: n + 1]:
             igral = scalar_product(a, b)
@@ -145,7 +146,7 @@ def calc_bdef(ns):
 b_def = calc_bdef(n)
 
 
-def calc_V(xi, n):
+def calc_V(xi: np.ndarray, n: int) -> np.ndarray:
     V = [np.ones(xi.shape), xi.copy()]
     for i in range(2, n):
         V.append((2 * i - 1) / i * xi * V[-1] - (i - 1) / i * V[-2])
@@ -183,7 +184,7 @@ def calc_V(xi, n):
 gamma = np.concatenate([[0, 0], np.sqrt(k[2:] ** 2 / (4 * k[2:] ** 2 - 1))])
 
 
-def _downdate(c, nans, depth):
+def _downdate(c: np.ndarray, nans: List[int], depth: int) -> None:
     # This is algorithm 5 from the thesis of Pedro Gonnet.
     b = b_def[depth].copy()
     m = n[depth] - 1
@@ -200,7 +201,7 @@ def _downdate(c, nans, depth):
         m -= 1
 
 
-def _zero_nans(fx):
+def _zero_nans(fx: np.ndarray) -> List[int]:
     nans = []
     for i in range(len(fx)):
         if not np.isfinite(fx[i]):
@@ -209,7 +210,7 @@ def _zero_nans(fx):
     return nans
 
 
-def _calc_coeffs(fx, depth):
+def _calc_coeffs(fx: np.ndarray, depth: int) -> np.ndarray:
     """Caution: this function modifies fx."""
     nans = _zero_nans(fx)
     c_new = V_inv[depth] @ fx
@@ -220,7 +221,7 @@ def _calc_coeffs(fx, depth):
 
 
 class DivergentIntegralError(ValueError):
-    def __init__(self, msg, igral, err, nr_points):
+    def __init__(self, msg: str, igral: float, err: None, nr_points: int) -> None:
         self.igral = igral
         self.err = err
         self.nr_points = nr_points
@@ -230,19 +231,23 @@ def __init__(self, msg, igral, err, nr_points):
 
 class _Interval:
     __slots__ = ["a", "b", "c", "fx", "igral", "err", "depth", "rdepth", "ndiv", "c00"]
 
-    def __init__(self, a, b, depth, rdepth):
+    def __init__(
+        self, a: Union[int, float], b: Union[int, float], depth: int, rdepth: int
+    ) -> None:
         self.a = a
         self.b = b
         self.depth = depth
         self.rdepth = rdepth
 
-    def points(self):
+    def points(self) -> np.ndarray:
         a = self.a
         b = self.b
         return (a + b) / 2 + (b - a) * xi[self.depth] / 2
 
     @classmethod
-    def make_first(cls, f, a, b, depth=2):
+    def make_first(
+        cls, f: Callable, a: int, b: int, depth: int = 2
+    ) -> Tuple["_Interval", int]:
         ival = _Interval(a, b, depth, 1)
         fx = f(ival.points())
         ival.c = _calc_coeffs(fx, depth)
@@ -251,7 +256,7 @@ def make_first(cls, f, a, b, depth=2):
         ival.ndiv = 0
         return ival, n[depth]
 
-    def calc_igral_and_err(self, c_old):
+    def calc_igral_and_err(self, c_old: np.ndarray) -> float:
         self.c = c_new = _calc_coeffs(self.fx, self.depth)
         c_diff = np.zeros(max(len(c_old), len(c_new)))
         c_diff[: len(c_old)] = c_old
@@ -262,7 +267,9 @@ def calc_igral_and_err(self, c_old):
         self.err = w * c_diff
         return c_diff
 
-    def split(self, f):
+    def split(
+        self, f: Callable
+    ) -> Union[Tuple[Tuple[float, float, float], int], Tuple[List["_Interval"], int]]:
         m = (self.a + self.b) / 2
         f_center = self.fx[(len(self.fx) - 1) // 2]
 
@@ -287,7 +294,7 @@ def split(self, f):
 
         return ivals, nr_points
 
-    def refine(self, f):
+    def refine(self, f: Callable) -> Tuple[np.ndarray, bool, int]:
         """Increase degree of interval."""
         self.depth = depth = self.depth + 1
         points = self.points()
@@ -299,7 +306,9 @@ def refine(self, f):
         return points, split, n[depth] - n[depth - 1]
 
 
-def algorithm_4(f, a, b, tol, N_loops=int(1e9)):
+def algorithm_4(
+    f: Callable, a: int, b: int, tol: float, N_loops: int = int(1e9)
+) -> Tuple[float, float, int, List["_Interval"]]:
     """ALGORITHM_4 evaluates an integral using adaptive quadrature. The
     algorithm uses Clenshaw-Curtis quadrature rules of increasing
     degree in each interval and bisects the interval if either the
@@ -403,10 +412,10 @@ def algorithm_4(f, a, b, tol, N_loops=int(1e9)):
     return igral, err, nr_points, ivals
 
 
-################ Tests ################
+# ############### Tests ################
 
 
-def f0(x):
+def f0(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
     return x * np.sin(1 / x) * np.sqrt(abs(1 - x))
 
 
@@ -414,18 +423,20 @@ def f7(x):
     return x**-0.5
 
 
-def f24(x):
+def f24(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
     return np.floor(np.exp(x))
 
 
-def f21(x):
+def f21(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
     y = 0
     for i in range(1, 4):
         y += 1 / np.cosh(20**i * (x - 2 * i / 10))
     return y
 
 
-def f63(x, alpha, beta):
+def f63(
+    x: Union[float, np.ndarray], alpha: float, beta: float
+) -> Union[float, np.ndarray]:
     return abs(x - beta) ** alpha
 
 
@@ -433,7 +444,7 @@ def F63(x, alpha, beta):
     return (x - beta) * abs(x - beta) ** alpha / (alpha + 1)
 
 
-def fdiv(x):
+def fdiv(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
     return abs(x - 0.987654321) ** -1.1
 
 
@@ -461,7 +472,9 @@ def test_scalar_product(n=33):
     selection = [0, 5, 7, n - 1]
     for i in selection:
         for j in selection:
-            assert scalar_product(legs[i], legs[j]) == ((i == j) and Frac(2, 2 * i + 1))
+            assert scalar_product(legs[i], legs[j]) == (
+                (i == j) and Fraction(2, 2 * i + 1)
+            )
 
 
 def simple_newton(n):
diff --git a/adaptive/tests/test_average_learner1d.py b/adaptive/tests/test_average_learner1d.py
index 4286f55b9..8b2670d77 100644
--- a/adaptive/tests/test_average_learner1d.py
+++ b/adaptive/tests/test_average_learner1d.py
@@ -1,6 +1,6 @@
+from itertools import chain
+
 import numpy as np
-import pandas as pd
-from pandas.testing import assert_series_equal
 
 from adaptive import AverageLearner1D
 from adaptive.tests.test_learners import (
@@ -11,13 +11,27 @@
 
 
 def almost_equal_dicts(a, b):
-    assert_series_equal(pd.Series(sorted(a.items())), pd.Series(sorted(b.items())))
+    assert a.keys() == b.keys()
+    for k, v1 in a.items():
+        v2 = b[k]
+        if (
+            v1 is None
+            or v2 is None
+            or isinstance(v1, (tuple, list))
+            and any(x is None for x in chain(v1, v2))
+        ):
+            assert v1 == v2
+        else:
+            try:
+                np.testing.assert_almost_equal(v1, v2)
+            except TypeError:
+                raise AssertionError(f"{v1} != {v2}")
 
 
 def test_tell_many_at_point():
     f = generate_random_parametrization(noisy_peak)
     learner = AverageLearner1D(f, bounds=(-2, 2))
-    control = AverageLearner1D(f, bounds=(-2, 2))
+    control = learner.new()
     learner._recompute_losses_factor = 1
     control._recompute_losses_factor = 1
     simple_run(learner, 100)
diff --git a/adaptive/tests/test_learner1d.py b/adaptive/tests/test_learner1d.py
index 59a8b81a8..7e990bd7b 100644
--- a/adaptive/tests/test_learner1d.py
+++ b/adaptive/tests/test_learner1d.py
@@ -277,8 +277,8 @@ def f_vec(x, offset=0.123214):
 def assert_equal_dicts(d1, d2):
     xs1, ys1 = zip(*sorted(d1.items()))
     xs2, ys2 = zip(*sorted(d2.items()))
-    ys1 = np.array(ys1, dtype=np.float)
-    ys2 = np.array(ys2, dtype=np.float)
+    ys1 = np.array(ys1, dtype=np.float64)
+    ys2 = np.array(ys2, dtype=np.float64)
     np.testing.assert_almost_equal(xs1, xs2)
     np.testing.assert_almost_equal(ys1, ys2)
diff --git a/adaptive/tests/test_learners.py b/adaptive/tests/test_learners.py
index 8c616e6bb..d393511fb 100644
--- a/adaptive/tests/test_learners.py
+++ b/adaptive/tests/test_learners.py
@@ -12,7 +12,6 @@
 
 import flaky
 import numpy as np
-import pandas
 import pytest
 import scipy.spatial
 
@@ -28,6 +27,7 @@
     LearnerND,
     SequenceLearner,
 )
+from adaptive.learner.learner1D import with_pandas
 from adaptive.runner import simple
 
 try:
@@ -708,6 +708,7 @@ def wrapper(*args, **kwargs):
     return wrapper
 
 
+@pytest.mark.skipif(not with_pandas, reason="pandas is not installed")
 @run_with(
     Learner1D,
     Learner2D,
@@ -719,6 +720,8 @@ def wrapper(*args, **kwargs):
     with_all_loss_functions=False,
 )
 def test_to_dataframe(learner_type, f, learner_kwargs):
+    import pandas
+
     if learner_type is LearnerND:
         kw = {"point_names": tuple("xyz")[: len(learner_kwargs["bounds"])]}
     else:
diff --git a/adaptive/tests/test_pickling.py b/adaptive/tests/test_pickling.py
index b8c7445a7..c0d515320 100644
--- a/adaptive/tests/test_pickling.py
+++ b/adaptive/tests/test_pickling.py
@@ -62,7 +62,7 @@ def balancing_learner(f, learner_type, learner_kwargs):
 
 learners_pairs = [
     (Learner1D, dict(bounds=(-1, 1))),
-    (Learner2D, dict(bounds=[(-1, 1), (-1, 1)])),
+    (Learner2D, dict(bounds=((-1, 1), (-1, 1)))),
     (SequenceLearner, dict(sequence=list(range(100)))),
     (IntegratorLearner, dict(bounds=(0, 1), tol=1e-3)),
     (AverageLearner, dict(atol=0.1)),
diff --git a/adaptive/tests/test_runner.py b/adaptive/tests/test_runner.py
index 2cdfc085f..169c38431 100644
--- a/adaptive/tests/test_runner.py
+++ b/adaptive/tests/test_runner.py
@@ -57,7 +57,7 @@ def test_nonconforming_output(runner):
     def f(x):
         return [0]
 
-    runner(Learner2D(f, [(-1, 1), (-1, 1)]), trivial_goal)
+    runner(Learner2D(f, ((-1, 1), (-1, 1))), trivial_goal)
 
 
 def test_aync_def_function():
diff --git a/adaptive/types.py b/adaptive/types.py
index e2d57a44f..a49b332a6 100644
--- a/adaptive/types.py
+++ b/adaptive/types.py
@@ -1,3 +1,5 @@
+from numbers import Integral as Int
+from numbers import Real
 from typing import Union
 
 import numpy as np
@@ -9,5 +11,7 @@
     from typing_extensions import TypeAlias
 
 Float: TypeAlias = Union[float, np.float_]
-Int: TypeAlias = Union[int, np.int_]
-Real: TypeAlias = Union[Float, Int]
+Bool: TypeAlias = Union[bool, np.bool_]
+
+
+__all__ = ["Float", "Bool", "Int", "Real"]
diff --git a/adaptive/utils.py b/adaptive/utils.py
index a98af12a1..465883188 100644
--- a/adaptive/utils.py
+++ b/adaptive/utils.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import abc
 import functools
 import gzip
@@ -5,20 +7,21 @@
 import os
 import pickle
 import warnings
-from contextlib import contextmanager
+from contextlib import _GeneratorContextManager, contextmanager
 from itertools import product
+from typing import Any, Callable, Sequence
 
 import cloudpickle
 
 
-def named_product(**items):
+def named_product(**items: Sequence[Any]):
     names = items.keys()
     vals = items.values()
     return [dict(zip(names, res)) for res in product(*vals)]
 
 
 @contextmanager
-def restore(*learners):
+def restore(*learners) -> _GeneratorContextManager:
     states = [learner.__getstate__() for learner in learners]
     try:
         yield
@@ -27,7 +30,7 @@ def restore(*learners):
         learner.__setstate__(state)
 
 
-def cache_latest(f):
+def cache_latest(f: Callable) -> Callable:
     """Cache the latest return value of the function and add it
     as 'self._cache[f.__name__]'."""
 
@@ -42,7 +45,7 @@ def wrapper(*args, **kwargs):
     return wrapper
 
 
-def save(fname, data, compress=True):
+def save(fname: str, data: Any, compress: bool = True) -> bool:
     fname = os.path.expanduser(fname)
     dirname = os.path.dirname(fname)
     if dirname:
@@ -71,14 +74,14 @@ def save(fname, data, compress=True):
     return True
 
 
-def load(fname, compress=True):
+def load(fname: str, compress: bool = True) -> Any:
     fname = os.path.expanduser(fname)
     _open = gzip.open if compress else open
     with _open(fname, "rb") as f:
         return cloudpickle.load(f)
 
 
-def copy_docstring_from(other):
+def copy_docstring_from(other: Callable) -> Callable:
     def decorator(method):
         return functools.wraps(other)(method)
 
diff --git a/setup.py b/setup.py
index 21c3191d8..08a7948f5 100644
--- a/setup.py
+++ b/setup.py
@@ -42,6 +42,7 @@ def get_version_and_cmdclass(package_name):
         "holoviews>=1.9.1",
         "ipywidgets",
         "bokeh",
+        "pandas",
         "matplotlib",
         "plotly",
     ],
@@ -52,7 +53,6 @@ def get_version_and_cmdclass(package_name):
         "pytest-randomly",
         "pytest-timeout",
         "pre_commit",
-        "pandas",
         "typeguard",
     ],
     "other": [