Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,4 +17,4 @@ faer-ext = {version = "0.1.0", features = ["ndarray"]}
ndarray = "0.15.6"
serde = {version = "*", features=["derive"]}
hashbrown = {version = "0.14.2", features=["nightly"]}
numpy = "*"
numpy = "*"
6 changes: 3 additions & 3 deletions docs/notebooks/benchmarks.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"from typing import Callable\n",
"from collections.abc import Callable\n",
"\n",
"import pandas as pd\n",
"import perfplot\n",
Expand Down Expand Up @@ -113,7 +113,7 @@
" fe.autoregressive_coefficients,\n",
" tsfresh.ar_coefficient,\n",
" {\"n_lags\": 4},\n",
" {\"param\": [{\"coeff\": i, \"k\": 4}] for i in range(5)},\n",
" {f\"param_{i}\": [{\"coeff\": i, \"k\": 4}] for i in range(5)},\n",
" ),\n",
" (fe.benford_correlation, tsfresh.benford_correlation, {}, {}),\n",
" (fe.binned_entropy, tsfresh.binned_entropy, {\"bin_count\": 10}, {\"max_bins\": 10}),\n",
Expand All @@ -134,7 +134,7 @@
" fe.energy_ratios,\n",
" tsfresh.energy_ratio_by_chunks,\n",
" {\"n_chunks\": 6},\n",
" {\"param\": [{\"num_segments\": 6, \"segment_focus\": i} for i in range(6)]},\n",
" {f\"param_{i}\": [{\"num_segments\": 6, \"segment_focus\": i} for i in range(6)]},\n",
" ),\n",
" (fe.first_location_of_maximum, tsfresh.first_location_of_maximum, {}, {}),\n",
" (fe.first_location_of_minimum, tsfresh.first_location_of_minimum, {}, {}),\n",
Expand Down
43 changes: 43 additions & 0 deletions functime/_compat.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
from __future__ import annotations

from collections.abc import Iterable
from pathlib import Path
from typing import Any

import polars as pl

try:
POLARS_MAJOR_VERSION = int(pl.__version__.split(".", 1)[0])
except ValueError:
POLARS_MAJOR_VERSION = 0

if POLARS_MAJOR_VERSION >= 1:
from polars.plugins import register_plugin_function

rle_fields = {"value": "value", "len": "len"}

else:

def register_plugin_function(
    *,
    plugin_path: Path | str,
    function_name: str,
    # NOTE(review): `IntoExpr` is polars' type alias and is not imported in
    # this module; the annotation only works because of
    # `from __future__ import annotations` (lazy evaluation) — confirm or
    # import it under TYPE_CHECKING.
    args: IntoExpr | Iterable[IntoExpr],
    kwargs: dict[str, Any] | None = None,
    is_elementwise: bool = False,
    returns_scalar: bool = False,
    cast_to_supertype: bool = False,
):
    """Compatibility shim for ``polars.plugins.register_plugin_function``.

    Used when the installed polars major version is < 1: translates the
    new-style keyword call into the legacy ``Expr.register_plugin`` method.
    The first element of ``args`` is treated as the anchor expression the
    plugin is registered on; the remaining elements are forwarded as plugin
    arguments.

    Returns the expression produced by ``Expr.register_plugin`` — the
    polars>=1 counterpart returns an ``Expr``, so this shim must too
    (the original version dropped the result and returned ``None``).
    """
    anchor, *plugin_args = args  # anchor expr first, rest are plugin inputs
    return anchor.register_plugin(
        lib=plugin_path,
        args=plugin_args,
        symbol=function_name,
        is_elementwise=is_elementwise,
        returns_scalar=returns_scalar,
        kwargs=kwargs,
        # legacy parameter name is plural ("supertypes")
        cast_to_supertypes=cast_to_supertype,
    )

rle_fields = {"value": "values", "len": "lengths"}
26 changes: 12 additions & 14 deletions functime/_plotting.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,9 @@
from plotly.subplots import make_subplots

if TYPE_CHECKING:
from collections.abc import Collection
from typing import (
ClassVar,
Collection,
Optional,
Tuple,
TypedDict,
)

Expand Down Expand Up @@ -103,9 +101,9 @@ def from_panel(
cls,
*,
y: pl.LazyFrame,
num_cols: Optional[int] = None,
num_series: Optional[int] = None,
seed: Optional[int] = None,
num_cols: int | None = None,
num_series: int | None = None,
seed: int | None = None,
default_title: str,
**kwargs,
):
Expand Down Expand Up @@ -173,9 +171,9 @@ def add_time_series(
self: Self,
*,
data: pl.LazyFrame,
num_points: Optional[int] = None,
name_on_hover: Optional[str] = None,
legend_group: Optional[str] = None,
num_points: int | None = None,
name_on_hover: str | None = None,
legend_group: str | None = None,
**kwargs,
) -> Self:
"""Add a time series to the subplot grid.
Expand Down Expand Up @@ -240,8 +238,8 @@ def add_traces(
entities: Collection[str],
num_cols: int,
show_legend: bool = True,
name_on_hover: Optional[str] = None,
legend_group: Optional[str] = None,
name_on_hover: str | None = None,
legend_group: str | None = None,
**kwargs,
) -> go.Figure:
"""Add scatterplot traces to a `Figure` instance.
Expand Down Expand Up @@ -300,8 +298,8 @@ def add_traces(
def get_chosen_entities(
*,
y: pl.LazyFrame,
num_series: Optional[int] = None,
seed: Optional[int] = None,
num_series: int | None = None,
seed: int | None = None,
):
"""Sample entities to plot in a subplot grid, given the data.

Expand Down Expand Up @@ -404,7 +402,7 @@ def get_subplot_grid_position(
*,
element: int,
num_cols: int,
) -> Tuple[int, int]:
) -> tuple[int, int]:
"""Get the row and column index of the subplot at the given element index.

Need to add 1 because the grid indexes in a plotly subplot are 1-based.
Expand Down
3 changes: 2 additions & 1 deletion functime/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from typing import Callable, ParamSpec, TypeVar
from collections.abc import Callable
from typing import ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")
Expand Down
15 changes: 8 additions & 7 deletions functime/backtesting.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from __future__ import annotations

from typing import Any, Callable, Mapping, Optional, Tuple
from collections.abc import Callable, Mapping
from typing import Any

import numpy as np
import polars as pl
Expand All @@ -15,7 +16,7 @@ def _residualize_autoreg(
lags: int,
max_horizons: int,
artifacts: Mapping[str, Any],
X_train: Optional[pl.DataFrame] = None,
X_train: pl.DataFrame | None = None,
) -> pl.DataFrame:
y_train = y_train.lazy().collect(streaming=True)
idx_cols = y_train.columns[:2]
Expand All @@ -25,7 +26,7 @@ def _score_recursive(regressor):
X_cp_train = X_y_train.select(pl.all().exclude(target_col))
y_pred_arr = regressor.predict(X_cp_train)
# Check if censored model
if isinstance(y_pred_arr, Tuple):
if isinstance(y_pred_arr, tuple):
y_pred_arr, _ = y_pred_arr # forecast, probabilities
y_pred_cp = X_cp_train.select(idx_cols).with_columns(
pl.lit(y_pred_arr).alias("y_pred")
Expand All @@ -48,7 +49,7 @@ def _score_direct(regressors):
X_cp_train = X_y_train.select([*idx_cols, *feature_cols])
y_pred_arr = regressors[i].predict(X_cp_train)
# Check if censored model
if isinstance(y_pred_arr, Tuple):
if isinstance(y_pred_arr, tuple):
y_pred_arr, _ = y_pred_arr # forecast, probabilities
y_preds_cp.append(y_pred_arr)
# NOTE: we just naively take the mean across all direct predictions
Expand Down Expand Up @@ -88,7 +89,7 @@ def _merge_autoreg_residuals(
lags: int,
max_horizons: int,
artifacts: Mapping[str, Any],
X: Optional[pl.DataFrame] = None,
X: pl.DataFrame | None = None,
):
y_resid = _residualize_autoreg(
y_train=y,
Expand All @@ -109,9 +110,9 @@ def backtest(
fh: int,
y: pl.DataFrame,
cv: Callable[[pl.DataFrame], Mapping[int, pl.DataFrame]],
X: Optional[pl.DataFrame] = None,
X: pl.DataFrame | None = None,
residualize: bool = True,
) -> Tuple[pl.DataFrame, pl.DataFrame]:
) -> tuple[pl.DataFrame, pl.DataFrame]:
pl.enable_string_cache()
entity_col, time_col, target_col = y.columns[:3]
if X is None:
Expand Down
Loading
Loading