diff --git a/.github/workflows/ci-additional.yaml b/.github/workflows/ci-additional.yaml
index 1fc58c4f0bf..78c15fec699 100644
--- a/.github/workflows/ci-additional.yaml
+++ b/.github/workflows/ci-additional.yaml
@@ -139,7 +139,7 @@ jobs:
fail_ci_if_error: false
mypy39:
- name: Mypy 3.9
+ name: Mypy 3.10
runs-on: "ubuntu-latest"
needs: detect-ci-trigger
defaults:
@@ -147,7 +147,7 @@ jobs:
shell: bash -l {0}
env:
CONDA_ENV_FILE: ci/requirements/environment.yml
- PYTHON_VERSION: "3.9"
+ PYTHON_VERSION: "3.10"
steps:
- uses: actions/checkout@v4
@@ -254,7 +254,7 @@ jobs:
fail_ci_if_error: false
pyright39:
- name: Pyright 3.9
+ name: Pyright 3.10
runs-on: "ubuntu-latest"
needs: detect-ci-trigger
if: |
@@ -267,7 +267,7 @@ jobs:
shell: bash -l {0}
env:
CONDA_ENV_FILE: ci/requirements/environment.yml
- PYTHON_VERSION: "3.9"
+ PYTHON_VERSION: "3.10"
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 6f9ba4a440d..4698e144293 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -47,15 +47,15 @@ jobs:
matrix:
os: ["ubuntu-latest", "macos-latest", "windows-latest"]
# Bookend python versions
- python-version: ["3.9", "3.12"]
+ python-version: ["3.10", "3.12"]
env: [""]
include:
# Minimum python version:
- env: "bare-minimum"
- python-version: "3.9"
+ python-version: "3.10"
os: ubuntu-latest
- env: "min-all-deps"
- python-version: "3.9"
+ python-version: "3.10"
os: ubuntu-latest
# Latest python version:
- env: "all-but-dask"
diff --git a/ci/requirements/bare-minimum.yml b/ci/requirements/bare-minimum.yml
index 105e90ce109..0878222da35 100644
--- a/ci/requirements/bare-minimum.yml
+++ b/ci/requirements/bare-minimum.yml
@@ -3,7 +3,7 @@ channels:
- conda-forge
- nodefaults
dependencies:
- - python=3.9
+ - python=3.10
- coveralls
- pip
- pytest
diff --git a/ci/requirements/min-all-deps.yml b/ci/requirements/min-all-deps.yml
index 64f4327bbcb..ea1dc7b7fb0 100644
--- a/ci/requirements/min-all-deps.yml
+++ b/ci/requirements/min-all-deps.yml
@@ -7,7 +7,7 @@ dependencies:
# Run ci/min_deps_check.py to verify that this file respects the policy.
# When upgrading python, numpy, or pandas, must also change
# doc/user-guide/installing.rst, doc/user-guide/plotting.rst and setup.py.
- - python=3.9
+ - python=3.10
- array-api-strict=1.0 # dependency for testing the array api compat
- boto3=1.26
- bottleneck=1.3
diff --git a/doc/getting-started-guide/installing.rst b/doc/getting-started-guide/installing.rst
index 823c50f333b..4910047014e 100644
--- a/doc/getting-started-guide/installing.rst
+++ b/doc/getting-started-guide/installing.rst
@@ -6,7 +6,7 @@ Installation
Required dependencies
---------------------
-- Python (3.9 or later)
+- Python (3.10 or later)
- `numpy `__ (1.23 or later)
- `packaging `__ (23.1 or later)
- `pandas `__ (2.0 or later)
diff --git a/pyproject.toml b/pyproject.toml
index 3eafcda7670..de590559a64 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,7 +9,6 @@ classifiers = [
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
@@ -20,7 +19,7 @@ dynamic = ["version"]
license = {text = "Apache-2.0"}
name = "xarray"
readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.10"
dependencies = [
"numpy>=1.23",
@@ -242,7 +241,7 @@ extend-exclude = [
"doc",
"_typed_ops.pyi",
]
-target-version = "py39"
+target-version = "py310"
[tool.ruff.lint]
# E402: module level import not at top of file
@@ -255,6 +254,7 @@ ignore = [
"E402",
"E501",
"E731",
+ "UP007",
]
select = [
"F", # Pyflakes
diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index ece60a2b161..305dfb11c34 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -1,14 +1,20 @@
from __future__ import annotations
import os
-from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence
+from collections.abc import (
+ Callable,
+ Hashable,
+ Iterable,
+ Mapping,
+ MutableMapping,
+ Sequence,
+)
from functools import partial
from io import BytesIO
from numbers import Number
from typing import (
TYPE_CHECKING,
Any,
- Callable,
Final,
Literal,
Union,
@@ -358,7 +364,7 @@ def _dataset_from_backend_dataset(
from_array_kwargs,
**extra_tokens,
):
- if not isinstance(chunks, (int, dict)) and chunks not in {None, "auto"}:
+ if not isinstance(chunks, int | dict) and chunks not in {None, "auto"}:
raise ValueError(
f"chunks must be an int, dict, 'auto', or None. Instead found {chunks}."
)
@@ -385,7 +391,7 @@ def _dataset_from_backend_dataset(
if "source" not in ds.encoding:
path = getattr(filename_or_obj, "path", filename_or_obj)
- if isinstance(path, (str, os.PathLike)):
+ if isinstance(path, str | os.PathLike):
ds.encoding["source"] = _normalize_path(path)
return ds
@@ -1042,7 +1048,7 @@ def open_mfdataset(
raise OSError("no files to open")
if combine == "nested":
- if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
+ if isinstance(concat_dim, str | DataArray) or concat_dim is None:
concat_dim = [concat_dim] # type: ignore[assignment]
# This creates a flat list which is easier to iterate over, whilst
diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py
index 3926ac051f5..d7152bc021a 100644
--- a/xarray/backends/h5netcdf_.py
+++ b/xarray/backends/h5netcdf_.py
@@ -109,7 +109,7 @@ class H5NetCDFStore(WritableCFDataStore):
def __init__(self, manager, group=None, mode=None, lock=HDF5_LOCK, autoclose=False):
import h5netcdf
- if isinstance(manager, (h5netcdf.File, h5netcdf.Group)):
+ if isinstance(manager, h5netcdf.File | h5netcdf.Group):
if group is None:
root, group = find_root_and_group(manager)
else:
@@ -374,7 +374,7 @@ def guess_can_open(
if magic_number is not None:
return magic_number.startswith(b"\211HDF\r\n\032\n")
- if isinstance(filename_or_obj, (str, os.PathLike)):
+ if isinstance(filename_or_obj, str | os.PathLike):
_, ext = os.path.splitext(filename_or_obj)
return ext in {".nc", ".nc4", ".cdf"}
diff --git a/xarray/backends/lru_cache.py b/xarray/backends/lru_cache.py
index c09bcb19006..c37981297a4 100644
--- a/xarray/backends/lru_cache.py
+++ b/xarray/backends/lru_cache.py
@@ -2,8 +2,8 @@
import threading
from collections import OrderedDict
-from collections.abc import Iterator, MutableMapping
-from typing import Any, Callable, TypeVar
+from collections.abc import Callable, Iterator, MutableMapping
+from typing import Any, TypeVar
K = TypeVar("K")
V = TypeVar("V")
diff --git a/xarray/backends/netCDF4_.py b/xarray/backends/netCDF4_.py
index 302c002a779..a40fabdcce7 100644
--- a/xarray/backends/netCDF4_.py
+++ b/xarray/backends/netCDF4_.py
@@ -615,7 +615,7 @@ def guess_can_open(
# netcdf 3 or HDF5
return magic_number.startswith((b"CDF", b"\211HDF\r\n\032\n"))
- if isinstance(filename_or_obj, (str, os.PathLike)):
+ if isinstance(filename_or_obj, str | os.PathLike):
_, ext = os.path.splitext(filename_or_obj)
return ext in {".nc", ".nc4", ".cdf"}
diff --git a/xarray/backends/plugins.py b/xarray/backends/plugins.py
index f4890015040..3f8fde49446 100644
--- a/xarray/backends/plugins.py
+++ b/xarray/backends/plugins.py
@@ -3,22 +3,17 @@
import functools
import inspect
import itertools
-import sys
import warnings
+from collections.abc import Callable
from importlib.metadata import entry_points
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Any
from xarray.backends.common import BACKEND_ENTRYPOINTS, BackendEntrypoint
from xarray.core.utils import module_available
if TYPE_CHECKING:
import os
- from importlib.metadata import EntryPoint
-
- if sys.version_info >= (3, 10):
- from importlib.metadata import EntryPoints
- else:
- EntryPoints = list[EntryPoint]
+ from importlib.metadata import EntryPoint, EntryPoints
from io import BufferedIOBase
from xarray.backends.common import AbstractDataStore
@@ -129,13 +124,8 @@ def list_engines() -> dict[str, BackendEntrypoint]:
-----
This function lives in the backends namespace (``engs=xr.backends.list_engines()``).
If available, more information is available about each backend via ``engs["eng_name"]``.
-
- # New selection mechanism introduced with Python 3.10. See GH6514.
"""
- if sys.version_info >= (3, 10):
- entrypoints = entry_points(group="xarray.backends")
- else:
- entrypoints = entry_points().get("xarray.backends", [])
+ entrypoints = entry_points(group="xarray.backends")
return build_engines(entrypoints)
diff --git a/xarray/backends/scipy_.py b/xarray/backends/scipy_.py
index f8c486e512c..83031e1ef8b 100644
--- a/xarray/backends/scipy_.py
+++ b/xarray/backends/scipy_.py
@@ -299,7 +299,7 @@ def guess_can_open(
if magic_number is not None:
return magic_number.startswith(b"CDF")
- if isinstance(filename_or_obj, (str, os.PathLike)):
+ if isinstance(filename_or_obj, str | os.PathLike):
_, ext = os.path.splitext(filename_or_obj)
return ext in {".nc", ".nc4", ".cdf", ".gz"}
diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py
index 8c526ddb58d..0da056e8ad2 100644
--- a/xarray/backends/zarr.py
+++ b/xarray/backends/zarr.py
@@ -1140,7 +1140,7 @@ def guess_can_open(
self,
filename_or_obj: str | os.PathLike[Any] | BufferedIOBase | AbstractDataStore,
) -> bool:
- if isinstance(filename_or_obj, (str, os.PathLike)):
+ if isinstance(filename_or_obj, str | os.PathLike):
_, ext = os.path.splitext(filename_or_obj)
return ext in {".zarr"}
diff --git a/xarray/coding/calendar_ops.py b/xarray/coding/calendar_ops.py
index 6f492e78bf9..1b2875b26f1 100644
--- a/xarray/coding/calendar_ops.py
+++ b/xarray/coding/calendar_ops.py
@@ -362,7 +362,7 @@ def interp_calendar(source, target, dim="time"):
"""
from xarray.core.dataarray import DataArray
- if isinstance(target, (pd.DatetimeIndex, CFTimeIndex)):
+ if isinstance(target, pd.DatetimeIndex | CFTimeIndex):
target = DataArray(target, dims=(dim,), name=dim)
if not _contains_datetime_like_objects(
diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py
index b0caf2a0cd3..ad2f55e585f 100644
--- a/xarray/coding/cftime_offsets.py
+++ b/xarray/coding/cftime_offsets.py
@@ -220,7 +220,7 @@ def _next_higher_resolution(self) -> Tick:
raise ValueError("Could not convert to integer offset at any resolution")
def __mul__(self, other: int | float) -> Tick:
- if not isinstance(other, (int, float)):
+ if not isinstance(other, int | float):
return NotImplemented
if isinstance(other, float):
n = other * self.n
@@ -805,7 +805,7 @@ def to_cftime_datetime(date_str_or_date, calendar=None):
return date
elif isinstance(date_str_or_date, cftime.datetime):
return date_str_or_date
- elif isinstance(date_str_or_date, (datetime, pd.Timestamp)):
+ elif isinstance(date_str_or_date, datetime | pd.Timestamp):
return cftime.DatetimeProlepticGregorian(*date_str_or_date.timetuple())
else:
raise TypeError(
@@ -1409,7 +1409,7 @@ def date_range_like(source, calendar, use_cftime=None):
from xarray.coding.frequencies import infer_freq
from xarray.core.dataarray import DataArray
- if not isinstance(source, (pd.DatetimeIndex, CFTimeIndex)) and (
+ if not isinstance(source, pd.DatetimeIndex | CFTimeIndex) and (
isinstance(source, DataArray)
and (source.ndim != 1)
or not _contains_datetime_like_objects(source.variable)
@@ -1458,7 +1458,7 @@ def date_range_like(source, calendar, use_cftime=None):
# For the cases where the source ends on the end of the month, we expect the same in the new calendar.
if source_end.day == source_end.daysinmonth and isinstance(
- freq_as_offset, (YearEnd, QuarterEnd, MonthEnd, Day)
+ freq_as_offset, YearEnd | QuarterEnd | MonthEnd | Day
):
end = end.replace(day=end.daysinmonth)
diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py
index ef01f4cc79a..ae08abb06a7 100644
--- a/xarray/coding/cftimeindex.py
+++ b/xarray/coding/cftimeindex.py
@@ -566,7 +566,7 @@ def shift( # type: ignore[override] # freq is typed Any, we are more precise
if isinstance(freq, timedelta):
return self + periods * freq
- if isinstance(freq, (str, BaseCFTimeOffset)):
+ if isinstance(freq, str | BaseCFTimeOffset):
from xarray.coding.cftime_offsets import to_offset
return self + periods * to_offset(freq)
diff --git a/xarray/coding/frequencies.py b/xarray/coding/frequencies.py
index b912b9a1fca..8629c491cfb 100644
--- a/xarray/coding/frequencies.py
+++ b/xarray/coding/frequencies.py
@@ -82,7 +82,7 @@ def infer_freq(index):
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
- if isinstance(index, (DataArray, pd.Series)):
+ if isinstance(index, DataArray | pd.Series):
if index.ndim != 1:
raise ValueError("'index' must be 1D")
elif not _contains_datetime_like_objects(Variable("dim", index)):
diff --git a/xarray/coding/times.py b/xarray/coding/times.py
index badb9259b06..70df8c6c390 100644
--- a/xarray/coding/times.py
+++ b/xarray/coding/times.py
@@ -2,10 +2,10 @@
import re
import warnings
-from collections.abc import Hashable
+from collections.abc import Callable, Hashable
from datetime import datetime, timedelta
from functools import partial
-from typing import Callable, Literal, Union, cast
+from typing import Literal, Union, cast
import numpy as np
import pandas as pd
diff --git a/xarray/coding/variables.py b/xarray/coding/variables.py
index c240cfe5939..441ddfe7bfd 100644
--- a/xarray/coding/variables.py
+++ b/xarray/coding/variables.py
@@ -3,9 +3,9 @@
from __future__ import annotations
import warnings
-from collections.abc import Hashable, MutableMapping
+from collections.abc import Callable, Hashable, MutableMapping
from functools import partial
-from typing import TYPE_CHECKING, Any, Callable, Union
+from typing import TYPE_CHECKING, Any, Union
import numpy as np
import pandas as pd
diff --git a/xarray/core/_aggregations.py b/xarray/core/_aggregations.py
index acc534d8b52..5342db31fd0 100644
--- a/xarray/core/_aggregations.py
+++ b/xarray/core/_aggregations.py
@@ -4,8 +4,8 @@
from __future__ import annotations
-from collections.abc import Sequence
-from typing import TYPE_CHECKING, Any, Callable
+from collections.abc import Callable, Sequence
+from typing import TYPE_CHECKING, Any
from xarray.core import duck_array_ops
from xarray.core.options import OPTIONS
diff --git a/xarray/core/_typed_ops.py b/xarray/core/_typed_ops.py
index 61aa1846bd0..553f5e4bc57 100644
--- a/xarray/core/_typed_ops.py
+++ b/xarray/core/_typed_ops.py
@@ -5,7 +5,8 @@
from __future__ import annotations
import operator
-from typing import TYPE_CHECKING, Any, Callable, overload
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any, overload
from xarray.core import nputils, ops
from xarray.core.types import (
diff --git a/xarray/core/accessor_str.py b/xarray/core/accessor_str.py
index a48fbc91faf..c1a2d958c83 100644
--- a/xarray/core/accessor_str.py
+++ b/xarray/core/accessor_str.py
@@ -42,11 +42,11 @@
import codecs
import re
import textwrap
-from collections.abc import Hashable, Mapping
+from collections.abc import Callable, Hashable, Mapping
from functools import reduce
from operator import or_ as set_union
from re import Pattern
-from typing import TYPE_CHECKING, Any, Callable, Generic
+from typing import TYPE_CHECKING, Any, Generic
from unicodedata import normalize
import numpy as np
@@ -90,7 +90,7 @@ def _contains_obj_type(*, pat: Any, checker: Any) -> bool:
def _contains_str_like(pat: Any) -> bool:
"""Determine if the object is a str-like or array of str-like."""
- if isinstance(pat, (str, bytes)):
+ if isinstance(pat, str | bytes):
return True
if not hasattr(pat, "dtype"):
diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py
index 44fc7319170..a28376d2890 100644
--- a/xarray/core/alignment.py
+++ b/xarray/core/alignment.py
@@ -3,9 +3,9 @@
import functools
import operator
from collections import defaultdict
-from collections.abc import Hashable, Iterable, Mapping
+from collections.abc import Callable, Hashable, Iterable, Mapping
from contextlib import suppress
-from typing import TYPE_CHECKING, Any, Callable, Final, Generic, TypeVar, cast, overload
+from typing import TYPE_CHECKING, Any, Final, Generic, TypeVar, cast, overload
import numpy as np
import pandas as pd
@@ -904,7 +904,7 @@ def deep_align(
indexes = {}
def is_alignable(obj):
- return isinstance(obj, (Coordinates, DataArray, Dataset))
+ return isinstance(obj, Coordinates | DataArray | Dataset)
positions: list[int] = []
keys: list[type[object] | Hashable] = []
diff --git a/xarray/core/array_api_compat.py b/xarray/core/array_api_compat.py
index 3a94513d5d4..da072de5b69 100644
--- a/xarray/core/array_api_compat.py
+++ b/xarray/core/array_api_compat.py
@@ -2,7 +2,7 @@
def is_weak_scalar_type(t):
- return isinstance(t, (bool, int, float, complex, str, bytes))
+ return isinstance(t, bool | int | float | complex | str | bytes)
def _future_array_api_result_type(*arrays_and_dtypes, xp):
diff --git a/xarray/core/combine.py b/xarray/core/combine.py
index 5cb0a3417fa..6f652cb6597 100644
--- a/xarray/core/combine.py
+++ b/xarray/core/combine.py
@@ -570,7 +570,7 @@ def combine_nested(
if mixed_datasets_and_arrays:
raise ValueError("Can't combine datasets with unnamed arrays.")
- if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
+ if isinstance(concat_dim, str | DataArray) or concat_dim is None:
concat_dim = [concat_dim]
# The IDs argument tells _nested_combine that datasets aren't yet sorted
diff --git a/xarray/core/common.py b/xarray/core/common.py
index 1e9c8ed8e29..664de7146d7 100644
--- a/xarray/core/common.py
+++ b/xarray/core/common.py
@@ -1,11 +1,11 @@
from __future__ import annotations
import warnings
-from collections.abc import Hashable, Iterable, Iterator, Mapping
+from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping
from contextlib import suppress
from html import escape
from textwrap import dedent
-from typing import TYPE_CHECKING, Any, Callable, TypeVar, Union, overload
+from typing import TYPE_CHECKING, Any, TypeVar, Union, overload
import numpy as np
import pandas as pd
@@ -1181,7 +1181,7 @@ def where(self, cond: Any, other: Any = dtypes.NA, drop: bool = False) -> Self:
other = other(self)
if drop:
- if not isinstance(cond, (Dataset, DataArray)):
+ if not isinstance(cond, Dataset | DataArray):
raise TypeError(
f"cond argument is {cond!r} but must be a {Dataset!r} or {DataArray!r} (or a callable than returns one)."
)
@@ -1355,7 +1355,7 @@ def isin(self, test_elements: Any) -> Self:
raise TypeError(
f"isin() argument must be convertible to an array: {test_elements}"
)
- elif isinstance(test_elements, (Variable, DataArray)):
+ elif isinstance(test_elements, Variable | DataArray):
# need to explicitly pull out data to support dask arrays as the
# second argument
test_elements = test_elements.data
diff --git a/xarray/core/computation.py b/xarray/core/computation.py
index bb7122e82de..3e91efc1ede 100644
--- a/xarray/core/computation.py
+++ b/xarray/core/computation.py
@@ -9,8 +9,16 @@
import operator
import warnings
from collections import Counter
-from collections.abc import Hashable, Iterable, Iterator, Mapping, Sequence, Set
-from typing import TYPE_CHECKING, Any, Callable, Literal, TypeVar, Union, cast, overload
+from collections.abc import (
+ Callable,
+ Hashable,
+ Iterable,
+ Iterator,
+ Mapping,
+ Sequence,
+ Set,
+)
+from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, cast, overload
import numpy as np
@@ -1814,7 +1822,7 @@ def dot(
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
- if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
+ if any(not isinstance(arr, Variable | DataArray) for arr in arrays):
raise TypeError(
"Only xr.DataArray and xr.Variable are supported."
f"Given {[type(arr) for arr in arrays]}."
diff --git a/xarray/core/concat.py b/xarray/core/concat.py
index 15292bdb34b..182cf8a23a1 100644
--- a/xarray/core/concat.py
+++ b/xarray/core/concat.py
@@ -308,7 +308,7 @@ def _calc_concat_dim_index(
dim = dim_or_data
index = None
else:
- if not isinstance(dim_or_data, (DataArray, Variable)):
+ if not isinstance(dim_or_data, DataArray | Variable):
dim = getattr(dim_or_data, "name", None)
if dim is None:
dim = "concat_dim"
diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py
index 0818f488b7e..84f229bf575 100644
--- a/xarray/core/dataarray.py
+++ b/xarray/core/dataarray.py
@@ -2,18 +2,23 @@
import datetime
import warnings
-from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence
+from collections.abc import (
+ Callable,
+ Hashable,
+ Iterable,
+ Mapping,
+ MutableMapping,
+ Sequence,
+)
from functools import partial
from os import PathLike
from typing import (
TYPE_CHECKING,
Any,
- Callable,
Generic,
Literal,
NoReturn,
TypeVar,
- Union,
overload,
)
@@ -114,7 +119,7 @@
from xarray.groupers import Grouper, Resampler
from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint
- T_XarrayOther = TypeVar("T_XarrayOther", bound=Union["DataArray", Dataset])
+ T_XarrayOther = TypeVar("T_XarrayOther", bound="DataArray | Dataset")
def _check_coords_dims(shape, coords, dim):
@@ -456,7 +461,7 @@ def __init__(
coords = [data.index]
elif isinstance(data, pd.DataFrame):
coords = [data.index, data.columns]
- elif isinstance(data, (pd.Index, IndexVariable)):
+ elif isinstance(data, pd.Index | IndexVariable):
coords = [data]
if dims is None:
@@ -1422,10 +1427,10 @@ def chunk(
)
chunk_mapping = {}
- if isinstance(chunks, (float, str, int)):
+ if isinstance(chunks, float | str | int):
# ignoring type; unclear why it won't accept a Literal into the value.
chunk_mapping = dict.fromkeys(self.dims, chunks)
- elif isinstance(chunks, (tuple, list)):
+ elif isinstance(chunks, tuple | list):
utils.emit_user_level_warning(
"Supplying chunks as dimension-order tuples is deprecated. "
"It will raise an error in the future. Instead use a dict with dimension names as keys.",
@@ -4732,7 +4737,7 @@ def _binary_op(
) -> Self:
from xarray.core.groupby import GroupBy
- if isinstance(other, (Dataset, GroupBy)):
+ if isinstance(other, Dataset | GroupBy):
return NotImplemented
if isinstance(other, DataArray):
align_type = OPTIONS["arithmetic_join"]
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index cad2f00ccc1..62183adcf71 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -9,6 +9,7 @@
import warnings
from collections import defaultdict
from collections.abc import (
+ Callable,
Collection,
Hashable,
Iterable,
@@ -22,7 +23,7 @@
from numbers import Number
from operator import methodcaller
from os import PathLike
-from typing import IO, TYPE_CHECKING, Any, Callable, Generic, Literal, cast, overload
+from typing import IO, TYPE_CHECKING, Any, Generic, Literal, cast, overload
import numpy as np
from pandas.api.types import is_extension_array_dtype
@@ -2720,7 +2721,7 @@ def chunk(
chunks = {}
chunks_mapping: Mapping[Any, Any]
if not isinstance(chunks, Mapping) and chunks is not None:
- if isinstance(chunks, (tuple, list)):
+ if isinstance(chunks, tuple | list):
utils.emit_user_level_warning(
"Supplying chunks as dimension-order tuples is deprecated. "
"It will raise an error in the future. Instead use a dict with dimensions as keys.",
@@ -2808,7 +2809,7 @@ def _validate_indexers(
# all indexers should be int, slice, np.ndarrays, or Variable
for k, v in indexers.items():
- if isinstance(v, (int, slice, Variable)):
+ if isinstance(v, int | slice | Variable):
yield k, v
elif isinstance(v, DataArray):
yield k, v.variable
@@ -7479,7 +7480,7 @@ def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self:
extension_arrays = []
for k, v in dataframe.items():
if not is_extension_array_dtype(v) or isinstance(
- v.array, (pd.arrays.DatetimeArray, pd.arrays.TimedeltaArray)
+ v.array, pd.arrays.DatetimeArray | pd.arrays.TimedeltaArray
):
arrays.append((k, np.asarray(v)))
else:
@@ -7764,7 +7765,7 @@ def _binary_op(self, other, f, reflexive=False, join=None) -> Dataset:
if isinstance(other, GroupBy):
return NotImplemented
align_type = OPTIONS["arithmetic_join"] if join is None else join
- if isinstance(other, (DataArray, Dataset)):
+ if isinstance(other, DataArray | Dataset):
self, other = align(self, other, join=align_type, copy=False)
g = f if not reflexive else lambda x, y: f(y, x)
ds = self._calculate_binary_op(g, other, join=align_type)
@@ -7784,7 +7785,7 @@ def _inplace_binary_op(self, other, f) -> Self:
)
# we don't actually modify arrays in-place with in-place Dataset
# arithmetic -- this lets us automatically align things
- if isinstance(other, (DataArray, Dataset)):
+ if isinstance(other, DataArray | Dataset):
other = other.reindex_like(self, copy=False)
g = ops.inplace_to_noninplace_op(f)
ds = self._calculate_binary_op(g, other, inplace=True)
@@ -8563,7 +8564,7 @@ def integrate(
a float64 8B 20.0
b float64 8B 4.0
"""
- if not isinstance(coord, (list, tuple)):
+ if not isinstance(coord, list | tuple):
coord = (coord,)
result = self
for c in coord:
@@ -8692,7 +8693,7 @@ def cumulative_integrate(
a (x) float64 32B 0.0 30.0 8.0 20.0
b (x) float64 32B 0.0 9.0 3.0 4.0
"""
- if not isinstance(coord, (list, tuple)):
+ if not isinstance(coord, list | tuple):
coord = (coord,)
result = self
for c in coord:
diff --git a/xarray/core/datatree.py b/xarray/core/datatree.py
index 6289146308e..72faf9c4d17 100644
--- a/xarray/core/datatree.py
+++ b/xarray/core/datatree.py
@@ -3,12 +3,18 @@
import itertools
import textwrap
from collections import ChainMap
-from collections.abc import Hashable, Iterable, Iterator, Mapping, MutableMapping
+from collections.abc import (
+ Callable,
+ Hashable,
+ Iterable,
+ Iterator,
+ Mapping,
+ MutableMapping,
+)
from html import escape
from typing import (
TYPE_CHECKING,
Any,
- Callable,
Generic,
Literal,
NoReturn,
@@ -902,7 +908,7 @@ def _set(self, key: str, val: DataTree | CoercibleValue) -> None:
new_node.name = key
new_node.parent = self
else:
- if not isinstance(val, (DataArray, Variable)):
+ if not isinstance(val, DataArray | Variable):
# accommodate other types that can be coerced into Variables
val = DataArray(val)
@@ -968,7 +974,7 @@ def update(
# Datatree's name is always a string until we fix that (#8836)
new_child.name = str(k)
new_children[str(k)] = new_child
- elif isinstance(v, (DataArray, Variable)):
+ elif isinstance(v, DataArray | Variable):
# TODO this should also accommodate other types that can be coerced into Variables
new_variables[k] = v
else:
diff --git a/xarray/core/datatree_mapping.py b/xarray/core/datatree_mapping.py
index 6e5aae15562..17630466016 100644
--- a/xarray/core/datatree_mapping.py
+++ b/xarray/core/datatree_mapping.py
@@ -2,8 +2,9 @@
import functools
import sys
+from collections.abc import Callable
from itertools import repeat
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING
from xarray.core.dataarray import DataArray
from xarray.core.dataset import Dataset
@@ -257,11 +258,11 @@ def _check_single_set_return_values(
path_to_node: str, obj: Dataset | DataArray | tuple[Dataset | DataArray]
):
"""Check types returned from single evaluation of func, and return number of return values received from func."""
- if isinstance(obj, (Dataset, DataArray)):
+ if isinstance(obj, Dataset | DataArray):
return 1
elif isinstance(obj, tuple):
for r in obj:
- if not isinstance(r, (Dataset, DataArray)):
+ if not isinstance(r, Dataset | DataArray):
raise TypeError(
f"One of the results of calling func on datasets on the nodes at position {path_to_node} is "
f"of type {type(r)}, not Dataset or DataArray."
diff --git a/xarray/core/datatree_render.py b/xarray/core/datatree_render.py
index f10f2540952..98cb4f91495 100644
--- a/xarray/core/datatree_render.py
+++ b/xarray/core/datatree_render.py
@@ -238,7 +238,7 @@ def get() -> Iterator[str]:
if callable(attrname)
else getattr(node, attrname, "")
)
- if isinstance(attr, (list, tuple)):
+ if isinstance(attr, list | tuple):
lines = attr
else:
lines = str(attr).split("\n")
diff --git a/xarray/core/dtypes.py b/xarray/core/dtypes.py
index 2c3a43eeea6..b39f7628fd3 100644
--- a/xarray/core/dtypes.py
+++ b/xarray/core/dtypes.py
@@ -216,7 +216,7 @@ def isdtype(dtype, kind: str | tuple[str, ...], xp=None) -> bool:
def preprocess_scalar_types(t):
- if isinstance(t, (str, bytes)):
+ if isinstance(t, str | bytes):
return type(t)
else:
return t
diff --git a/xarray/core/duck_array_ops.py b/xarray/core/duck_array_ops.py
index 4e6b066591f..204579757e1 100644
--- a/xarray/core/duck_array_ops.py
+++ b/xarray/core/duck_array_ops.py
@@ -10,9 +10,9 @@
import datetime
import inspect
import warnings
+from collections.abc import Callable
from functools import partial
from importlib import import_module
-from typing import Callable
import numpy as np
import pandas as pd
diff --git a/xarray/core/extension_array.py b/xarray/core/extension_array.py
index b0361ef0f0f..b2efeae7bb0 100644
--- a/xarray/core/extension_array.py
+++ b/xarray/core/extension_array.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-from collections.abc import Sequence
-from typing import Callable, Generic, cast
+from collections.abc import Callable, Sequence
+from typing import Generic, cast
import numpy as np
import pandas as pd
diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py
index 6571b288fae..ec78588c527 100644
--- a/xarray/core/formatting.py
+++ b/xarray/core/formatting.py
@@ -181,11 +181,11 @@ def format_timedelta(t, timedelta_format=None):
def format_item(x, timedelta_format=None, quote_strings=True):
"""Returns a succinct summary of an object as a string"""
- if isinstance(x, (np.datetime64, datetime)):
+ if isinstance(x, np.datetime64 | datetime):
return format_timestamp(x)
- if isinstance(x, (np.timedelta64, timedelta)):
+ if isinstance(x, np.timedelta64 | timedelta):
return format_timedelta(x, timedelta_format=timedelta_format)
- elif isinstance(x, (str, bytes)):
+ elif isinstance(x, str | bytes):
if hasattr(x, "dtype"):
x = x.item()
return repr(x) if quote_strings else x
diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py
index 9b0758d030b..faeb0c538c3 100644
--- a/xarray/core/groupby.py
+++ b/xarray/core/groupby.py
@@ -2,9 +2,9 @@
import copy
import warnings
-from collections.abc import Hashable, Iterator, Mapping, Sequence
+from collections.abc import Callable, Hashable, Iterator, Mapping, Sequence
from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Union
+from typing import TYPE_CHECKING, Any, Generic, Literal, Union
import numpy as np
import pandas as pd
@@ -622,7 +622,7 @@ def _binary_op(self, other, f, reflexive=False):
# TODO: explicitly create Index here
coord = DataArray(coord, coords={coord_dim: coord.data})
- if not isinstance(other, (Dataset, DataArray)):
+ if not isinstance(other, Dataset | DataArray):
raise TypeError(
"GroupBy objects only support binary ops "
"when the other argument is a Dataset or "
@@ -699,7 +699,7 @@ def _maybe_restore_empty_groups(self, combined):
(grouper,) = self.groupers
if (
- isinstance(grouper.grouper, (BinGrouper, TimeResampler))
+ isinstance(grouper.grouper, BinGrouper | TimeResampler)
and grouper.name in combined.dims
):
indexers = {grouper.name: grouper.full_index}
diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py
index 9d8a68edbf3..80d15f8cde9 100644
--- a/xarray/core/indexes.py
+++ b/xarray/core/indexes.py
@@ -443,7 +443,7 @@ def safe_cast_to_index(array: Any) -> pd.Index:
if isinstance(array, pd.Index):
index = array
- elif isinstance(array, (DataArray, Variable)):
+ elif isinstance(array, DataArray | Variable):
# returns the original multi-index for pandas.MultiIndex level coordinates
index = array._to_index()
elif isinstance(array, Index):
@@ -480,7 +480,7 @@ def _sanitize_slice_element(x):
f"cannot use non-scalar arrays in a slice for xarray indexing: {x}"
)
- if isinstance(x, (Variable, DataArray)):
+ if isinstance(x, Variable | DataArray):
x = x.values
if isinstance(x, np.ndarray):
@@ -530,7 +530,7 @@ def _asarray_tuplesafe(values):
def _is_nested_tuple(possible_tuple):
return isinstance(possible_tuple, tuple) and any(
- isinstance(value, (tuple, list, slice)) for value in possible_tuple
+ isinstance(value, tuple | list | slice) for value in possible_tuple
)
diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py
index 19937270268..1f5444b6baa 100644
--- a/xarray/core/indexing.py
+++ b/xarray/core/indexing.py
@@ -4,12 +4,12 @@
import functools
import operator
from collections import Counter, defaultdict
-from collections.abc import Hashable, Iterable, Mapping
+from collections.abc import Callable, Hashable, Iterable, Mapping
from contextlib import suppress
from dataclasses import dataclass, field
from datetime import timedelta
from html import escape
-from typing import TYPE_CHECKING, Any, Callable, overload
+from typing import TYPE_CHECKING, Any, overload
import numpy as np
import pandas as pd
@@ -546,7 +546,7 @@ def _vindex_set(self, indexer: VectorizedIndexer, value: Any) -> None:
)
def _check_and_raise_if_non_basic_indexer(self, indexer: ExplicitIndexer) -> None:
- if isinstance(indexer, (VectorizedIndexer, OuterIndexer)):
+ if isinstance(indexer, VectorizedIndexer | OuterIndexer):
raise TypeError(
"Vectorized indexing with vectorized or outer indexers is not supported. "
"Please use .vindex and .oindex properties to index the array."
@@ -708,7 +708,7 @@ def __init__(self, array: duckarray[Any, Any], key: ExplicitIndexer):
Array like object to index.
key : VectorizedIndexer
"""
- if isinstance(key, (BasicIndexer, OuterIndexer)):
+ if isinstance(key, BasicIndexer | OuterIndexer):
self.key = _outer_to_vectorized_indexer(key, array.shape)
elif isinstance(key, VectorizedIndexer):
self.key = _arrayize_vectorized_indexer(key, array.shape)
@@ -1045,7 +1045,7 @@ def decompose_indexer(
) -> tuple[ExplicitIndexer, ExplicitIndexer]:
if isinstance(indexer, VectorizedIndexer):
return _decompose_vectorized_indexer(indexer, shape, indexing_support)
- if isinstance(indexer, (BasicIndexer, OuterIndexer)):
+ if isinstance(indexer, BasicIndexer | OuterIndexer):
return _decompose_outer_indexer(indexer, shape, indexing_support)
raise TypeError(f"unexpected key type: {indexer}")
@@ -1204,7 +1204,7 @@ def _decompose_outer_indexer(
backend_indexer: list[Any] = []
np_indexer: list[Any] = []
- assert isinstance(indexer, (OuterIndexer, BasicIndexer))
+ assert isinstance(indexer, OuterIndexer | BasicIndexer)
if indexing_support == IndexingSupport.VECTORIZED:
for k, s in zip(indexer.tuple, shape):
@@ -1474,7 +1474,7 @@ def is_fancy_indexer(indexer: Any) -> bool:
"""Return False if indexer is a int, slice, a 1-dimensional list, or a 0 or
1-dimensional ndarray; in all other cases return True
"""
- if isinstance(indexer, (int, slice)):
+ if isinstance(indexer, int | slice):
return False
if isinstance(indexer, np.ndarray):
return indexer.ndim > 1
diff --git a/xarray/core/iterators.py b/xarray/core/iterators.py
index ae748b0066c..1ba3c6f1675 100644
--- a/xarray/core/iterators.py
+++ b/xarray/core/iterators.py
@@ -1,7 +1,6 @@
from __future__ import annotations
-from collections.abc import Iterator
-from typing import Callable
+from collections.abc import Callable, Iterator
from xarray.core.treenode import Tree
diff --git a/xarray/core/merge.py b/xarray/core/merge.py
index a90e59e7c0b..d1eaa43ad89 100644
--- a/xarray/core/merge.py
+++ b/xarray/core/merge.py
@@ -2,7 +2,7 @@
from collections import defaultdict
from collections.abc import Hashable, Iterable, Mapping, Sequence, Set
-from typing import TYPE_CHECKING, Any, NamedTuple, Optional, Union
+from typing import TYPE_CHECKING, Any, NamedTuple, Union
import pandas as pd
@@ -158,7 +158,7 @@ def _assert_compat_valid(compat):
raise ValueError(f"compat={compat!r} invalid: must be {set(_VALID_COMPAT)}")
-MergeElement = tuple[Variable, Optional[Index]]
+MergeElement = tuple[Variable, Index | None]
def _assert_prioritized_valid(
@@ -342,7 +342,7 @@ def append_all(variables, indexes):
append(name, variable, indexes.get(name))
for mapping in list_of_mappings:
- if isinstance(mapping, (Coordinates, Dataset)):
+ if isinstance(mapping, Coordinates | Dataset):
append_all(mapping.variables, mapping.xindexes)
continue
@@ -477,7 +477,7 @@ def coerce_pandas_values(objects: Iterable[CoercibleMapping]) -> list[DatasetLik
out: list[DatasetLike] = []
for obj in objects:
variables: DatasetLike
- if isinstance(obj, (Dataset, Coordinates)):
+ if isinstance(obj, Dataset | Coordinates):
variables = obj
else:
variables = {}
@@ -721,7 +721,7 @@ def merge_core(
)
attrs = merge_attrs(
- [var.attrs for var in coerced if isinstance(var, (Dataset, DataArray))],
+ [var.attrs for var in coerced if isinstance(var, Dataset | DataArray)],
combine_attrs,
)
@@ -961,7 +961,7 @@ def merge(
dict_like_objects = []
for obj in objects:
- if not isinstance(obj, (DataArray, Dataset, Coordinates, dict)):
+ if not isinstance(obj, DataArray | Dataset | Coordinates | dict):
raise TypeError(
"objects must be an iterable containing only "
"Dataset(s), DataArray(s), and dictionaries."
diff --git a/xarray/core/missing.py b/xarray/core/missing.py
index bfbad72649a..187a93d322f 100644
--- a/xarray/core/missing.py
+++ b/xarray/core/missing.py
@@ -2,10 +2,10 @@
import datetime as dt
import warnings
-from collections.abc import Hashable, Sequence
+from collections.abc import Callable, Hashable, Sequence
from functools import partial
from numbers import Number
-from typing import TYPE_CHECKING, Any, Callable, get_args
+from typing import TYPE_CHECKING, Any, get_args
import numpy as np
import pandas as pd
@@ -286,7 +286,7 @@ def get_clean_interp_index(
# Special case for non-standard calendar indexes
# Numerical datetime values are defined with respect to 1970-01-01T00:00:00 in units of nanoseconds
- if isinstance(index, (CFTimeIndex, pd.DatetimeIndex)):
+ if isinstance(index, CFTimeIndex | pd.DatetimeIndex):
offset = type(index[0])(1970, 1, 1)
if isinstance(index, CFTimeIndex):
index = index.values
@@ -338,7 +338,7 @@ def interp_na(
if (
dim in self._indexes
and isinstance(
- self._indexes[dim].to_pandas_index(), (pd.DatetimeIndex, CFTimeIndex)
+ self._indexes[dim].to_pandas_index(), pd.DatetimeIndex | CFTimeIndex
)
and use_coordinate
):
@@ -346,7 +346,7 @@ def interp_na(
max_gap = timedelta_to_numeric(max_gap)
if not use_coordinate:
- if not isinstance(max_gap, (Number, np.number)):
+ if not isinstance(max_gap, Number | np.number):
raise TypeError(
f"Expected integer or floating point max_gap since use_coordinate=False. Received {max_type}."
)
diff --git a/xarray/core/nputils.py b/xarray/core/nputils.py
index 1d30fe9db9e..70ddeb813f3 100644
--- a/xarray/core/nputils.py
+++ b/xarray/core/nputils.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import warnings
-from typing import Callable
+from collections.abc import Callable
import numpy as np
import pandas as pd
diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py
index 41311497f8b..9c68ee3a1c5 100644
--- a/xarray/core/parallel.py
+++ b/xarray/core/parallel.py
@@ -3,8 +3,8 @@
import collections
import itertools
import operator
-from collections.abc import Hashable, Iterable, Mapping, Sequence
-from typing import TYPE_CHECKING, Any, Callable, Literal, TypedDict
+from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence
+from typing import TYPE_CHECKING, Any, Literal, TypedDict
import numpy as np
@@ -131,7 +131,7 @@ def infer_template(
"Please supply the 'template' kwarg to map_blocks."
) from e
- if not isinstance(template, (Dataset, DataArray)):
+ if not isinstance(template, Dataset | DataArray):
raise TypeError(
"Function must return an xarray DataArray or Dataset. Instead it returned "
f"{type(template)}"
@@ -351,7 +351,7 @@ def _wrapper(
result = func(*converted_args, **kwargs)
merged_coordinates = merge(
- [arg.coords for arg in args if isinstance(arg, (Dataset, DataArray))]
+ [arg.coords for arg in args if isinstance(arg, Dataset | DataArray)]
).coords
# check all dims are present
@@ -387,7 +387,7 @@ def _wrapper(
return make_dict(result)
- if template is not None and not isinstance(template, (DataArray, Dataset)):
+ if template is not None and not isinstance(template, DataArray | Dataset):
raise TypeError(
f"template must be a DataArray or Dataset. Received {type(template).__name__} instead."
)
@@ -417,7 +417,7 @@ def _wrapper(
pass
all_args = [obj] + list(args)
- is_xarray = [isinstance(arg, (Dataset, DataArray)) for arg in all_args]
+ is_xarray = [isinstance(arg, Dataset | DataArray) for arg in all_args]
is_array = [isinstance(arg, DataArray) for arg in all_args]
# there should be a better way to group this. partition?
diff --git a/xarray/core/resample.py b/xarray/core/resample.py
index 86b550466da..677de48f0b6 100644
--- a/xarray/core/resample.py
+++ b/xarray/core/resample.py
@@ -1,8 +1,8 @@
from __future__ import annotations
import warnings
-from collections.abc import Hashable, Iterable, Sequence
-from typing import TYPE_CHECKING, Any, Callable
+from collections.abc import Callable, Hashable, Iterable, Sequence
+from typing import TYPE_CHECKING, Any
from xarray.core._aggregations import (
DataArrayResampleAggregations,
diff --git a/xarray/core/resample_cftime.py b/xarray/core/resample_cftime.py
index caa2d166d24..2149a62dfb5 100644
--- a/xarray/core/resample_cftime.py
+++ b/xarray/core/resample_cftime.py
@@ -84,7 +84,7 @@ def __init__(
self.freq = to_offset(freq)
self.origin = origin
- if isinstance(self.freq, (MonthEnd, QuarterEnd, YearEnd)):
+ if isinstance(self.freq, MonthEnd | QuarterEnd | YearEnd):
if closed is None:
self.closed = "right"
else:
@@ -272,7 +272,7 @@ def _adjust_bin_edges(
CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object')
"""
- if isinstance(freq, (MonthEnd, QuarterEnd, YearEnd)):
+ if isinstance(freq, MonthEnd | QuarterEnd | YearEnd):
if closed == "right":
datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1)
if datetime_bins[-2] > index.max():
@@ -485,7 +485,7 @@ def _convert_offset_to_timedelta(
) -> datetime.timedelta:
if isinstance(offset, datetime.timedelta):
return offset
- if isinstance(offset, (str, Tick)):
+ if isinstance(offset, str | Tick):
timedelta_cftime_offset = to_offset(offset)
if isinstance(timedelta_cftime_offset, Tick):
return timedelta_cftime_offset.as_timedelta()
diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py
index 6cf49fc995b..f7dd1210919 100644
--- a/xarray/core/rolling.py
+++ b/xarray/core/rolling.py
@@ -4,8 +4,8 @@
import itertools
import math
import warnings
-from collections.abc import Hashable, Iterator, Mapping
-from typing import TYPE_CHECKING, Any, Callable, Generic, TypeVar
+from collections.abc import Callable, Hashable, Iterator, Mapping
+from typing import TYPE_CHECKING, Any, Generic, TypeVar
import numpy as np
from packaging.version import Version
diff --git a/xarray/core/types.py b/xarray/core/types.py
index 591320d26da..0e432283ba9 100644
--- a/xarray/core/types.py
+++ b/xarray/core/types.py
@@ -2,11 +2,10 @@
import datetime
import sys
-from collections.abc import Collection, Hashable, Iterator, Mapping, Sequence
+from collections.abc import Callable, Collection, Hashable, Iterator, Mapping, Sequence
from typing import (
TYPE_CHECKING,
Any,
- Callable,
Literal,
Protocol,
SupportsIndex,
@@ -21,7 +20,9 @@
if sys.version_info >= (3, 11):
from typing import Self, TypeAlias
else:
- from typing_extensions import Self, TypeAlias
+ from typing import TypeAlias
+
+ from typing_extensions import Self
except ImportError:
if TYPE_CHECKING:
raise
@@ -96,9 +97,9 @@
except ImportError:
CFTimeDatetime = np.datetime64
-DatetimeLike: TypeAlias = Union[
- pd.Timestamp, datetime.datetime, np.datetime64, CFTimeDatetime
-]
+DatetimeLike: TypeAlias = (
+ pd.Timestamp | datetime.datetime | np.datetime64 | CFTimeDatetime
+)
class Alignable(Protocol):
@@ -191,11 +192,11 @@ def copy(
# FYI in some cases we don't allow `None`, which this doesn't take account of.
# FYI the `str` is for a size string, e.g. "16MB", supported by dask.
-T_ChunkDim: TypeAlias = Union[str, int, Literal["auto"], None, tuple[int, ...]]
+T_ChunkDim: TypeAlias = str | int | Literal["auto"] | None | tuple[int, ...]
T_ChunkDimFreq: TypeAlias = Union["TimeResampler", T_ChunkDim]
-T_ChunksFreq: TypeAlias = Union[T_ChunkDim, Mapping[Any, T_ChunkDimFreq]]
+T_ChunksFreq: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDimFreq]
# We allow the tuple form of this (though arguably we could transition to named dims only)
-T_Chunks: TypeAlias = Union[T_ChunkDim, Mapping[Any, T_ChunkDim]]
+T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim]
T_NormalizedChunks = tuple[tuple[int, ...], ...]
DataVars = Mapping[Any, Any]
diff --git a/xarray/core/utils.py b/xarray/core/utils.py
index c2859632360..008e1d101c9 100644
--- a/xarray/core/utils.py
+++ b/xarray/core/utils.py
@@ -47,6 +47,7 @@
import sys
import warnings
from collections.abc import (
+ Callable,
Collection,
Container,
Hashable,
@@ -62,7 +63,7 @@
)
from enum import Enum
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, TypeVar, overload
+from typing import TYPE_CHECKING, Any, Generic, Literal, TypeGuard, TypeVar, overload
import numpy as np
import pandas as pd
@@ -255,7 +256,7 @@ def is_full_slice(value: Any) -> bool:
def is_list_like(value: Any) -> TypeGuard[list | tuple]:
- return isinstance(value, (list, tuple))
+ return isinstance(value, list | tuple)
def _is_scalar(value, include_0d):
@@ -265,7 +266,7 @@ def _is_scalar(value, include_0d):
include_0d = getattr(value, "ndim", None) == 0
return (
include_0d
- or isinstance(value, (str, bytes))
+ or isinstance(value, str | bytes)
or not (
isinstance(value, (Iterable,) + NON_NUMPY_SUPPORTED_ARRAY_TYPES)
or hasattr(value, "__array_function__")
@@ -274,34 +275,12 @@ def _is_scalar(value, include_0d):
)
-# See GH5624, this is a convoluted way to allow type-checking to use `TypeGuard` without
-# requiring typing_extensions as a required dependency to _run_ the code (it is required
-# to type-check).
-try:
- if sys.version_info >= (3, 10):
- from typing import TypeGuard
- else:
- from typing_extensions import TypeGuard
-except ImportError:
- if TYPE_CHECKING:
- raise
- else:
-
- def is_scalar(value: Any, include_0d: bool = True) -> bool:
- """Whether to treat a value as a scalar.
-
- Any non-iterable, string, or 0-D array
- """
- return _is_scalar(value, include_0d)
+def is_scalar(value: Any, include_0d: bool = True) -> TypeGuard[Hashable]:
+ """Whether to treat a value as a scalar.
-else:
-
- def is_scalar(value: Any, include_0d: bool = True) -> TypeGuard[Hashable]:
- """Whether to treat a value as a scalar.
-
- Any non-iterable, string, or 0-D array
- """
- return _is_scalar(value, include_0d)
+ Any non-iterable, string, or 0-D array
+ """
+ return _is_scalar(value, include_0d)
def is_valid_numpy_dtype(dtype: Any) -> bool:
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index 828c53e6187..3cd8e4acbd5 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -5,10 +5,10 @@
import math
import numbers
import warnings
-from collections.abc import Hashable, Mapping, Sequence
+from collections.abc import Callable, Hashable, Mapping, Sequence
from datetime import timedelta
from functools import partial
-from typing import TYPE_CHECKING, Any, Callable, NoReturn, cast
+from typing import TYPE_CHECKING, Any, NoReturn, cast
import numpy as np
import pandas as pd
@@ -147,9 +147,9 @@ def as_variable(
)
elif utils.is_scalar(obj):
obj = Variable([], obj)
- elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
+ elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None:
obj = Variable(obj.name, obj)
- elif isinstance(obj, (set, dict)):
+ elif isinstance(obj, set | dict):
raise TypeError(f"variable {name!r} has invalid type {type(obj)!r}")
elif name is not None:
data: T_DuckArray = as_compatible_data(obj)
@@ -250,9 +250,9 @@ def _possibly_convert_datetime_or_timedelta_index(data):
handle non-nanosecond precision datetimes or timedeltas in our code
before allowing such values to pass through unchanged."""
if isinstance(data, PandasIndexingAdapter):
- if isinstance(data.array, (pd.DatetimeIndex, pd.TimedeltaIndex)):
+ if isinstance(data.array, pd.DatetimeIndex | pd.TimedeltaIndex):
data = PandasIndexingAdapter(_as_nanosecond_precision(data.array))
- elif isinstance(data, (pd.DatetimeIndex, pd.TimedeltaIndex)):
+ elif isinstance(data, pd.DatetimeIndex | pd.TimedeltaIndex):
data = _as_nanosecond_precision(data)
return data
@@ -298,7 +298,7 @@ def as_compatible_data(
data = np.timedelta64(getattr(data, "value", data), "ns")
# we don't want nested self-described arrays
- if isinstance(data, (pd.Series, pd.DataFrame)):
+ if isinstance(data, pd.Series | pd.DataFrame):
data = data.values # type: ignore[assignment]
if isinstance(data, np.ma.MaskedArray):
@@ -425,7 +425,7 @@ def _new(
@property
def _in_memory(self):
return isinstance(
- self._data, (np.ndarray, np.number, PandasIndexingAdapter)
+ self._data, np.ndarray | np.number | PandasIndexingAdapter
) or (
isinstance(self._data, indexing.MemoryCachedArray)
and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
@@ -2305,7 +2305,7 @@ def _unary_op(self, f, *args, **kwargs):
return result
def _binary_op(self, other, f, reflexive=False):
- if isinstance(other, (xr.DataArray, xr.Dataset)):
+ if isinstance(other, xr.DataArray | xr.Dataset):
return NotImplemented
if reflexive and issubclass(type(self), type(other)):
other_data, self_data, dims = _broadcast_compat_data(other, self)
diff --git a/xarray/groupers.py b/xarray/groupers.py
index becb005b66c..98409dfe542 100644
--- a/xarray/groupers.py
+++ b/xarray/groupers.py
@@ -66,7 +66,7 @@ def __post_init__(self):
raise ValueError("Please set a name on the array you are grouping by.")
assert isinstance(self.full_index, pd.Index)
assert (
- isinstance(self.unique_coord, (Variable, _DummyGroup))
+ isinstance(self.unique_coord, Variable | _DummyGroup)
or self.unique_coord is None
)
diff --git a/xarray/namedarray/_aggregations.py b/xarray/namedarray/_aggregations.py
index 9f58aeb791d..4889b7bd106 100644
--- a/xarray/namedarray/_aggregations.py
+++ b/xarray/namedarray/_aggregations.py
@@ -4,8 +4,8 @@
from __future__ import annotations
-from collections.abc import Sequence
-from typing import Any, Callable
+from collections.abc import Callable, Sequence
+from typing import Any
from xarray.core import duck_array_ops
from xarray.core.types import Dims, Self
diff --git a/xarray/namedarray/_typing.py b/xarray/namedarray/_typing.py
index c8d6953fc72..57b17385558 100644
--- a/xarray/namedarray/_typing.py
+++ b/xarray/namedarray/_typing.py
@@ -1,13 +1,12 @@
from __future__ import annotations
import sys
-from collections.abc import Hashable, Iterable, Mapping, Sequence
+from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence
from enum import Enum
from types import ModuleType
from typing import (
TYPE_CHECKING,
Any,
- Callable,
Final,
Literal,
Protocol,
@@ -24,7 +23,7 @@
if sys.version_info >= (3, 11):
from typing import TypeAlias
else:
- from typing_extensions import TypeAlias
+ from typing import TypeAlias
except ImportError:
if TYPE_CHECKING:
raise
@@ -79,9 +78,9 @@ def dtype(self) -> _DType_co: ...
_Chunks = tuple[_Shape, ...]
_NormalizedChunks = tuple[tuple[int, ...], ...]
# FYI in some cases we don't allow `None`, which this doesn't take account of.
-T_ChunkDim: TypeAlias = Union[int, Literal["auto"], None, tuple[int, ...]]
+T_ChunkDim: TypeAlias = int | Literal["auto"] | None | tuple[int, ...]
# We allow the tuple form of this (though arguably we could transition to named dims only)
-T_Chunks: TypeAlias = Union[T_ChunkDim, Mapping[Any, T_ChunkDim]]
+T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim]
_Dim = Hashable
_Dims = tuple[_Dim, ...]
@@ -93,7 +92,7 @@ def dtype(self) -> _DType_co: ...
# https://github.com/numpy/numpy/pull/25022
# https://github.com/data-apis/array-api/pull/674
_IndexKey = Union[int, slice, "ellipsis"]
-_IndexKeys = tuple[Union[_IndexKey], ...] # tuple[Union[_IndexKey, None], ...]
+_IndexKeys = tuple[_IndexKey, ...] # tuple[Union[_IndexKey, None], ...]
_IndexKeyLike = Union[_IndexKey, _IndexKeys]
_AttrsLike = Union[Mapping[Any, Any], None]
diff --git a/xarray/namedarray/core.py b/xarray/namedarray/core.py
index e9668d89d94..c5841f6913e 100644
--- a/xarray/namedarray/core.py
+++ b/xarray/namedarray/core.py
@@ -4,11 +4,10 @@
import math
import sys
import warnings
-from collections.abc import Hashable, Iterable, Mapping, Sequence
+from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence
from typing import (
TYPE_CHECKING,
Any,
- Callable,
Generic,
Literal,
TypeVar,
@@ -804,7 +803,7 @@ def chunk(
)
chunks = {}
- if isinstance(chunks, (float, str, int, tuple, list)):
+ if isinstance(chunks, float | str | int | tuple | list):
# TODO we shouldn't assume here that other chunkmanagers can handle these types
# TODO should we call normalize_chunks here?
pass # dask.array.from_array can handle these directly
diff --git a/xarray/namedarray/daskmanager.py b/xarray/namedarray/daskmanager.py
index 963d12fd865..a056f4e00bd 100644
--- a/xarray/namedarray/daskmanager.py
+++ b/xarray/namedarray/daskmanager.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-from collections.abc import Iterable, Sequence
-from typing import TYPE_CHECKING, Any, Callable
+from collections.abc import Callable, Iterable, Sequence
+from typing import TYPE_CHECKING, Any
import numpy as np
from packaging.version import Version
diff --git a/xarray/namedarray/dtypes.py b/xarray/namedarray/dtypes.py
index 7a83bd17064..a29fbdfd41c 100644
--- a/xarray/namedarray/dtypes.py
+++ b/xarray/namedarray/dtypes.py
@@ -1,13 +1,7 @@
from __future__ import annotations
import functools
-import sys
-from typing import Any, Literal
-
-if sys.version_info >= (3, 10):
- from typing import TypeGuard
-else:
- from typing_extensions import TypeGuard
+from typing import Any, Literal, TypeGuard
import numpy as np
diff --git a/xarray/namedarray/parallelcompat.py b/xarray/namedarray/parallelcompat.py
index dd555fe200a..3394aca9a5e 100644
--- a/xarray/namedarray/parallelcompat.py
+++ b/xarray/namedarray/parallelcompat.py
@@ -7,11 +7,10 @@
from __future__ import annotations
import functools
-import sys
from abc import ABC, abstractmethod
-from collections.abc import Iterable, Sequence
+from collections.abc import Callable, Iterable, Sequence
from importlib.metadata import EntryPoint, entry_points
-from typing import TYPE_CHECKING, Any, Callable, Generic, Protocol, TypeVar
+from typing import TYPE_CHECKING, Any, Generic, Protocol, TypeVar
import numpy as np
@@ -56,15 +55,8 @@ def list_chunkmanagers() -> dict[str, ChunkManagerEntrypoint[Any]]:
chunkmanagers : dict
Dictionary whose values are registered ChunkManagerEntrypoint subclass instances, and whose values
are the strings under which they are registered.
-
- Notes
- -----
- # New selection mechanism introduced with Python 3.10. See GH6514.
"""
- if sys.version_info >= (3, 10):
- entrypoints = entry_points(group="xarray.chunkmanagers")
- else:
- entrypoints = entry_points().get("xarray.chunkmanagers", ())
+ entrypoints = entry_points(group="xarray.chunkmanagers")
return load_chunkmanagers(entrypoints)
diff --git a/xarray/namedarray/utils.py b/xarray/namedarray/utils.py
index b82a80b546a..e3a4f6ba1ad 100644
--- a/xarray/namedarray/utils.py
+++ b/xarray/namedarray/utils.py
@@ -1,7 +1,6 @@
from __future__ import annotations
import importlib
-import sys
import warnings
from collections.abc import Hashable, Iterable, Iterator, Mapping
from functools import lru_cache
@@ -13,10 +12,7 @@
from xarray.namedarray._typing import ErrorOptionsWithWarn, _DimsLike
if TYPE_CHECKING:
- if sys.version_info >= (3, 10):
- from typing import TypeGuard
- else:
- from typing_extensions import TypeGuard
+ from typing import TypeGuard
from numpy.typing import NDArray
diff --git a/xarray/plot/dataarray_plot.py b/xarray/plot/dataarray_plot.py
index ed752d3461f..ae10c3e9920 100644
--- a/xarray/plot/dataarray_plot.py
+++ b/xarray/plot/dataarray_plot.py
@@ -2,8 +2,8 @@
import functools
import warnings
-from collections.abc import Hashable, Iterable, MutableMapping
-from typing import TYPE_CHECKING, Any, Callable, Literal, Union, cast, overload
+from collections.abc import Callable, Hashable, Iterable, MutableMapping
+from typing import TYPE_CHECKING, Any, Literal, Union, cast, overload
import numpy as np
import pandas as pd
diff --git a/xarray/plot/dataset_plot.py b/xarray/plot/dataset_plot.py
index 96b59f6174e..8ad3b4a6296 100644
--- a/xarray/plot/dataset_plot.py
+++ b/xarray/plot/dataset_plot.py
@@ -3,8 +3,8 @@
import functools
import inspect
import warnings
-from collections.abc import Hashable, Iterable
-from typing import TYPE_CHECKING, Any, Callable, TypeVar, overload
+from collections.abc import Callable, Hashable, Iterable
+from typing import TYPE_CHECKING, Any, TypeVar, overload
from xarray.core.alignment import broadcast
from xarray.plot import dataarray_plot
diff --git a/xarray/plot/facetgrid.py b/xarray/plot/facetgrid.py
index 613362ed5d5..4c0d9b96a03 100644
--- a/xarray/plot/facetgrid.py
+++ b/xarray/plot/facetgrid.py
@@ -3,8 +3,8 @@
import functools
import itertools
import warnings
-from collections.abc import Hashable, Iterable, MutableMapping
-from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, TypeVar, cast
+from collections.abc import Callable, Hashable, Iterable, MutableMapping
+from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, cast
import numpy as np
diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py
index a0abe0c8cfd..a037123a46f 100644
--- a/xarray/plot/utils.py
+++ b/xarray/plot/utils.py
@@ -3,10 +3,17 @@
import itertools
import textwrap
import warnings
-from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence
+from collections.abc import (
+ Callable,
+ Hashable,
+ Iterable,
+ Mapping,
+ MutableMapping,
+ Sequence,
+)
from datetime import date, datetime
from inspect import getfullargspec
-from typing import TYPE_CHECKING, Any, Callable, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, overload
import numpy as np
import pandas as pd
@@ -119,7 +126,7 @@ def _color_palette(cmap, n_colors):
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1.0, n_colors)
- if isinstance(cmap, (list, tuple)):
+ if isinstance(cmap, list | tuple):
# we have a list of colors
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
@@ -925,7 +932,7 @@ def _process_cmap_cbar_kwargs(
# we should not be getting a list of colors in cmap anymore
# is there a better way to do this test?
- if isinstance(cmap, (list, tuple)):
+ if isinstance(cmap, list | tuple):
raise ValueError(
"Specifying a list of colors in cmap is deprecated. "
"Use colors keyword instead."
diff --git a/xarray/testing/assertions.py b/xarray/testing/assertions.py
index 2a4c17e115a..5bc9c8c1016 100644
--- a/xarray/testing/assertions.py
+++ b/xarray/testing/assertions.py
@@ -3,7 +3,7 @@
import functools
import warnings
from collections.abc import Hashable
-from typing import Union, overload
+from typing import overload
import numpy as np
import pandas as pd
@@ -98,7 +98,7 @@ def assert_isomorphic(a: DataTree, b: DataTree, from_root: bool = False):
def maybe_transpose_dims(a, b, check_dim_order: bool):
"""Helper for assert_equal/allclose/identical"""
__tracebackhide__ = True
- if not isinstance(a, (Variable, DataArray, Dataset)):
+ if not isinstance(a, Variable | DataArray | Dataset):
return b
if not check_dim_order and set(a.dims) == set(b.dims):
# Ensure transpose won't fail if a dimension is missing
@@ -152,7 +152,7 @@ def assert_equal(a, b, from_root=True, check_dim_order: bool = True):
type(a) == type(b) or isinstance(a, Coordinates) and isinstance(b, Coordinates)
)
b = maybe_transpose_dims(a, b, check_dim_order)
- if isinstance(a, (Variable, DataArray)):
+ if isinstance(a, Variable | DataArray):
assert a.equals(b), formatting.diff_array_repr(a, b, "equals")
elif isinstance(a, Dataset):
assert a.equals(b), formatting.diff_dataset_repr(a, b, "equals")
@@ -213,7 +213,7 @@ def assert_identical(a, b, from_root=True):
elif isinstance(a, DataArray):
assert a.name == b.name
assert a.identical(b), formatting.diff_array_repr(a, b, "identical")
- elif isinstance(a, (Dataset, Variable)):
+ elif isinstance(a, Dataset | Variable):
assert a.identical(b), formatting.diff_dataset_repr(a, b, "identical")
elif isinstance(a, Coordinates):
assert a.identical(b), formatting.diff_coords_repr(a, b, "identical")
@@ -429,10 +429,10 @@ def _assert_variable_invariants(var: Variable, name: Hashable = None):
var._dims,
var._data.shape,
)
- assert isinstance(var._encoding, (type(None), dict)), name_or_empty + (
+ assert isinstance(var._encoding, type(None) | dict), name_or_empty + (
var._encoding,
)
- assert isinstance(var._attrs, (type(None), dict)), name_or_empty + (var._attrs,)
+ assert isinstance(var._attrs, type(None) | dict), name_or_empty + (var._attrs,)
def _assert_dataarray_invariants(da: DataArray, check_default_indexes: bool):
@@ -491,12 +491,12 @@ def _assert_dataset_invariants(ds: Dataset, check_default_indexes: bool):
ds._indexes, ds._variables, ds._dims, check_default=check_default_indexes
)
- assert isinstance(ds._encoding, (type(None), dict))
- assert isinstance(ds._attrs, (type(None), dict))
+ assert isinstance(ds._encoding, type(None) | dict)
+ assert isinstance(ds._attrs, type(None) | dict)
def _assert_internal_invariants(
- xarray_obj: Union[DataArray, Dataset, Variable], check_default_indexes: bool
+ xarray_obj: DataArray | Dataset | Variable, check_default_indexes: bool
):
"""Validate that an xarray object satisfies its own internal invariants.
diff --git a/xarray/testing/strategies.py b/xarray/testing/strategies.py
index 085b70e518b..b76733d113f 100644
--- a/xarray/testing/strategies.py
+++ b/xarray/testing/strategies.py
@@ -1,5 +1,5 @@
from collections.abc import Hashable, Iterable, Mapping, Sequence
-from typing import TYPE_CHECKING, Any, Protocol, Union, overload
+from typing import TYPE_CHECKING, Any, Protocol, overload
try:
import hypothesis.strategies as st
@@ -141,7 +141,7 @@ def dimension_sizes(
min_dims: int = 0,
max_dims: int = 3,
min_side: int = 1,
- max_side: Union[int, None] = None,
+ max_side: int | None = None,
) -> st.SearchStrategy[Mapping[Hashable, int]]:
"""
Generates an arbitrary mapping from dimension names to lengths.
@@ -224,11 +224,8 @@ def attrs() -> st.SearchStrategy[Mapping[Hashable, Any]]:
def variables(
draw: st.DrawFn,
*,
- array_strategy_fn: Union[ArrayStrategyFn, None] = None,
- dims: Union[
- st.SearchStrategy[Union[Sequence[Hashable], Mapping[Hashable, int]]],
- None,
- ] = None,
+ array_strategy_fn: ArrayStrategyFn | None = None,
+ dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None,
dtype: st.SearchStrategy[np.dtype] = supported_dtypes(),
attrs: st.SearchStrategy[Mapping] = attrs(),
) -> xr.Variable:
@@ -354,7 +351,7 @@ def variables(
valid_shapes = npst.array_shapes(min_dims=len(_dims), max_dims=len(_dims))
_shape = draw(valid_shapes)
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
- elif isinstance(_dims, (Mapping, dict)):
+ elif isinstance(_dims, Mapping | dict):
# should be a mapping of form {dim_names: lengths}
dim_names, _shape = list(_dims.keys()), tuple(_dims.values())
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
@@ -394,7 +391,7 @@ def unique_subset_of(
objs: Sequence[Hashable],
*,
min_size: int = 0,
- max_size: Union[int, None] = None,
+ max_size: int | None = None,
) -> st.SearchStrategy[Sequence[Hashable]]: ...
@@ -403,18 +400,18 @@ def unique_subset_of(
objs: Mapping[Hashable, Any],
*,
min_size: int = 0,
- max_size: Union[int, None] = None,
+ max_size: int | None = None,
) -> st.SearchStrategy[Mapping[Hashable, Any]]: ...
@st.composite
def unique_subset_of(
draw: st.DrawFn,
- objs: Union[Sequence[Hashable], Mapping[Hashable, Any]],
+ objs: Sequence[Hashable] | Mapping[Hashable, Any],
*,
min_size: int = 0,
- max_size: Union[int, None] = None,
-) -> Union[Sequence[Hashable], Mapping[Hashable, Any]]:
+ max_size: int | None = None,
+) -> Sequence[Hashable] | Mapping[Hashable, Any]:
"""
Return a strategy which generates a unique subset of the given objects.
diff --git a/xarray/tests/arrays.py b/xarray/tests/arrays.py
index 983e620d1f0..4e1e31cfa49 100644
--- a/xarray/tests/arrays.py
+++ b/xarray/tests/arrays.py
@@ -1,5 +1,5 @@
-from collections.abc import Iterable
-from typing import Any, Callable
+from collections.abc import Callable, Iterable
+from typing import Any
import numpy as np
diff --git a/xarray/tests/test_accessor_str.py b/xarray/tests/test_accessor_str.py
index e0c9619b4e7..d7838ff0667 100644
--- a/xarray/tests/test_accessor_str.py
+++ b/xarray/tests/test_accessor_str.py
@@ -39,7 +39,7 @@
from __future__ import annotations
import re
-from typing import Callable
+from collections.abc import Callable
import numpy as np
import pytest
diff --git a/xarray/tests/test_cftime_offsets.py b/xarray/tests/test_cftime_offsets.py
index cec4ea4c944..c4f2f51bd33 100644
--- a/xarray/tests/test_cftime_offsets.py
+++ b/xarray/tests/test_cftime_offsets.py
@@ -1,8 +1,9 @@
from __future__ import annotations
import warnings
+from collections.abc import Callable
from itertools import product
-from typing import Callable, Literal
+from typing import Literal
import numpy as np
import pandas as pd
@@ -978,9 +979,9 @@ def test_onOffset_month_or_quarter_or_year_end(
def test_rollforward(calendar, offset, initial_date_args, partial_expected_date_args):
date_type = get_date_type(calendar)
initial = date_type(*initial_date_args)
- if isinstance(offset, (MonthBegin, QuarterBegin, YearBegin)):
+ if isinstance(offset, MonthBegin | QuarterBegin | YearBegin):
expected_date_args = partial_expected_date_args + (1,)
- elif isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)):
+ elif isinstance(offset, MonthEnd | QuarterEnd | YearEnd):
reference_args = partial_expected_date_args + (1,)
reference = date_type(*reference_args)
expected_date_args = partial_expected_date_args + (reference.daysinmonth,)
@@ -1029,9 +1030,9 @@ def test_rollforward(calendar, offset, initial_date_args, partial_expected_date_
def test_rollback(calendar, offset, initial_date_args, partial_expected_date_args):
date_type = get_date_type(calendar)
initial = date_type(*initial_date_args)
- if isinstance(offset, (MonthBegin, QuarterBegin, YearBegin)):
+ if isinstance(offset, MonthBegin | QuarterBegin | YearBegin):
expected_date_args = partial_expected_date_args + (1,)
- elif isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)):
+ elif isinstance(offset, MonthEnd | QuarterEnd | YearEnd):
reference_args = partial_expected_date_args + (1,)
reference = date_type(*reference_args)
expected_date_args = partial_expected_date_args + (reference.daysinmonth,)
diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py
index e974b8b1ac8..ab3108e7056 100644
--- a/xarray/tests/test_computation.py
+++ b/xarray/tests/test_computation.py
@@ -500,7 +500,7 @@ def func(x):
return np.stack([x, -x], axis=-1)
result = apply_ufunc(func, obj, output_core_dims=[["sign"]])
- if isinstance(result, (xr.Dataset, xr.DataArray)):
+ if isinstance(result, xr.Dataset | xr.DataArray):
result.coords["sign"] = [1, -1]
return result
@@ -527,7 +527,7 @@ def func(x):
return (x, np.stack([x, -x], axis=-1))
result = apply_ufunc(func, obj, output_core_dims=[[], ["sign"]])
- if isinstance(result[1], (xr.Dataset, xr.DataArray)):
+ if isinstance(result[1], xr.Dataset | xr.DataArray):
result[1].coords["sign"] = [1, -1]
return result
@@ -568,7 +568,7 @@ def func(*x):
output_core_dims=[[dim]],
exclude_dims={dim},
)
- if isinstance(result, (xr.Dataset, xr.DataArray)):
+ if isinstance(result, xr.Dataset | xr.DataArray):
# note: this will fail if dim is not a coordinate on any input
new_coord = np.concatenate([obj.coords[dim] for obj in objects])
result.coords[dim] = new_coord
diff --git a/xarray/tests/test_concat.py b/xarray/tests/test_concat.py
index 8b2a7ec5d28..e0dc105c925 100644
--- a/xarray/tests/test_concat.py
+++ b/xarray/tests/test_concat.py
@@ -1,7 +1,8 @@
from __future__ import annotations
+from collections.abc import Callable
from copy import deepcopy
-from typing import TYPE_CHECKING, Any, Callable, Literal
+from typing import TYPE_CHECKING, Any, Literal
import numpy as np
import pandas as pd
diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py
index f019d3c789c..985fb02a87e 100644
--- a/xarray/tests/test_indexing.py
+++ b/xarray/tests/test_indexing.py
@@ -333,7 +333,7 @@ def test_lazily_indexed_array(self) -> None:
# make sure actual.key is appropriate type
if all(
- isinstance(k, (int, slice)) for k in v_lazy._data.key.tuple
+ isinstance(k, int | slice) for k in v_lazy._data.key.tuple
):
assert isinstance(v_lazy._data.key, indexing.BasicIndexer)
else:
@@ -364,10 +364,7 @@ def test_lazily_indexed_array(self) -> None:
assert_array_equal(expected_b.transpose(*order), transposed)
assert isinstance(
actual._data,
- (
- indexing.LazilyVectorizedIndexedArray,
- indexing.LazilyIndexedArray,
- ),
+ indexing.LazilyVectorizedIndexedArray | indexing.LazilyIndexedArray,
)
assert isinstance(actual._data, indexing.LazilyIndexedArray)
@@ -388,10 +385,7 @@ def check_indexing(v_eager, v_lazy, indexers):
assert expected.shape == actual.shape
assert isinstance(
actual._data,
- (
- indexing.LazilyVectorizedIndexedArray,
- indexing.LazilyIndexedArray,
- ),
+ indexing.LazilyVectorizedIndexedArray | indexing.LazilyIndexedArray,
)
assert_array_equal(expected, actual)
v_eager = expected
diff --git a/xarray/tests/test_plot.py b/xarray/tests/test_plot.py
index a973f6b11f7..680a98a6500 100644
--- a/xarray/tests/test_plot.py
+++ b/xarray/tests/test_plot.py
@@ -3,10 +3,10 @@
import contextlib
import inspect
import math
-from collections.abc import Generator, Hashable
+from collections.abc import Callable, Generator, Hashable
from copy import copy
from datetime import date, timedelta
-from typing import Any, Callable, Literal, cast
+from typing import Any, Literal, cast
import numpy as np
import pandas as pd
@@ -175,7 +175,7 @@ def contourf_called(self, plotmethod) -> bool:
# Compatible with mpl before (PathCollection) and after (QuadContourSet) 3.8
def matchfunc(x) -> bool:
return isinstance(
- x, (mpl.collections.PathCollection, mpl.contour.QuadContourSet)
+ x, mpl.collections.PathCollection | mpl.contour.QuadContourSet
)
paths = plt.gca().findobj(matchfunc)
diff --git a/xarray/tests/test_plugins.py b/xarray/tests/test_plugins.py
index 46051f040ce..a0d742c3159 100644
--- a/xarray/tests/test_plugins.py
+++ b/xarray/tests/test_plugins.py
@@ -1,12 +1,7 @@
from __future__ import annotations
import sys
-from importlib.metadata import EntryPoint
-
-if sys.version_info >= (3, 10):
- from importlib.metadata import EntryPoints
-else:
- EntryPoints = list[EntryPoint]
+from importlib.metadata import EntryPoint, EntryPoints
from unittest import mock
import pytest
@@ -288,10 +283,7 @@ def test_refresh_engines() -> None:
EntryPointMock1.name = "test1"
EntryPointMock1.load.return_value = DummyBackendEntrypoint1
- if sys.version_info >= (3, 10):
- return_value = EntryPoints([EntryPointMock1])
- else:
- return_value = {"xarray.backends": [EntryPointMock1]}
+ return_value = EntryPoints([EntryPointMock1])
with mock.patch("xarray.backends.plugins.entry_points", return_value=return_value):
list_engines.cache_clear()
@@ -303,10 +295,7 @@ def test_refresh_engines() -> None:
EntryPointMock2.name = "test2"
EntryPointMock2.load.return_value = DummyBackendEntrypoint2
- if sys.version_info >= (3, 10):
- return_value2 = EntryPoints([EntryPointMock2])
- else:
- return_value2 = {"xarray.backends": [EntryPointMock2]}
+ return_value2 = EntryPoints([EntryPointMock2])
with mock.patch("xarray.backends.plugins.entry_points", return_value=return_value2):
refresh_engines()
diff --git a/xarray/tests/test_ufuncs.py b/xarray/tests/test_ufuncs.py
index 6b4c3f38ee9..20e064e2013 100644
--- a/xarray/tests/test_ufuncs.py
+++ b/xarray/tests/test_ufuncs.py
@@ -10,7 +10,7 @@
def assert_identical(a, b):
assert type(a) is type(b) or float(a) == float(b)
- if isinstance(a, (xr.DataArray, xr.Dataset, xr.Variable)):
+ if isinstance(a, xr.DataArray | xr.Dataset | xr.Variable):
assert_identical_(a, b)
else:
assert_array_equal(a, b)
diff --git a/xarray/tests/test_units.py b/xarray/tests/test_units.py
index 0e8fbe9198b..232a35d8ea0 100644
--- a/xarray/tests/test_units.py
+++ b/xarray/tests/test_units.py
@@ -44,7 +44,7 @@
def is_compatible(unit1, unit2):
def dimensionality(obj):
- if isinstance(obj, (unit_registry.Quantity, unit_registry.Unit)):
+ if isinstance(obj, unit_registry.Quantity | unit_registry.Unit):
unit_like = obj
else:
unit_like = unit_registry.dimensionless
@@ -75,7 +75,7 @@ def zip_mappings(*mappings):
def array_extract_units(obj):
- if isinstance(obj, (xr.Variable, xr.DataArray, xr.Dataset)):
+ if isinstance(obj, xr.Variable | xr.DataArray | xr.Dataset):
obj = obj.data
try:
@@ -163,7 +163,7 @@ def strip_units(obj):
new_obj = obj.copy(data=data)
elif isinstance(obj, unit_registry.Quantity):
new_obj = obj.magnitude
- elif isinstance(obj, (list, tuple)):
+ elif isinstance(obj, list | tuple):
return type(obj)(strip_units(elem) for elem in obj)
else:
new_obj = obj
@@ -172,7 +172,7 @@ def strip_units(obj):
def attach_units(obj, units):
- if not isinstance(obj, (xr.DataArray, xr.Dataset, xr.Variable)):
+ if not isinstance(obj, xr.DataArray | xr.Dataset | xr.Variable):
units = units.get("data", None) or units.get(None, None) or 1
return array_attach_units(obj, units)
@@ -1580,11 +1580,11 @@ def test_numpy_methods(self, func, unit, error, dtype):
variable = xr.Variable("x", array)
args = [
- item * unit if isinstance(item, (int, float, list)) else item
+ item * unit if isinstance(item, int | float | list) else item
for item in func.args
]
kwargs = {
- key: value * unit if isinstance(value, (int, float, list)) else value
+ key: value * unit if isinstance(value, int | float | list) else value
for key, value in func.kwargs.items()
}
@@ -1633,7 +1633,7 @@ def test_raw_numpy_methods(self, func, unit, error, dtype):
args = [
(
item * unit
- if isinstance(item, (int, float, list)) and func.name != "item"
+ if isinstance(item, int | float | list) and func.name != "item"
else item
)
for item in func.args
@@ -1641,7 +1641,7 @@ def test_raw_numpy_methods(self, func, unit, error, dtype):
kwargs = {
key: (
value * unit
- if isinstance(value, (int, float, list)) and func.name != "item"
+ if isinstance(value, int | float | list) and func.name != "item"
else value
)
for key, value in func.kwargs.items()
@@ -3308,7 +3308,7 @@ def test_sel(self, raw_values, unit, error, dtype):
values = raw_values * unit
if error is not None and not (
- isinstance(raw_values, (int, float)) and x.check(unit)
+ isinstance(raw_values, int | float) and x.check(unit)
):
with pytest.raises(error):
data_array.sel(x=values)
@@ -3353,7 +3353,7 @@ def test_loc(self, raw_values, unit, error, dtype):
values = raw_values * unit
if error is not None and not (
- isinstance(raw_values, (int, float)) and x.check(unit)
+ isinstance(raw_values, int | float) and x.check(unit)
):
with pytest.raises(error):
data_array.loc[{"x": values}]
@@ -3398,7 +3398,7 @@ def test_drop_sel(self, raw_values, unit, error, dtype):
values = raw_values * unit
if error is not None and not (
- isinstance(raw_values, (int, float)) and x.check(unit)
+ isinstance(raw_values, int | float) and x.check(unit)
):
with pytest.raises(error):
data_array.drop_sel(x=values)
@@ -3808,7 +3808,7 @@ def test_computation(self, func, variant, dtype, compute_backend):
# we want to make sure the output unit is correct
units = extract_units(data_array)
- if not isinstance(func, (function, method)):
+ if not isinstance(func, function | method):
units.update(extract_units(func(array.reshape(-1))))
with xr.set_options(use_opt_einsum=False):
diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py
index ff6522c00eb..d1d5d5ada55 100644
--- a/xarray/tests/test_variable.py
+++ b/xarray/tests/test_variable.py
@@ -2956,7 +2956,7 @@ def test_from_pint_wrapping_dask(self, Var):
ids=lambda x: f"{x}",
)
def test_datetime_conversion_warning(values, warns) -> None:
- dims = ["time"] if isinstance(values, (np.ndarray, pd.Index, pd.Series)) else []
+ dims = ["time"] if isinstance(values, np.ndarray | pd.Index | pd.Series) else []
if warns:
with pytest.warns(UserWarning, match="non-nanosecond precision datetime"):
var = Variable(dims, values)
@@ -3031,7 +3031,7 @@ def test_pandas_two_only_datetime_conversion_warnings(
ids=lambda x: f"{x}",
)
def test_timedelta_conversion_warning(values, warns) -> None:
- dims = ["time"] if isinstance(values, (np.ndarray, pd.Index)) else []
+ dims = ["time"] if isinstance(values, np.ndarray | pd.Index) else []
if warns:
with pytest.warns(UserWarning, match="non-nanosecond precision timedelta"):
var = Variable(dims, values)
diff --git a/xarray/util/deprecation_helpers.py b/xarray/util/deprecation_helpers.py
index 3d52253ed6e..c8e594508d9 100644
--- a/xarray/util/deprecation_helpers.py
+++ b/xarray/util/deprecation_helpers.py
@@ -33,8 +33,9 @@
import inspect
import warnings
+from collections.abc import Callable
from functools import wraps
-from typing import Callable, TypeVar
+from typing import TypeVar
from xarray.core.utils import emit_user_level_warning
diff --git a/xarray/util/generate_ops.py b/xarray/util/generate_ops.py
index a9f66cdc614..c9d31111353 100644
--- a/xarray/util/generate_ops.py
+++ b/xarray/util/generate_ops.py
@@ -13,7 +13,6 @@
from __future__ import annotations
from collections.abc import Iterator, Sequence
-from typing import Optional
BINOPS_EQNE = (("__eq__", "nputils.array_eq"), ("__ne__", "nputils.array_ne"))
BINOPS_CMP = (
@@ -139,7 +138,7 @@ def _type_ignore(ignore: str) -> str:
return f" # type:ignore[{ignore}]" if ignore else ""
-FuncType = Sequence[tuple[Optional[str], Optional[str]]]
+FuncType = Sequence[tuple[str | None, str | None]]
OpsType = tuple[FuncType, str, dict[str, str]]