diff --git a/.github/workflows/ci-additional.yaml b/.github/workflows/ci-additional.yaml
index 4bf85458211..5d9c94d639c 100644
--- a/.github/workflows/ci-additional.yaml
+++ b/.github/workflows/ci-additional.yaml
@@ -42,7 +42,6 @@ jobs:
"py37-min-all-deps",
"py37-min-nep18",
"py38-all-but-dask",
- "py38-backend-api-v2",
"py38-flaky",
]
steps:
@@ -56,12 +55,7 @@ jobs:
- name: Set environment variables
run: |
- if [[ ${{ matrix.env }} == "py38-backend-api-v2" ]] ;
- then
- echo "CONDA_ENV_FILE=ci/requirements/environment.yml" >> $GITHUB_ENV
- echo "XARRAY_BACKEND_API=v2" >> $GITHUB_ENV
-
- elif [[ ${{ matrix.env }} == "py38-flaky" ]] ;
+ if [[ ${{ matrix.env }} == "py38-flaky" ]] ;
then
echo "CONDA_ENV_FILE=ci/requirements/environment.yml" >> $GITHUB_ENV
echo "PYTEST_EXTRA_FLAGS=--run-flaky --run-network-tests" >> $GITHUB_ENV
diff --git a/ci/requirements/py37-min-all-deps.yml b/ci/requirements/py37-min-all-deps.yml
index c2fd2e18a8a..7d04f431935 100644
--- a/ci/requirements/py37-min-all-deps.yml
+++ b/ci/requirements/py37-min-all-deps.yml
@@ -25,7 +25,9 @@ dependencies:
- lxml=4.5 # Optional dep of pydap
- matplotlib-base=3.1
- nc-time-axis=1.2
- - netcdf4=1.5
+# netcdf follows a 1.major.minor[.patch] convention (see https://github.com/Unidata/netcdf4-python/issues/1090)
+# bumping the netCDF4 version is currently blocked by #4491
+ - netcdf4=1.5.3
- numba=0.48
- numpy=1.17
- pandas=1.0
diff --git a/doc/api-hidden.rst b/doc/api-hidden.rst
index 14d79039a3a..1df9cc0a1f9 100644
--- a/doc/api-hidden.rst
+++ b/doc/api-hidden.rst
@@ -811,3 +811,28 @@
backends.DummyFileManager.acquire
backends.DummyFileManager.acquire_context
backends.DummyFileManager.close
+
+ backends.common.BackendArray
+ backends.common.BackendEntrypoint
+ backends.common.BackendEntrypoint.guess_can_open
+ backends.common.BackendEntrypoint.open_dataset
+
+ core.indexing.IndexingSupport
+ core.indexing.explicit_indexing_adapter
+ core.indexing.BasicIndexer
+ core.indexing.OuterIndexer
+ core.indexing.VectorizedIndexer
+ core.indexing.LazilyIndexedArray
+ core.indexing.LazilyVectorizedIndexedArray
+
+ conventions.decode_cf_variables
+
+ coding.variables.UnsignedIntegerCoder
+ coding.variables.CFMaskCoder
+ coding.variables.CFScaleOffsetCoder
+
+ coding.strings.CharacterArrayCoder
+ coding.strings.EncodedStringCoder
+
+ coding.times.CFTimedeltaCoder
+ coding.times.CFDatetimeCoder
diff --git a/doc/internals.rst b/doc/internals.rst
index f3d67de9077..f94371731de 100644
--- a/doc/internals.rst
+++ b/doc/internals.rst
@@ -248,3 +248,460 @@ re-open it directly with Zarr:
zgroup = zarr.open("rasm.zarr")
print(zgroup.tree())
dict(zgroup["Tair"].attrs)
+
+.. _add_a_backend:
+
+How to add a new backend
+------------------------
+
+Adding a new backend for read support to Xarray does not require you
+to integrate any code into Xarray; all you need to do is:
+
+- Create a class that inherits from Xarray :py:class:`~xarray.backends.common.BackendEntrypoint`
+  and implements the method ``open_dataset``, see :ref:`RST backend_entrypoint`
+
+- Declare this class as an external plugin in your ``setup.py``, see :ref:`RST backend_registration`
+
+If you also want to support lazy loading and dask, see :ref:`RST lazy_loading`.
+
+Note that the new interface for backends is available starting from Xarray
+version 0.18.
+
+.. _RST backend_entrypoint:
+
+BackendEntrypoint subclassing
++++++++++++++++++++++++++++++
+
+Your ``BackendEntrypoint`` sub-class is the primary interface with Xarray, and
+it should implement the following attributes and methods:
+
+- the ``open_dataset`` method (mandatory)
+- the ``open_dataset_parameters`` attribute (optional)
+- the ``guess_can_open`` method (optional).
+
+This is what a ``BackendEntrypoint`` subclass should look like:
+
+.. code-block:: python
+
+ class MyBackendEntrypoint(BackendEntrypoint):
+ def open_dataset(
+ self,
+ filename_or_obj,
+ *,
+ drop_variables=None,
+ # other backend specific keyword arguments
+ ):
+ ...
+ return ds
+
+ open_dataset_parameters = ["filename_or_obj", "drop_variables"]
+
+ def guess_can_open(self, filename_or_obj):
+ try:
+ _, ext = os.path.splitext(filename_or_obj)
+ except TypeError:
+ return False
+ return ext in {...}
+
+``BackendEntrypoint`` subclass methods and attributes are detailed in the
+following sections.
+
+.. _RST open_dataset:
+
+open_dataset
+^^^^^^^^^^^^
+
+The backend ``open_dataset`` method shall implement reading from file and
+variable decoding, and it shall return an instance of the output Xarray class
+:py:class:`~xarray.Dataset`.
+
+The following is an example of the high level processing steps:
+
+.. code-block:: python
+
+ def open_dataset(
+ self,
+ filename_or_obj,
+ *,
+ drop_variables=None,
+ decode_times=True,
+ decode_timedelta=True,
+ decode_coords=True,
+ my_backend_param=None,
+ ):
+ vars, attrs, coords = my_reader(
+ filename_or_obj,
+ drop_variables=drop_variables,
+ my_backend_param=my_backend_param,
+ )
+ vars, attrs, coords = my_decode_variables(
+ vars, attrs, decode_times, decode_timedelta, decode_coords
+ ) # see also conventions.decode_cf_variables
+
+ ds = xr.Dataset(vars, attrs=attrs)
+ ds = ds.set_coords(coords)
+        ds.set_close(my_close_method)
+
+ return ds
+
+
+The output :py:class:`~xarray.Dataset` shall implement the additional custom method
+``close``, used by Xarray to ensure the related files are eventually closed. This
+method shall be set by using :py:meth:`~xarray.Dataset.set_close`.
+
+
+The ``open_dataset`` method takes one argument
+(``filename_or_obj``) and one keyword argument (``drop_variables``):
+
+- ``filename_or_obj``: can be a string containing a path or an instance of
+  :py:class:`pathlib.Path`.
+- ``drop_variables``: can be ``None`` or an iterable containing the variable
+ names to be dropped when reading the data.
+
+If it makes sense for your backend, your ``open_dataset`` method
+should implement in its interface the following boolean keyword arguments, called
+**decoders**, which default to ``None``:
+
+- ``mask_and_scale``
+- ``decode_times``
+- ``decode_timedelta``
+- ``use_cftime``
+- ``concat_characters``
+- ``decode_coords``
+
+Note: all the supported decoders shall be declared explicitly
+in the backend ``open_dataset`` signature.
+
+These keyword arguments are explicitly defined in Xarray
+:py:func:`~xarray.open_dataset` signature. Xarray will pass them to the
+backend only if the user explicitly sets a value different from ``None``.
+For more details on decoders see :ref:`RST decoders`.
+
+Your backend can also take as input a set of backend-specific keyword
+arguments. All these keyword arguments can be passed to
+:py:func:`~xarray.open_dataset`, either grouped in the ``backend_kwargs``
+dictionary or passed explicitly as additional keyword arguments.
+
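+For example, using the ``my_backend_param`` parameter from the ``open_dataset``
+example above (``"my_engine"`` and ``"myfile"`` are placeholders), the
+following two calls are equivalent:
+
+.. code-block:: python
+
+    ds = xr.open_dataset("myfile", engine="my_engine", my_backend_param=1)
+    ds = xr.open_dataset(
+        "myfile", engine="my_engine", backend_kwargs={"my_backend_param": 1}
+    )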
+
+If you don't want to support lazy loading, then the
+:py:class:`~xarray.Dataset` shall contain values as :py:class:`numpy.ndarray`
+and your work is almost done.
+
+.. _RST open_dataset_parameters:
+
+open_dataset_parameters
+^^^^^^^^^^^^^^^^^^^^^^^
+
+``open_dataset_parameters`` is the list of backend ``open_dataset`` parameters.
+It is not a mandatory attribute, and if the backend does not provide it
+explicitly, Xarray creates a list of them automatically by inspecting the
+backend signature.
+
+If ``open_dataset_parameters`` is not defined, but ``**kwargs`` and ``*args``
+are in the backend ``open_dataset`` signature, Xarray raises an error.
+On the other hand, if the backend provides the ``open_dataset_parameters``,
+then ``**kwargs`` and ``*args`` can be used in the signature.
+However, this practice is discouraged unless there is a good reason for using
+``**kwargs`` or ``*args``.
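+
+For example, a backend whose ``open_dataset`` accepts extra options via
+``**kwargs`` could still declare its parameters explicitly (a sketch):
+
+.. code-block:: python
+
+    class MyBackendEntrypoint(BackendEntrypoint):
+        def open_dataset(self, filename_or_obj, *, drop_variables=None, **kwargs):
+            ...
+
+        # declared explicitly because the signature cannot be inspected
+        # reliably when **kwargs is present
+        open_dataset_parameters = ["filename_or_obj", "drop_variables"]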
+
+.. _RST guess_can_open:
+
+guess_can_open
+^^^^^^^^^^^^^^
+
+``guess_can_open`` is used to identify the proper engine to open your data
+file automatically in case the engine is not specified explicitly. If you are
+not interested in supporting this feature, you can skip this step since
+:py:class:`~xarray.backends.common.BackendEntrypoint` already provides a
+default :py:meth:`~xarray.backends.common.BackendEntrypoint.guess_can_open`
+that always returns ``False``.
+
+Backend ``guess_can_open`` takes as input the ``filename_or_obj`` parameter of
+Xarray :py:func:`~xarray.open_dataset`, and returns a boolean.
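+
+For example, a backend that recognizes its files by their magic number rather
+than by their extension might implement it as follows (a sketch modelled on the
+h5netcdf backend; ``read_magic_number`` works on byte strings and file-like
+objects only, hence the ``TypeError`` guard):
+
+.. code-block:: python
+
+    # read_magic_number is available from xarray.core.utils
+    def guess_can_open(self, filename_or_obj):
+        try:
+            magic_number = read_magic_number(filename_or_obj)
+        except TypeError:
+            return False
+        return magic_number.startswith(b"\211HDF\r\n\032\n")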
+
+.. _RST decoders:
+
+Decoders
+^^^^^^^^
+The decoders implement specific operations to transform data from on-disk
+representation to Xarray representation.
+
+A classic example is the “time” variable decoding operation. In NetCDF, the
+elements of the “time” variable are stored as integers, and the unit contains
+an origin (for example: "seconds since 1970-1-1"). In this case, Xarray
+transforms the integer/unit pair into a :py:class:`numpy.datetime64`.
+
+The standard coders implemented in Xarray are:
+
+- :py:class:`xarray.coding.strings.CharacterArrayCoder()`
+- :py:class:`xarray.coding.strings.EncodedStringCoder()`
+- :py:class:`xarray.coding.variables.UnsignedIntegerCoder()`
+- :py:class:`xarray.coding.variables.CFMaskCoder()`
+- :py:class:`xarray.coding.variables.CFScaleOffsetCoder()`
+- :py:class:`xarray.coding.times.CFTimedeltaCoder()`
+- :py:class:`xarray.coding.times.CFDatetimeCoder()`
+
+Xarray coders all have the same interface. They have two methods: ``decode``
+and ``encode``. The method ``decode`` takes a ``Variable`` in on-disk
+format and returns a ``Variable`` in Xarray format. Variable
+attributes that are no longer applicable after decoding are dropped and stored
+in ``Variable.encoding`` to make them available to the ``encode`` method, which
+performs the inverse transformation.
+
+The following is an example of how to use a coder's ``decode`` method:
+
+.. ipython:: python
+
+ var = xr.Variable(
+ dims=("x",), data=np.arange(10.0), attrs={"scale_factor": 10, "add_offset": 2}
+ )
+ var
+
+ coder = xr.coding.variables.CFScaleOffsetCoder()
+ decoded_var = coder.decode(var)
+ decoded_var
+ decoded_var.encoding
+
+Some of the transformations can be common to multiple backends, so before
+implementing a new decoder, be sure Xarray does not already implement it.
+
+The backends can reuse Xarray’s decoders, either by instantiating the coders
+and calling their ``decode`` method directly, or by using the higher-level
+function :py:func:`~xarray.conventions.decode_cf_variables`, which groups the
+Xarray decoders.
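+
+A minimal sketch of the second approach, assuming the backend has already read
+``vars`` and ``attrs`` from the file and receives the decoder keyword arguments
+in its ``open_dataset``:
+
+.. code-block:: python
+
+    from xarray import conventions
+
+    # returns the decoded variables, the (possibly updated) global attributes
+    # and the names of the variables that should be set as coordinates
+    new_vars, attrs, coord_names = conventions.decode_cf_variables(
+        vars,
+        attrs,
+        decode_times=decode_times,
+        decode_timedelta=decode_timedelta,
+        decode_coords=decode_coords,
+    )
+    ds = xr.Dataset(new_vars, attrs=attrs)
+    ds = ds.set_coords(coord_names.intersection(new_vars))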
+
+In some cases, the transformation to apply strongly depends on the on-disk
+data format. Therefore, you may need to implement your own decoder.
+
+An example of such a case is when you have to deal with the time format of a
+GRIB file. The GRIB format is very different from the NetCDF one: in GRIB, the
+time is stored in two attributes, ``dataDate`` and ``dataTime``, as strings.
+Therefore, it is not possible to reuse the Xarray time decoder, and
+implementing a new one is mandatory.
+
+Decoders can be activated or deactivated using the boolean keywords of
+Xarray :py:func:`~xarray.open_dataset` signature: ``mask_and_scale``,
+``decode_times``, ``decode_timedelta``, ``use_cftime``,
+``concat_characters``, ``decode_coords``.
+Such keywords are passed to the backend only if the user sets a value
+different from ``None``. Note that the backend does not necessarily have to
+implement all the decoders, but it shall declare in its ``open_dataset``
+interface only the boolean keywords related to the supported decoders.
+
+.. _RST backend_registration:
+
+How to register a backend
++++++++++++++++++++++++++++
+
+Define a new entrypoint in your ``setup.py`` (or ``setup.cfg``) with:
+
+- group: ``xarray.backends``
+- name: the name to be passed to :py:func:`~xarray.open_dataset` as ``engine``
+- object reference: the reference of the class that you have implemented.
+
+You can declare the entrypoint in ``setup.py`` using the following syntax:
+
+.. code-block:: python
+
+ setuptools.setup(
+ entry_points={
+ "xarray.backends": [
+ "engine_name=your_package.your_module:YourBackendEntryClass"
+ ],
+ },
+ )
+
+in ``setup.cfg``:
+
+.. code-block:: cfg
+
+ [options.entry_points]
+ xarray.backends =
+ engine_name = your_package.your_module:YourBackendEntryClass
+
+
+See https://packaging.python.org/specifications/entry-points/#data-model
+for more information.
+
+If you are using `Poetry <https://python-poetry.org/>`_ for your build system, you can accomplish the same thing using "plugins". In this case you would need to add the following to your ``pyproject.toml`` file:
+
+.. code-block:: toml
+
+    [tool.poetry.plugins."xarray.backends"]
+ "engine_name" = "your_package.your_module:YourBackendEntryClass"
+
+See https://python-poetry.org/docs/pyproject/#plugins for more information on Poetry plugins.
+
+.. _RST lazy_loading:
+
+How to support Lazy Loading
++++++++++++++++++++++++++++
+If you want to make your backend effective with big datasets, then you should
+support lazy loading.
+Basically, you shall replace the :py:class:`numpy.ndarray` inside the
+variables with a custom class that supports lazy loading and indexing.
+See the example below:
+
+.. code-block:: python
+
+ backend_array = MyBackendArray()
+ data = indexing.LazilyIndexedArray(backend_array)
+ var = xr.Variable(dims, data, attrs=attrs, encoding=encoding)
+
+Where:
+
+- :py:class:`~xarray.core.indexing.LazilyIndexedArray` is a class
+ provided by Xarray that manages the lazy loading.
+- ``MyBackendArray`` shall be implemented by the backend and shall inherit
+ from :py:class:`~xarray.backends.common.BackendArray`.
+
+BackendArray subclassing
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The BackendArray subclass shall implement the following method and attributes:
+
+- the ``__getitem__`` method that takes as input an index and returns a
+  `NumPy <https://numpy.org/>`__ array
+- the ``shape`` attribute
+- the ``dtype`` attribute.
+
+
+Xarray supports different types of indexing, which can be
+grouped into three categories of indexers:
+:py:class:`~xarray.core.indexing.BasicIndexer`,
+:py:class:`~xarray.core.indexing.OuterIndexer` and
+:py:class:`~xarray.core.indexing.VectorizedIndexer`.
+This implies that the implementation of the method ``__getitem__`` can be tricky.
+In order to simplify this task, Xarray provides a helper function,
+:py:func:`~xarray.core.indexing.explicit_indexing_adapter`, that transforms
+all the input ``indexer`` types (`basic`, `outer`, `vectorized`) into a tuple
+which is interpreted correctly by your backend.
+
+This is an example ``BackendArray`` subclass implementation:
+
+.. code-block:: python
+
+ class MyBackendArray(BackendArray):
+ def __init__(
+ self,
+ shape,
+ dtype,
+ lock,
+ # other backend specific keyword arguments
+ ):
+ self.shape = shape
+            self.dtype = dtype
+            self.lock = lock
+
+ def __getitem__(
+ self, key: xarray.core.indexing.ExplicitIndexer
+ ) -> np.typing.ArrayLike:
+ return indexing.explicit_indexing_adapter(
+ key,
+ self.shape,
+ indexing.IndexingSupport.BASIC,
+ self._raw_indexing_method,
+ )
+
+ def _raw_indexing_method(self, key: tuple) -> np.typing.ArrayLike:
+            # thread safe method that accesses the data on disk
+ with self.lock:
+ ...
+ return item
+
+Note that ``BackendArray.__getitem__`` must be thread safe to support
+multi-thread processing.
+
+The :py:func:`~xarray.core.indexing.explicit_indexing_adapter` function takes
+as input the ``key``, the array ``shape`` and the following parameters:
+
+- ``indexing_support``: the type of index supported by ``raw_indexing_method``
+- ``raw_indexing_method``: a method that shall take as input a key in the form
+ of a tuple and return an indexed :py:class:`numpy.ndarray`.
+
+For more details see
+:py:class:`~xarray.core.indexing.IndexingSupport` and :ref:`RST indexing`.
+
+In order to support `Dask <https://dask.org>`__ distributed and
+:py:mod:`multiprocessing`, the ``BackendArray`` subclass should be serializable
+either with :ref:`io.pickle` or
+`cloudpickle <https://github.com/cloudpipe/cloudpickle>`__.
+That implies that all the references to open files should be dropped. For
+opening files, we therefore suggest using the helper class provided by Xarray,
+:py:class:`~xarray.backends.CachingFileManager`.
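+
+A minimal sketch of how a backend might use it (``my_open_file`` and
+``filename`` are hypothetical; the manager stores the opener and its arguments
+rather than an open file handle, so the object remains picklable):
+
+.. code-block:: python
+
+    from xarray.backends import CachingFileManager
+
+    manager = CachingFileManager(my_open_file, filename, mode="r")
+
+    # the file is (re)opened and cached on demand
+    with manager.acquire_context() as open_file:
+        ...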
+
+.. _RST indexing:
+
+Indexing Examples
+^^^^^^^^^^^^^^^^^
+**BASIC**
+
+In the ``BASIC`` indexing support, numbers and slices are supported.
+
+Example:
+
+.. ipython::
+ :verbatim:
+
+ In [1]: # () shall return the full array
+ ...: backend_array._raw_indexing_method(())
+ Out[1]: array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
+
+ In [2]: # shall support integers
+ ...: backend_array._raw_indexing_method(1, 1)
+ Out[2]: 5
+
+ In [3]: # shall support slices
+ ...: backend_array._raw_indexing_method(slice(0, 3), slice(2, 4))
+ Out[3]: array([[2, 3], [6, 7], [10, 11]])
+
+**OUTER**
+
+The ``OUTER`` indexing shall support numbers and slices and, in addition, it
+shall also support lists of integers. The outer indexing is equivalent to
+combining multiple input lists with ``itertools.product()``:
+
+.. ipython::
+ :verbatim:
+
+ In [1]: backend_array._raw_indexing_method([0, 1], [0, 1, 2])
+ Out[1]: array([[0, 1, 2], [4, 5, 6]])
+
+ # shall support integers
+ In [2]: backend_array._raw_indexing_method(1, 1)
+ Out[2]: 5
+
+
+**OUTER_1VECTOR**
+
+The ``OUTER_1VECTOR`` indexing shall support numbers, slices and at most one
+list. The behaviour with the list shall be the same as in ``OUTER`` indexing.
+
+If you support more complex indexing, such as `explicit indexing` or
+`numpy indexing`, you can have a look at the implementation of the Zarr and
+Scipy backends, currently available in the :py:mod:`~xarray.backends` module.
+
+.. _RST preferred_chunks:
+
+Backend preferred chunks
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The backend is not directly involved in `Dask <https://dask.org>`__
+chunking, since it is internally managed by Xarray. However, the backend can
+define the preferred chunk size inside the variable’s encoding
+``var.encoding["preferred_chunks"]``. The ``preferred_chunks`` may be useful
+to improve performance with lazy loading. ``preferred_chunks`` shall be a
+dictionary specifying chunk size per dimension like
+``{"dim1": 1000, "dim2": 2000}`` or
+``{"dim1": [1000, 100], "dim2": [2000, 2000, 2000]}``.
+
+The ``preferred_chunks`` is used by Xarray to define the chunk size in some
+special cases:
+
+- if ``chunks`` along a dimension is ``None`` or not defined
+- if ``chunks`` is ``"auto"``.
+
+In the first case Xarray uses the chunk size specified in
+``preferred_chunks``.
+In the second case Xarray accommodates ideal chunk sizes, preserving if
+possible the ``preferred_chunks``. The ideal chunk size is computed using
+:py:func:`dask.array.core.normalize_chunks`, setting
+``previous_chunks = preferred_chunks``.
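+
+For example, a backend that stores data in fixed-size blocks along the time
+dimension could expose that layout as follows (a sketch; the dimension name
+and block size are hypothetical):
+
+.. code-block:: python
+
+    # hint that chunks of 512 elements along "time" match the on-disk layout;
+    # used by Xarray when chunks={} or chunks="auto" is requested
+    var.encoding["preferred_chunks"] = {"time": 512}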
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index eed4e16eb62..5826211923a 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -22,22 +22,42 @@ v0.17.1 (unreleased)
New Features
~~~~~~~~~~~~
-
+- Allow passing ``combine_attrs`` to :py:meth:`Dataset.merge` (:pull:`4895`).
+ By `Justus Magin `_.
+- Support for `dask.graph_manipulation
+ `_ (requires dask >=2021.3)
+  By `Guido Imperiale `_.
+- Thanks to the new pluggable backend infrastructure external packages may now
+ use the ``xarray.backends`` entry point to register additional engines to be used in
+ :py:func:`open_dataset`, see the documentation in :ref:`add_a_backend`
+ (:issue:`4309`, :issue:`4803`, :pull:`4989`, :pull:`4810` and many others).
+ The backend refactor has been sponsored with the "Essential Open Source Software for Science"
+ grant from the `Chan Zuckerberg Initiative `_ and
+ developed by `B-Open `_.
+ By `Aureliana Barghini `_ and `Alessandro Amici `_.
Breaking changes
~~~~~~~~~~~~~~~~
-
+- :py:func:`open_dataset` and :py:func:`open_dataarray` now accept only the first argument
+  as positional, all others need to be passed as keyword arguments. This is part of the
+ refactor to support external backends (:issue:`4309`, :pull:`4989`).
+ By `Alessandro Amici `_.
Deprecations
~~~~~~~~~~~~
-
Bug fixes
~~~~~~~~~
-
+- Don't allow passing ``axis`` to :py:meth:`Dataset.reduce` methods (:issue:`3510`, :pull:`4940`).
+ By `Justus Magin `_.
Documentation
~~~~~~~~~~~~~
+- New section on :ref:`add_a_backend` in the "Internals" chapter aimed at backend developers
+ (:issue:`4803`, :pull:`4810`). By `Aureliana Barghini `_.
+- Add :py:meth:`Dataset.polyfit` and :py:meth:`DataArray.polyfit` under "See also" in
+ the docstrings of :py:meth:`Dataset.polyfit` and :py:meth:`DataArray.polyfit`
+ (:issue:`5016`, :pull:`5020`). By `Aaron Spring `_.
Internal Changes
diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index 4fa34b39925..382fad60acb 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -26,7 +26,8 @@
)
from ..core.dataarray import DataArray
from ..core.dataset import Dataset, _get_chunk, _maybe_chunk
-from ..core.utils import close_on_error, is_grib_path, is_remote_uri, read_magic_number
+from ..core.utils import is_remote_uri
+from . import plugins
from .common import AbstractDataStore, ArrayWriter
from .locks import _get_scheduler
@@ -70,26 +71,6 @@ def _get_default_engine_remote_uri():
return engine
-def _get_default_engine_grib():
- msgs = []
- try:
- import Nio # noqa: F401
-
- msgs += ["set engine='pynio' to access GRIB files with PyNIO"]
- except ImportError: # pragma: no cover
- pass
- try:
- import cfgrib # noqa: F401
-
- msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"]
- except ImportError: # pragma: no cover
- pass
- if msgs:
- raise ValueError(" or\n".join(msgs))
- else:
- raise ValueError("PyNIO or cfgrib is required for accessing GRIB files")
-
-
def _get_default_engine_gz():
try:
import scipy # noqa: F401
@@ -118,27 +99,9 @@ def _get_default_engine_netcdf():
return engine
-def _get_engine_from_magic_number(filename_or_obj):
- magic_number = read_magic_number(filename_or_obj)
-
- if magic_number.startswith(b"CDF"):
- engine = "scipy"
- elif magic_number.startswith(b"\211HDF\r\n\032\n"):
- engine = "h5netcdf"
- else:
- raise ValueError(
- "cannot guess the engine, "
- f"{magic_number} is not the signature of any supported file format "
- "did you mean to pass a string for a path instead?"
- )
- return engine
-
-
def _get_default_engine(path: str, allow_remote: bool = False):
if allow_remote and is_remote_uri(path):
engine = _get_default_engine_remote_uri()
- elif is_grib_path(path):
- engine = _get_default_engine_grib()
elif path.endswith(".gz"):
engine = _get_default_engine_gz()
else:
@@ -146,27 +109,6 @@ def _get_default_engine(path: str, allow_remote: bool = False):
return engine
-def _autodetect_engine(filename_or_obj):
- if isinstance(filename_or_obj, AbstractDataStore):
- engine = "store"
- elif isinstance(filename_or_obj, (str, Path)):
- engine = _get_default_engine(str(filename_or_obj), allow_remote=True)
- else:
- engine = _get_engine_from_magic_number(filename_or_obj)
- return engine
-
-
-def _get_backend_cls(engine, engines=ENGINES):
- """Select open_dataset method based on current engine"""
- try:
- return engines[engine]
- except KeyError:
- raise ValueError(
- "unrecognized engine for open_dataset: {}\n"
- "must be one of: {}".format(engine, list(ENGINES))
- )
-
-
def _normalize_path(path):
if isinstance(path, Path):
path = str(path)
@@ -236,6 +178,31 @@ def check_attr(name, value):
check_attr(k, v)
+def _resolve_decoders_kwargs(decode_cf, open_backend_dataset_parameters, **decoders):
+ for d in list(decoders):
+ if decode_cf is False and d in open_backend_dataset_parameters:
+ decoders[d] = False
+ if decoders[d] is None:
+ decoders.pop(d)
+ return decoders
+
+
+def _get_mtime(filename_or_obj):
+ # if passed an actual file path, augment the token with
+ # the file modification time
+ mtime = None
+
+ try:
+ path = os.fspath(filename_or_obj)
+ except TypeError:
+ path = None
+
+ if path and not is_remote_uri(path):
+ mtime = os.path.getmtime(filename_or_obj)
+
+ return mtime
+
+
def _protect_dataset_variables_inplace(dataset, cache):
for name, variable in dataset.variables.items():
if name not in variable.dims:
@@ -304,22 +271,90 @@ def load_dataarray(filename_or_obj, **kwargs):
return da.load()
+def _chunk_ds(
+ backend_ds,
+ filename_or_obj,
+ engine,
+ chunks,
+ overwrite_encoded_chunks,
+ **extra_tokens,
+):
+ from dask.base import tokenize
+
+ mtime = _get_mtime(filename_or_obj)
+ token = tokenize(filename_or_obj, mtime, engine, chunks, **extra_tokens)
+ name_prefix = "open_dataset-%s" % token
+
+ variables = {}
+ for name, var in backend_ds.variables.items():
+ var_chunks = _get_chunk(var, chunks)
+ variables[name] = _maybe_chunk(
+ name,
+ var,
+ var_chunks,
+ overwrite_encoded_chunks=overwrite_encoded_chunks,
+ name_prefix=name_prefix,
+ token=token,
+ )
+ ds = backend_ds._replace(variables)
+ return ds
+
+
+def _dataset_from_backend_dataset(
+ backend_ds,
+ filename_or_obj,
+ engine,
+ chunks,
+ cache,
+ overwrite_encoded_chunks,
+ **extra_tokens,
+):
+ if not (isinstance(chunks, (int, dict)) or chunks is None):
+ if chunks != "auto":
+ raise ValueError(
+ "chunks must be an int, dict, 'auto', or None. "
+ "Instead found %s. " % chunks
+ )
+
+ _protect_dataset_variables_inplace(backend_ds, cache)
+ if chunks is None:
+ ds = backend_ds
+ else:
+ ds = _chunk_ds(
+ backend_ds,
+ filename_or_obj,
+ engine,
+ chunks,
+ overwrite_encoded_chunks,
+ **extra_tokens,
+ )
+
+ ds.set_close(backend_ds._close)
+
+ # Ensure source filename always stored in dataset object (GH issue #2550)
+ if "source" not in ds.encoding:
+ if isinstance(filename_or_obj, str):
+ ds.encoding["source"] = filename_or_obj
+
+ return ds
+
+
def open_dataset(
filename_or_obj,
- group=None,
- decode_cf=True,
- mask_and_scale=None,
- decode_times=True,
- concat_characters=True,
- decode_coords=True,
+ *args,
engine=None,
chunks=None,
- lock=None,
cache=None,
+ decode_cf=None,
+ mask_and_scale=None,
+ decode_times=None,
+ decode_timedelta=None,
+ use_cftime=None,
+ concat_characters=None,
+ decode_coords=None,
drop_variables=None,
backend_kwargs=None,
- use_cftime=None,
- decode_timedelta=None,
+ **kwargs,
):
"""Open and decode a dataset from a file or file-like object.
@@ -331,9 +366,26 @@ def open_dataset(
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
- group : str, optional
- Path to the netCDF4 group in the given file to open (only works for
- netCDF4 files).
+ engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
+ "pseudonetcdf", "zarr"}, optional
+ Engine to use when reading files. If not provided, the default engine
+ is chosen based on available dependencies, with a preference for
+ "netcdf4".
+ chunks : int or dict, optional
+ If chunks is provided, it is used to load the new dataset into dask
+ arrays. ``chunks=-1`` loads the dataset with dask using a single
+        chunk for all arrays. ``chunks={}`` loads the dataset with dask using
+ engine preferred chunks if exposed by the backend, otherwise with
+ a single chunk for all arrays.
+ ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
+ engine preferred chunks. See dask chunking for more details.
+ cache : bool, optional
+ If True, cache data loaded from the underlying datastore in memory as
+ NumPy arrays when accessed to avoid reading from the underlying data-
+ store multiple times. Defaults to True unless you specify the `chunks`
+ argument to use dask, in which case it defaults to False. Does not
+ change the behavior of coordinates corresponding to dimensions, which
+ always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
@@ -345,15 +397,33 @@ def open_dataset(
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
- pseudonetcdf backend.
+ pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
+ This keyword may not be supported by all the backends.
+ decode_timedelta : bool, optional
+ If True, decode variables and coordinates with time units in
+ {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
+ into timedelta objects. If False, leave them encoded as numbers.
+ If None (default), assume the same value of decode_time.
+ This keyword may not be supported by all the backends.
+ use_cftime: bool, optional
+ Only relevant if encoded dates come from a standard calendar
+ (e.g. "gregorian", "proleptic_gregorian", "standard", or not
+ specified). If None (default), attempt to decode times to
+ ``np.datetime64[ns]`` objects; if this is not possible, decode times to
+ ``cftime.datetime`` objects. If True, always decode times to
+ ``cftime.datetime`` objects, regardless of whether or not they can be
+ represented using ``np.datetime64[ns]`` objects. If False, always
+ decode times to ``np.datetime64[ns]`` objects; if this is not possible
+ raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
+ This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
@@ -362,54 +432,26 @@ def open_dataset(
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
- engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
- "pseudonetcdf", "zarr"}, optional
- Engine to use when reading files. If not provided, the default engine
- is chosen based on available dependencies, with a preference for
- "netcdf4".
- chunks : int or dict, optional
- If chunks is provided, it is used to load the new dataset into dask
- arrays. ``chunks=-1`` loads the dataset with dask using a single
- chunk for all arrays. `chunks={}`` loads the dataset with dask using
- engine preferred chunks if exposed by the backend, otherwise with
- a single chunk for all arrays.
- ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
- engine preferred chunks. See dask chunking for more details.
- lock : False or lock-like, optional
- Resource lock to use when reading data from disk. Only relevant when
- using dask or another form of parallelism. By default, appropriate
- locks are chosen to safely read and write files with the currently
- active dask scheduler.
- cache : bool, optional
- If True, cache data loaded from the underlying datastore in memory as
- NumPy arrays when accessed to avoid reading from the underlying data-
- store multiple times. Defaults to True unless you specify the `chunks`
- argument to use dask, in which case it defaults to False. Does not
- change the behavior of coordinates corresponding to dimensions, which
- always load their data from disk into a ``pandas.Index``.
drop_variables: str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
- backend_kwargs: dict, optional
- A dictionary of keyword arguments to pass on to the backend. This
- may be useful when backend options would improve performance or
- allow user control of dataset processing.
- use_cftime: bool, optional
- Only relevant if encoded dates come from a standard calendar
- (e.g. "gregorian", "proleptic_gregorian", "standard", or not
- specified). If None (default), attempt to decode times to
- ``np.datetime64[ns]`` objects; if this is not possible, decode times to
- ``cftime.datetime`` objects. If True, always decode times to
- ``cftime.datetime`` objects, regardless of whether or not they can be
- represented using ``np.datetime64[ns]`` objects. If False, always
- decode times to ``np.datetime64[ns]`` objects; if this is not possible
- raise an error.
- decode_timedelta : bool, optional
- If True, decode variables and coordinates with time units in
- {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
- into timedelta objects. If False, leave them encoded as numbers.
- If None (default), assume the same value of decode_time.
+ backend_kwargs: dict
+ Additional keyword arguments passed on to the engine open function,
+ equivalent to `**kwargs`.
+ **kwargs: dict
+ Additional keyword arguments passed on to the engine open function.
+ For example:
+
+ - 'group': path to the netCDF4 group in the given file to open given as
+      a str, supported by "netcdf4", "h5netcdf", "zarr".
+ - 'lock': resource lock to use when reading data from disk. Only
+ relevant when using dask or another form of parallelism. By default,
+ appropriate locks are chosen to safely read and write files with the
+ currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
+ "pynio", "pseudonetcdf", "cfgrib".
+
+ See engine open function for kwargs accepted by each specific engine.
Returns
-------
@@ -427,159 +469,72 @@ def open_dataset(
--------
open_mfdataset
"""
- if os.environ.get("XARRAY_BACKEND_API", "v1") == "v2":
- kwargs = {k: v for k, v in locals().items() if v is not None}
- from . import apiv2
-
- return apiv2.open_dataset(**kwargs)
-
- if mask_and_scale is None:
- mask_and_scale = not engine == "pseudonetcdf"
-
- if not decode_cf:
- mask_and_scale = False
- decode_times = False
- concat_characters = False
- decode_coords = False
- decode_timedelta = False
+ if len(args) > 0:
+ raise TypeError(
+ "open_dataset() takes only 1 positional argument starting from version 0.18.0, "
+ "all other options must be passed as keyword arguments"
+ )
if cache is None:
cache = chunks is None
- if backend_kwargs is None:
- backend_kwargs = {}
-
- def maybe_decode_store(store, chunks):
- ds = conventions.decode_cf(
- store,
- mask_and_scale=mask_and_scale,
- decode_times=decode_times,
- concat_characters=concat_characters,
- decode_coords=decode_coords,
- drop_variables=drop_variables,
- use_cftime=use_cftime,
- decode_timedelta=decode_timedelta,
- )
+ if backend_kwargs is not None:
+ kwargs.update(backend_kwargs)
- _protect_dataset_variables_inplace(ds, cache)
-
- if chunks is not None and engine != "zarr":
- from dask.base import tokenize
-
- # if passed an actual file path, augment the token with
- # the file modification time
- if isinstance(filename_or_obj, str) and not is_remote_uri(filename_or_obj):
- mtime = os.path.getmtime(filename_or_obj)
- else:
- mtime = None
- token = tokenize(
- filename_or_obj,
- mtime,
- group,
- decode_cf,
- mask_and_scale,
- decode_times,
- concat_characters,
- decode_coords,
- engine,
- chunks,
- drop_variables,
- use_cftime,
- decode_timedelta,
- )
- name_prefix = "open_dataset-%s" % token
- ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token)
-
- elif engine == "zarr":
- # adapted from Dataset.Chunk() and taken from open_zarr
- if not (isinstance(chunks, (int, dict)) or chunks is None):
- if chunks != "auto":
- raise ValueError(
- "chunks must be an int, dict, 'auto', or None. "
- "Instead found %s. " % chunks
- )
-
- if chunks == "auto":
- try:
- import dask.array # noqa
- except ImportError:
- chunks = None
-
- # auto chunking needs to be here and not in ZarrStore because
- # the variable chunks does not survive decode_cf
- # return trivial case
- if chunks is None:
- return ds
-
- if isinstance(chunks, int):
- chunks = dict.fromkeys(ds.dims, chunks)
-
- variables = {
- k: _maybe_chunk(
- k,
- v,
- _get_chunk(v, chunks),
- overwrite_encoded_chunks=overwrite_encoded_chunks,
- )
- for k, v in ds.variables.items()
- }
- ds2 = ds._replace(variables)
-
- else:
- ds2 = ds
- ds2.set_close(ds._close)
- return ds2
-
- filename_or_obj = _normalize_path(filename_or_obj)
-
- if isinstance(filename_or_obj, AbstractDataStore):
- store = filename_or_obj
- else:
- if engine is None:
- engine = _autodetect_engine(filename_or_obj)
-
- extra_kwargs = {}
- if group is not None:
- extra_kwargs["group"] = group
- if lock is not None:
- extra_kwargs["lock"] = lock
-
- if engine == "zarr":
- backend_kwargs = backend_kwargs.copy()
- overwrite_encoded_chunks = backend_kwargs.pop(
- "overwrite_encoded_chunks", None
- )
+ if engine is None:
+ engine = plugins.guess_engine(filename_or_obj)
- opener = _get_backend_cls(engine)
- store = opener(filename_or_obj, **extra_kwargs, **backend_kwargs)
+ backend = plugins.get_backend(engine)
- with close_on_error(store):
- ds = maybe_decode_store(store, chunks)
+ decoders = _resolve_decoders_kwargs(
+ decode_cf,
+ open_backend_dataset_parameters=backend.open_dataset_parameters,
+ mask_and_scale=mask_and_scale,
+ decode_times=decode_times,
+ decode_timedelta=decode_timedelta,
+ concat_characters=concat_characters,
+ use_cftime=use_cftime,
+ decode_coords=decode_coords,
+ )
- # Ensure source filename always stored in dataset object (GH issue #2550)
- if "source" not in ds.encoding:
- if isinstance(filename_or_obj, str):
- ds.encoding["source"] = filename_or_obj
+ overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
+ backend_ds = backend.open_dataset(
+ filename_or_obj,
+ drop_variables=drop_variables,
+ **decoders,
+ **kwargs,
+ )
+ ds = _dataset_from_backend_dataset(
+ backend_ds,
+ filename_or_obj,
+ engine,
+ chunks,
+ cache,
+ overwrite_encoded_chunks,
+ drop_variables=drop_variables,
+ **decoders,
+ **kwargs,
+ )
return ds
def open_dataarray(
filename_or_obj,
- group=None,
- decode_cf=True,
- mask_and_scale=None,
- decode_times=True,
- concat_characters=True,
- decode_coords=True,
+ *args,
engine=None,
chunks=None,
- lock=None,
cache=None,
+ decode_cf=None,
+ mask_and_scale=None,
+ decode_times=None,
+ decode_timedelta=None,
+ use_cftime=None,
+ concat_characters=None,
+ decode_coords=None,
drop_variables=None,
backend_kwargs=None,
- use_cftime=None,
- decode_timedelta=None,
+ **kwargs,
):
"""Open an DataArray from a file or file-like object containing a single
data variable.
@@ -590,14 +545,31 @@ def open_dataarray(
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
- Strings and Paths are interpreted as a path to a netCDF file or an
- OpenDAP URL and opened with python-netCDF4, unless the filename ends
- with .gz, in which case the file is gunzipped and opened with
+ Strings and Path objects are interpreted as a path to a netCDF file
+ or an OpenDAP URL and opened with python-netCDF4, unless the filename
+ ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
- group : str, optional
- Path to the netCDF4 group in the given file to open (only works for
- netCDF4 files).
+ engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
+ "pseudonetcdf", "zarr"}, optional
+ Engine to use when reading files. If not provided, the default engine
+ is chosen based on available dependencies, with a preference for
+ "netcdf4".
+ chunks : int or dict, optional
+ If chunks is provided, it is used to load the new dataset into dask
+ arrays. ``chunks=-1`` loads the dataset with dask using a single
+        chunk for all arrays. ``chunks={}`` loads the dataset with dask using
+ engine preferred chunks if exposed by the backend, otherwise with
+ a single chunk for all arrays.
+ ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
+ engine preferred chunks. See dask chunking for more details.
+ cache : bool, optional
+ If True, cache data loaded from the underlying datastore in memory as
+ NumPy arrays when accessed to avoid reading from the underlying data-
+ store multiple times. Defaults to True unless you specify the `chunks`
+ argument to use dask, in which case it defaults to False. Does not
+ change the behavior of coordinates corresponding to dimensions, which
+ always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
@@ -609,15 +581,33 @@ def open_dataarray(
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
- pseudonetcdf backend.
+ pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
+ This keyword may not be supported by all the backends.
+ decode_timedelta : bool, optional
+ If True, decode variables and coordinates with time units in
+ {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
+ into timedelta objects. If False, leave them encoded as numbers.
+ If None (default), assume the same value of decode_time.
+ This keyword may not be supported by all the backends.
+ use_cftime: bool, optional
+ Only relevant if encoded dates come from a standard calendar
+ (e.g. "gregorian", "proleptic_gregorian", "standard", or not
+ specified). If None (default), attempt to decode times to
+ ``np.datetime64[ns]`` objects; if this is not possible, decode times to
+ ``cftime.datetime`` objects. If True, always decode times to
+ ``cftime.datetime`` objects, regardless of whether or not they can be
+ represented using ``np.datetime64[ns]`` objects. If False, always
+ decode times to ``np.datetime64[ns]`` objects; if this is not possible
+ raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
+ This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
@@ -626,51 +616,26 @@ def open_dataarray(
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
- engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib"}, \
- optional
- Engine to use when reading files. If not provided, the default engine
- is chosen based on available dependencies, with a preference for
- "netcdf4".
- chunks : int or dict, optional
- If chunks is provided, it used to load the new dataset into dask
- arrays.
- lock : False or lock-like, optional
- Resource lock to use when reading data from disk. Only relevant when
- using dask or another form of parallelism. By default, appropriate
- locks are chosen to safely read and write files with the currently
- active dask scheduler.
- cache : bool, optional
- If True, cache data loaded from the underlying datastore in memory as
- NumPy arrays when accessed to avoid reading from the underlying data-
- store multiple times. Defaults to True unless you specify the `chunks`
- argument to use dask, in which case it defaults to False. Does not
- change the behavior of coordinates corresponding to dimensions, which
- always load their data from disk into a ``pandas.Index``.
drop_variables: str or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
- backend_kwargs: dict, optional
- A dictionary of keyword arguments to pass on to the backend. This
- may be useful when backend options would improve performance or
- allow user control of dataset processing. If using fsspec URLs,
- include the key "storage_options" to pass arguments to the
- storage layer.
- use_cftime: bool, optional
- Only relevant if encoded dates come from a standard calendar
- (e.g. "gregorian", "proleptic_gregorian", "standard", or not
- specified). If None (default), attempt to decode times to
- ``np.datetime64[ns]`` objects; if this is not possible, decode times to
- ``cftime.datetime`` objects. If True, always decode times to
- ``cftime.datetime`` objects, regardless of whether or not they can be
- represented using ``np.datetime64[ns]`` objects. If False, always
- decode times to ``np.datetime64[ns]`` objects; if this is not possible
- raise an error.
- decode_timedelta : bool, optional
- If True, decode variables and coordinates with time units in
- {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
- into timedelta objects. If False, leave them encoded as numbers.
- If None (default), assume the same value of decode_time.
+ backend_kwargs: dict
+ Additional keyword arguments passed on to the engine open function,
+ equivalent to `**kwargs`.
+ **kwargs: dict
+ Additional keyword arguments passed on to the engine open function.
+ For example:
+
+ - 'group': path to the netCDF4 group in the given file to open given as
+      a str, supported by "netcdf4", "h5netcdf", "zarr".
+ - 'lock': resource lock to use when reading data from disk. Only
+ relevant when using dask or another form of parallelism. By default,
+ appropriate locks are chosen to safely read and write files with the
+ currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
+ "pynio", "pseudonetcdf", "cfgrib".
+
+ See engine open function for kwargs accepted by each specific engine.
Notes
-----
@@ -685,10 +650,14 @@ def open_dataarray(
--------
open_dataset
"""
+ if len(args) > 0:
+ raise TypeError(
+ "open_dataarray() takes only 1 positional argument starting from version 0.18.0, "
+ "all other options must be passed as keyword arguments"
+ )
dataset = open_dataset(
filename_or_obj,
- group=group,
decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
@@ -696,12 +665,12 @@ def open_dataarray(
decode_coords=decode_coords,
engine=engine,
chunks=chunks,
- lock=lock,
cache=cache,
drop_variables=drop_variables,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
+ **kwargs,
)
if len(dataset.data_vars) != 1:
@@ -734,7 +703,6 @@ def open_mfdataset(
compat="no_conflicts",
preprocess=None,
engine=None,
- lock=None,
data_vars="all",
coords="different",
combine="by_coords",
@@ -804,11 +772,6 @@ def open_mfdataset(
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4".
- lock : False or lock-like, optional
- Resource lock to use when reading data from disk. Only relevant when
- using dask or another form of parallelism. By default, appropriate
- locks are chosen to safely read and write files with the currently
- active dask scheduler.
data_vars : {"minimal", "different", "all"} or list of str, optional
These data variables will be concatenated together:
* "minimal": Only data variables in which the dimension already
@@ -923,7 +886,7 @@ def open_mfdataset(
combined_ids_paths = _infer_concat_order_from_positions(paths)
ids, paths = (list(combined_ids_paths.keys()), list(combined_ids_paths.values()))
- open_kwargs = dict(engine=engine, chunks=chunks or {}, lock=lock, **kwargs)
+ open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs)
if parallel:
import dask
diff --git a/xarray/backends/apiv2.py b/xarray/backends/apiv2.py
deleted file mode 100644
index de1b3e1bb29..00000000000
--- a/xarray/backends/apiv2.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import os
-
-from ..core import indexing
-from ..core.dataset import _get_chunk, _maybe_chunk
-from ..core.utils import is_remote_uri
-from . import plugins
-
-
-def _protect_dataset_variables_inplace(dataset, cache):
- for name, variable in dataset.variables.items():
- if name not in variable.dims:
- # no need to protect IndexVariable objects
- data = indexing.CopyOnWriteArray(variable._data)
- if cache:
- data = indexing.MemoryCachedArray(data)
- variable.data = data
-
-
-def _get_mtime(filename_or_obj):
- # if passed an actual file path, augment the token with
- # the file modification time
- mtime = None
-
- try:
- path = os.fspath(filename_or_obj)
- except TypeError:
- path = None
-
- if path and not is_remote_uri(path):
- mtime = os.path.getmtime(filename_or_obj)
-
- return mtime
-
-
-def _chunk_ds(
- backend_ds,
- filename_or_obj,
- engine,
- chunks,
- overwrite_encoded_chunks,
- **extra_tokens,
-):
- from dask.base import tokenize
-
- mtime = _get_mtime(filename_or_obj)
- token = tokenize(filename_or_obj, mtime, engine, chunks, **extra_tokens)
- name_prefix = "open_dataset-%s" % token
-
- variables = {}
- for name, var in backend_ds.variables.items():
- var_chunks = _get_chunk(var, chunks)
- variables[name] = _maybe_chunk(
- name,
- var,
- var_chunks,
- overwrite_encoded_chunks=overwrite_encoded_chunks,
- name_prefix=name_prefix,
- token=token,
- )
- ds = backend_ds._replace(variables)
- return ds
-
-
-def _dataset_from_backend_dataset(
- backend_ds,
- filename_or_obj,
- engine,
- chunks,
- cache,
- overwrite_encoded_chunks,
- **extra_tokens,
-):
- if not (isinstance(chunks, (int, dict)) or chunks is None):
- if chunks != "auto":
- raise ValueError(
- "chunks must be an int, dict, 'auto', or None. "
- "Instead found %s. " % chunks
- )
-
- _protect_dataset_variables_inplace(backend_ds, cache)
- if chunks is None:
- ds = backend_ds
- else:
- ds = _chunk_ds(
- backend_ds,
- filename_or_obj,
- engine,
- chunks,
- overwrite_encoded_chunks,
- **extra_tokens,
- )
-
- ds.set_close(backend_ds._close)
-
- # Ensure source filename always stored in dataset object (GH issue #2550)
- if "source" not in ds.encoding:
- if isinstance(filename_or_obj, str):
- ds.encoding["source"] = filename_or_obj
-
- return ds
-
-
-def _resolve_decoders_kwargs(decode_cf, open_backend_dataset_parameters, **decoders):
- for d in list(decoders):
- if decode_cf is False and d in open_backend_dataset_parameters:
- decoders[d] = False
- if decoders[d] is None:
- decoders.pop(d)
- return decoders
-
-
-def open_dataset(
- filename_or_obj,
- *,
- engine=None,
- chunks=None,
- cache=None,
- decode_cf=None,
- mask_and_scale=None,
- decode_times=None,
- decode_timedelta=None,
- use_cftime=None,
- concat_characters=None,
- decode_coords=None,
- drop_variables=None,
- backend_kwargs=None,
- **kwargs,
-):
- """Open and decode a dataset from a file or file-like object.
-
- Parameters
- ----------
- filename_or_obj : str, Path, file-like or DataStore
- Strings and Path objects are interpreted as a path to a netCDF file
- or an OpenDAP URL and opened with python-netCDF4, unless the filename
- ends with .gz, in which case the file is unzipped and opened with
- scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
- objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
- engine : str, optional
- Engine to use when reading files. If not provided, the default engine
- is chosen based on available dependencies, with a preference for
- "netcdf4". Options are: {"netcdf4", "scipy", "pydap", "h5netcdf",\
- "pynio", "cfgrib", "pseudonetcdf", "zarr"}.
- chunks : int or dict, optional
- If chunks is provided, it is used to load the new dataset into dask
- arrays. ``chunks=-1`` loads the dataset with dask using a single
- chunk for all arrays. `chunks={}`` loads the dataset with dask using
- engine preferred chunks if exposed by the backend, otherwise with
- a single chunk for all arrays.
- ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
- engine preferred chunks. See dask chunking for more details.
- cache : bool, optional
- If True, cache data is loaded from the underlying datastore in memory as
- NumPy arrays when accessed to avoid reading from the underlying data-
- store multiple times. Defaults to True unless you specify the `chunks`
- argument to use dask, in which case it defaults to False. Does not
- change the behavior of coordinates corresponding to dimensions, which
- always load their data from disk into a ``pandas.Index``.
- decode_cf : bool, optional
- Setting ``decode_cf=False`` will disable ``mask_and_scale``,
- ``decode_times``, ``decode_timedelta``, ``concat_characters``,
- ``decode_coords``.
- mask_and_scale : bool, optional
- If True, array values equal to `_FillValue` are replaced with NA and other
- values are scaled according to the formula `original_values * scale_factor +
- add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
- taken from variable attributes (if they exist). If the `_FillValue` or
- `missing_value` attribute contains multiple values, a warning will be
- issued and all array values matching one of the multiple values will
- be replaced by NA. mask_and_scale defaults to True except for the
- pseudonetcdf backend. This keyword may not be supported by all the backends.
- decode_times : bool, optional
- If True, decode times encoded in the standard NetCDF datetime format
- into datetime objects. Otherwise, leave them encoded as numbers.
- This keyword may not be supported by all the backends.
- decode_timedelta : bool, optional
- If True, decode variables and coordinates with time units in
- {"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
- into timedelta objects. If False, they remain encoded as numbers.
- If None (default), assume the same value of decode_time.
- This keyword may not be supported by all the backends.
- use_cftime: bool, optional
- Only relevant if encoded dates come from a standard calendar
- (e.g. "gregorian", "proleptic_gregorian", "standard", or not
- specified). If None (default), attempt to decode times to
- ``np.datetime64[ns]`` objects; if this is not possible, decode times to
- ``cftime.datetime`` objects. If True, always decode times to
- ``cftime.datetime`` objects, regardless of whether or not they can be
- represented using ``np.datetime64[ns]`` objects. If False, always
- decode times to ``np.datetime64[ns]`` objects; if this is not possible
- raise an error. This keyword may not be supported by all the backends.
- concat_characters : bool, optional
- If True, concatenate along the last dimension of character arrays to
- form string arrays. Dimensions will only be concatenated over (and
- removed) if they have no corresponding variable and if they are only
- used as the last dimension of character arrays.
- This keyword may not be supported by all the backends.
- decode_coords : bool or {"coordinates", "all"}, optional
- Controls which variables are set as coordinate variables:
-
- - "coordinates" or True: Set variables referred to in the
- ``'coordinates'`` attribute of the datasets or individual variables
- as coordinate variables.
- - "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
- other attributes as coordinate variables.
- drop_variables: str or iterable, optional
- A variable or list of variables to exclude from the dataset parsing.
- This may be useful to drop variables with problems or
- inconsistent values.
- backend_kwargs:
- Additional keyword arguments passed on to the engine open function.
- **kwargs: dict
- Additional keyword arguments passed on to the engine open function.
- For example:
-
- - 'group': path to the netCDF4 group in the given file to open given as
- a str,supported by "netcdf4", "h5netcdf", "zarr".
-
- - 'lock': resource lock to use when reading data from disk. Only
- relevant when using dask or another form of parallelism. By default,
- appropriate locks are chosen to safely read and write files with the
- currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
- "pynio", "pseudonetcdf", "cfgrib".
-
- See engine open function for kwargs accepted by each specific engine.
-
-
- Returns
- -------
- dataset : Dataset
- The newly created dataset.
-
- Notes
- -----
- ``open_dataset`` opens the file with read-only access. When you modify
- values of a Dataset, even one linked to files on disk, only the in-memory
- copy you are manipulating in xarray is modified: the original file on disk
- is never touched.
-
- See Also
- --------
- open_mfdataset
- """
-
- if cache is None:
- cache = chunks is None
-
- if backend_kwargs is not None:
- kwargs.update(backend_kwargs)
-
- if engine is None:
- engine = plugins.guess_engine(filename_or_obj)
-
- backend = plugins.get_backend(engine)
-
- decoders = _resolve_decoders_kwargs(
- decode_cf,
- open_backend_dataset_parameters=backend.open_dataset_parameters,
- mask_and_scale=mask_and_scale,
- decode_times=decode_times,
- decode_timedelta=decode_timedelta,
- concat_characters=concat_characters,
- use_cftime=use_cftime,
- decode_coords=decode_coords,
- )
-
- overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
- backend_ds = backend.open_dataset(
- filename_or_obj,
- drop_variables=drop_variables,
- **decoders,
- **kwargs,
- )
- ds = _dataset_from_backend_dataset(
- backend_ds,
- filename_or_obj,
- engine,
- chunks,
- cache,
- overwrite_encoded_chunks,
- drop_variables=drop_variables,
- **decoders,
- **kwargs,
- )
-
- return ds
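
For orientation, the dispatch shown above (``plugins.guess_engine`` -> ``plugins.get_backend`` -> ``backend.open_dataset`` -> ``_dataset_from_backend_dataset``) is the path a user-level ``xr.open_dataset`` call runs through. A minimal sketch of such a call, assuming a hypothetical local file ``example.nc``, the netcdf4 engine installed, and dask available for the ``chunks`` argument::

    import xarray as xr

    ds = xr.open_dataset(
        "example.nc",                    # hypothetical file
        engine="netcdf4",                # otherwise guessed via plugins.guess_engine
        drop_variables=["diagnostics"],  # hypothetical variable name, forwarded to the backend
        chunks={"time": 10},             # applied after the backend returns (needs dask)
    )
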
diff --git a/xarray/backends/cfgrib_.py b/xarray/backends/cfgrib_.py
index d582af82c6e..c63e1543746 100644
--- a/xarray/backends/cfgrib_.py
+++ b/xarray/backends/cfgrib_.py
@@ -62,7 +62,7 @@ def open_store_variable(self, name, var):
data = var.data
else:
wrapped_array = CfGribArrayWrapper(self, var.data)
- data = indexing.LazilyOuterIndexedArray(wrapped_array)
+ data = indexing.LazilyIndexedArray(wrapped_array)
encoding = self.ds.encoding.copy()
encoding["original_shape"] = var.data.shape
@@ -87,9 +87,9 @@ def get_encoding(self):
class CfgribfBackendEntrypoint(BackendEntrypoint):
- def guess_can_open(self, store_spec):
+ def guess_can_open(self, filename_or_obj):
try:
- _, ext = os.path.splitext(store_spec)
+ _, ext = os.path.splitext(filename_or_obj)
except TypeError:
return False
return ext in {".grib", ".grib2", ".grb", ".grb2"}
diff --git a/xarray/backends/common.py b/xarray/backends/common.py
index e2905d0866b..aa902602278 100644
--- a/xarray/backends/common.py
+++ b/xarray/backends/common.py
@@ -1,7 +1,7 @@
import logging
import time
import traceback
-from typing import Dict, Tuple, Type, Union
+from typing import Any, Dict, Tuple, Type, Union
import numpy as np
@@ -344,12 +344,41 @@ def encode(self, variables, attributes):
class BackendEntrypoint:
+ """
+ ``BackendEntrypoint`` is a class container and it is the main interface
+ for the backend plugins, see :ref:`RST backend_entrypoint`.
+ It shall implement:
+
+ - ``open_dataset`` method: it shall implement reading from file, variables
+ decoding and it returns an instance of :py:class:`~xarray.Dataset`.
+ It shall take in input at least ``filename_or_obj`` argument and
+ ``drop_variables`` keyword argument.
+ For more details see :ref:`RST open_dataset`.
+ - ``guess_can_open`` method: it shall return ``True`` if the backend is able to open
+ ``filename_or_obj``, ``False`` otherwise. The implementation of this
+ method is not mandatory.
+ """
+
open_dataset_parameters: Union[Tuple, None] = None
+ """list of ``open_dataset`` method parameters"""
+
+ def open_dataset(
+ self,
+ filename_or_obj: str,
+ drop_variables: Tuple[str] = None,
+ **kwargs: Any,
+ ):
+ """
+ Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`.
+ """
- def open_dataset(self):
raise NotImplementedError
- def guess_can_open(self, store_spec):
+ def guess_can_open(self, filename_or_obj):
+ """
+ Backend guess_can_open method used by Xarray in :py:func:`~xarray.open_dataset`.
+ """
+
return False
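
To make the contract in the docstring above concrete, here is a rough sketch of a third-party entrypoint; the ``.foo`` extension, the ``my_reader`` module and its ``read`` helper are hypothetical stand-ins for a real format library::

    import os

    import xarray as xr
    from xarray.backends.common import BackendEntrypoint


    class MyBackendEntrypoint(BackendEntrypoint):
        def open_dataset(self, filename_or_obj, *, drop_variables=None, **kwargs):
            import my_reader  # hypothetical low-level reader

            # assumed to return a mapping {name: (dims, values)}
            raw = my_reader.read(filename_or_obj)
            if drop_variables:
                raw = {k: v for k, v in raw.items() if k not in drop_variables}
            return xr.Dataset({name: (dims, values) for name, (dims, values) in raw.items()})

        def guess_can_open(self, filename_or_obj):
            try:
                _, ext = os.path.splitext(filename_or_obj)
            except TypeError:
                return False
            return ext == ".foo"
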
diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py
index ca531af81f6..0909418dd8d 100644
--- a/xarray/backends/h5netcdf_.py
+++ b/xarray/backends/h5netcdf_.py
@@ -187,7 +187,7 @@ def open_store_variable(self, name, var):
import h5py
dimensions = var.dimensions
- data = indexing.LazilyOuterIndexedArray(H5NetCDFArrayWrapper(name, self))
+ data = indexing.LazilyIndexedArray(H5NetCDFArrayWrapper(name, self))
attrs = _read_attributes(var)
# netCDF4 specific encoding
@@ -334,14 +334,14 @@ def close(self, **kwargs):
class H5netcdfBackendEntrypoint(BackendEntrypoint):
- def guess_can_open(self, store_spec):
+ def guess_can_open(self, filename_or_obj):
try:
- return read_magic_number(store_spec).startswith(b"\211HDF\r\n\032\n")
+ return read_magic_number(filename_or_obj).startswith(b"\211HDF\r\n\032\n")
except TypeError:
pass
try:
- _, ext = os.path.splitext(store_spec)
+ _, ext = os.path.splitext(filename_or_obj)
except TypeError:
return False
diff --git a/xarray/backends/netCDF4_.py b/xarray/backends/netCDF4_.py
index 78ad1a4c20f..7243a36dea3 100644
--- a/xarray/backends/netCDF4_.py
+++ b/xarray/backends/netCDF4_.py
@@ -388,7 +388,7 @@ def ds(self):
def open_store_variable(self, name, var):
dimensions = var.dimensions
- data = indexing.LazilyOuterIndexedArray(NetCDF4ArrayWrapper(name, self))
+ data = indexing.LazilyIndexedArray(NetCDF4ArrayWrapper(name, self))
attributes = {k: var.getncattr(k) for k in var.ncattrs()}
_ensure_fill_value_valid(data, attributes)
# netCDF4 specific encoding; save _FillValue for later
@@ -513,11 +513,11 @@ def close(self, **kwargs):
class NetCDF4BackendEntrypoint(BackendEntrypoint):
- def guess_can_open(self, store_spec):
- if isinstance(store_spec, str) and is_remote_uri(store_spec):
+ def guess_can_open(self, filename_or_obj):
+ if isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj):
return True
try:
- _, ext = os.path.splitext(store_spec)
+ _, ext = os.path.splitext(filename_or_obj)
except TypeError:
return False
return ext in {".nc", ".nc4", ".cdf"}
diff --git a/xarray/backends/pseudonetcdf_.py b/xarray/backends/pseudonetcdf_.py
index a9d7f0bbed4..62966711b11 100644
--- a/xarray/backends/pseudonetcdf_.py
+++ b/xarray/backends/pseudonetcdf_.py
@@ -74,7 +74,7 @@ def ds(self):
return self._manager.acquire()
def open_store_variable(self, name, var):
- data = indexing.LazilyOuterIndexedArray(PncArrayWrapper(name, self))
+ data = indexing.LazilyIndexedArray(PncArrayWrapper(name, self))
attrs = {k: getattr(var, k) for k in var.ncattrs()}
return Variable(var.dimensions, data, attrs)
diff --git a/xarray/backends/pydap_.py b/xarray/backends/pydap_.py
index 09bff9acc1d..148f32cf982 100644
--- a/xarray/backends/pydap_.py
+++ b/xarray/backends/pydap_.py
@@ -92,7 +92,7 @@ def open(cls, url, session=None):
return cls(ds)
def open_store_variable(self, var):
- data = indexing.LazilyOuterIndexedArray(PydapArrayWrapper(var))
+ data = indexing.LazilyIndexedArray(PydapArrayWrapper(var))
return Variable(var.dimensions, data, _fix_attributes(var.attributes))
def get_variables(self):
@@ -108,8 +108,8 @@ def get_dimensions(self):
class PydapBackendEntrypoint(BackendEntrypoint):
- def guess_can_open(self, store_spec):
- return isinstance(store_spec, str) and is_remote_uri(store_spec)
+ def guess_can_open(self, filename_or_obj):
+ return isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj)
def open_dataset(
self,
diff --git a/xarray/backends/pynio_.py b/xarray/backends/pynio_.py
index 8ace5697d09..ea9841da21e 100644
--- a/xarray/backends/pynio_.py
+++ b/xarray/backends/pynio_.py
@@ -74,7 +74,7 @@ def ds(self):
return self._manager.acquire()
def open_store_variable(self, name, var):
- data = indexing.LazilyOuterIndexedArray(NioArrayWrapper(name, self))
+ data = indexing.LazilyIndexedArray(NioArrayWrapper(name, self))
return Variable(var.dimensions, data, var.attributes)
def get_variables(self):
@@ -99,6 +99,7 @@ def close(self):
class PynioBackendEntrypoint(BackendEntrypoint):
def open_dataset(
+ self,
filename_or_obj,
mask_and_scale=True,
decode_times=True,
diff --git a/xarray/backends/rasterio_.py b/xarray/backends/rasterio_.py
index d776b116ea8..51f0599e8e0 100644
--- a/xarray/backends/rasterio_.py
+++ b/xarray/backends/rasterio_.py
@@ -335,9 +335,7 @@ def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, loc
else:
attrs[k] = v
- data = indexing.LazilyOuterIndexedArray(
- RasterioArrayWrapper(manager, lock, vrt_params)
- )
+ data = indexing.LazilyIndexedArray(RasterioArrayWrapper(manager, lock, vrt_params))
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
diff --git a/xarray/backends/scipy_.py b/xarray/backends/scipy_.py
index b98515c7b5b..02e84c4d7a5 100644
--- a/xarray/backends/scipy_.py
+++ b/xarray/backends/scipy_.py
@@ -233,14 +233,14 @@ def close(self):
class ScipyBackendEntrypoint(BackendEntrypoint):
- def guess_can_open(self, store_spec):
+ def guess_can_open(self, filename_or_obj):
try:
- return read_magic_number(store_spec).startswith(b"CDF")
+ return read_magic_number(filename_or_obj).startswith(b"CDF")
except TypeError:
pass
try:
- _, ext = os.path.splitext(store_spec)
+ _, ext = os.path.splitext(filename_or_obj)
except TypeError:
return False
return ext in {".nc", ".nc4", ".cdf", ".gz"}
diff --git a/xarray/backends/store.py b/xarray/backends/store.py
index d57b3ab9df8..860a0254b64 100644
--- a/xarray/backends/store.py
+++ b/xarray/backends/store.py
@@ -4,8 +4,8 @@
class StoreBackendEntrypoint(BackendEntrypoint):
- def guess_can_open(self, store_spec):
- return isinstance(store_spec, AbstractDataStore)
+ def guess_can_open(self, filename_or_obj):
+ return isinstance(filename_or_obj, AbstractDataStore)
def open_dataset(
self,
diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py
index d740b207e37..a4663b6708f 100644
--- a/xarray/backends/zarr.py
+++ b/xarray/backends/zarr.py
@@ -336,7 +336,7 @@ def __init__(
self._write_region = write_region
def open_store_variable(self, name, zarr_array):
- data = indexing.LazilyOuterIndexedArray(ZarrArrayWrapper(name, self))
+ data = indexing.LazilyIndexedArray(ZarrArrayWrapper(name, self))
dimensions, attributes = _get_zarr_dims_and_attrs(zarr_array, DIMENSION_KEY)
attributes = dict(attributes)
encoding = {
diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py
index 948bff1056a..aafd620c7bf 100644
--- a/xarray/coding/cftimeindex.py
+++ b/xarray/coding/cftimeindex.py
@@ -59,6 +59,12 @@
REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END = 10
+if LooseVersion(pd.__version__) > LooseVersion("1.2.3"):
+ OUT_OF_BOUNDS_TIMEDELTA_ERROR = pd.errors.OutOfBoundsTimedelta
+else:
+ OUT_OF_BOUNDS_TIMEDELTA_ERROR = OverflowError
+
+
def named(name, pattern):
return "(?P<" + name + ">" + pattern + ")"
@@ -562,7 +568,7 @@ def __sub__(self, other):
elif _contains_cftime_datetimes(np.array(other)):
try:
return pd.TimedeltaIndex(np.array(self) - np.array(other))
- except OverflowError:
+ except OUT_OF_BOUNDS_TIMEDELTA_ERROR:
raise ValueError(
"The time difference exceeds the range of values "
"that can be expressed at the nanosecond resolution."
@@ -573,7 +579,7 @@ def __sub__(self, other):
def __rsub__(self, other):
try:
return pd.TimedeltaIndex(other - np.array(self))
- except OverflowError:
+ except OUT_OF_BOUNDS_TIMEDELTA_ERROR:
raise ValueError(
"The time difference exceeds the range of values "
"that can be expressed at the nanosecond resolution."
diff --git a/xarray/conventions.py b/xarray/conventions.py
index bb0cc5cd338..f7be60c3f9e 100644
--- a/xarray/conventions.py
+++ b/xarray/conventions.py
@@ -374,7 +374,7 @@ def decode_cf_variable(
data = BoolTypeArray(data)
if not is_duck_dask_array(data):
- data = indexing.LazilyOuterIndexedArray(data)
+ data = indexing.LazilyIndexedArray(data)
return Variable(dimensions, data, attributes, encoding=encoding)
diff --git a/xarray/core/computation.py b/xarray/core/computation.py
index e68c6b2629d..027bf29000a 100644
--- a/xarray/core/computation.py
+++ b/xarray/core/computation.py
@@ -923,6 +923,14 @@ def apply_ufunc(
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
+ Notes
+ -----
+ This function is designed for the more common case where ``func`` can work on numpy
+ arrays. If ``func`` needs to manipulate a whole xarray object subset to each block,
+ consider using :py:func:`xarray.map_blocks` instead.
+
+ Note that due to its overhead, ``map_blocks`` is considerably slower than ``apply_ufunc``.
+
Examples
--------
Calculate the vector magnitude of two arguments:
@@ -1015,6 +1023,8 @@ def earth_mover_distance(first_samples,
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
+ dask.array.apply_gufunc
+ xarray.map_blocks
References
----------
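
A rough illustration of the guidance added to the Notes above (dask is assumed to be installed for the ``map_blocks`` half)::

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.random.rand(6, 4), dims=("x", "y"))

    # func only needs plain numpy arrays -> apply_ufunc, low overhead
    absolute = xr.apply_ufunc(np.abs, da)

    # func wants a whole labelled object per block -> map_blocks, slower
    def demean(block):
        return block - block.mean("y")

    demeaned = xr.map_blocks(demean, da.chunk({"x": 3})).compute()
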
diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py
index e6209b0604b..5f944354901 100644
--- a/xarray/core/dataarray.py
+++ b/xarray/core/dataarray.py
@@ -839,15 +839,15 @@ def __dask_scheduler__(self):
def __dask_postcompute__(self):
func, args = self._to_temp_dataset().__dask_postcompute__()
- return self._dask_finalize, (func, args, self.name)
+ return self._dask_finalize, (self.name, func) + args
def __dask_postpersist__(self):
func, args = self._to_temp_dataset().__dask_postpersist__()
- return self._dask_finalize, (func, args, self.name)
+ return self._dask_finalize, (self.name, func) + args
@staticmethod
- def _dask_finalize(results, func, args, name):
- ds = func(results, *args)
+ def _dask_finalize(results, name, func, *args, **kwargs):
+ ds = func(results, *args, **kwargs)
variable = ds._variables.pop(_THIS_ARRAY)
coords = ds._variables
return DataArray(variable, coords, name=name, fastpath=True)
@@ -3781,6 +3781,8 @@ def polyfit(
See Also
--------
numpy.polyfit
+ numpy.polyval
+ xarray.polyval
"""
return self._to_temp_dataset().polyfit(
dim, deg, skipna=skipna, rcond=rcond, w=w, full=full, cov=cov
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index 9faf74dd4bc..d76d136be8b 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -863,15 +863,25 @@ def __dask_scheduler__(self):
return da.Array.__dask_scheduler__
def __dask_postcompute__(self):
+ return self._dask_postcompute, ()
+
+ def __dask_postpersist__(self):
+ return self._dask_postpersist, ()
+
+ def _dask_postcompute(self, results: "Iterable[Variable]") -> "Dataset":
import dask
- info = [
- (k, None) + v.__dask_postcompute__()
- if dask.is_dask_collection(v)
- else (k, v, None, None)
- for k, v in self._variables.items()
- ]
- construct_direct_args = (
+ variables = {}
+ results_iter = iter(results)
+
+ for k, v in self._variables.items():
+ if dask.is_dask_collection(v):
+ rebuild, args = v.__dask_postcompute__()
+ v = rebuild(next(results_iter), *args)
+ variables[k] = v
+
+ return Dataset._construct_direct(
+ variables,
self._coord_names,
self._dims,
self._attrs,
@@ -879,18 +889,50 @@ def __dask_postcompute__(self):
self._encoding,
self._close,
)
- return self._dask_postcompute, (info, construct_direct_args)
- def __dask_postpersist__(self):
- import dask
+ def _dask_postpersist(
+ self, dsk: Mapping, *, rename: Mapping[str, str] = None
+ ) -> "Dataset":
+ from dask import is_dask_collection
+ from dask.highlevelgraph import HighLevelGraph
+ from dask.optimization import cull
- info = [
- (k, None, v.__dask_keys__()) + v.__dask_postpersist__()
- if dask.is_dask_collection(v)
- else (k, v, None, None, None)
- for k, v in self._variables.items()
- ]
- construct_direct_args = (
+ variables = {}
+
+ for k, v in self._variables.items():
+ if not is_dask_collection(v):
+ variables[k] = v
+ continue
+
+ if isinstance(dsk, HighLevelGraph):
+ # dask >= 2021.3
+ # __dask_postpersist__() was called by dask.highlevelgraph.
+ # Don't use dsk.cull(), as we need to prevent partial layers:
+ # https://github.com/dask/dask/issues/7137
+ layers = v.__dask_layers__()
+ if rename:
+ layers = [rename.get(k, k) for k in layers]
+ dsk2 = dsk.cull_layers(layers)
+ elif rename: # pragma: nocover
+ # At the moment of writing, this is only for forward compatibility.
+ # replace_name_in_key requires dask >= 2021.3.
+ from dask.base import flatten, replace_name_in_key
+
+ keys = [
+ replace_name_in_key(k, rename) for k in flatten(v.__dask_keys__())
+ ]
+ dsk2, _ = cull(dsk, keys)
+ else:
+ # __dask_postpersist__() was called by dask.optimize or dask.persist
+ dsk2, _ = cull(dsk, v.__dask_keys__())
+
+ rebuild, args = v.__dask_postpersist__()
+ # rename was added in dask 2021.3
+ kwargs = {"rename": rename} if rename else {}
+ variables[k] = rebuild(dsk2, *args, **kwargs)
+
+ return Dataset._construct_direct(
+ variables,
self._coord_names,
self._dims,
self._attrs,
@@ -898,37 +940,6 @@ def __dask_postpersist__(self):
self._encoding,
self._close,
)
- return self._dask_postpersist, (info, construct_direct_args)
-
- @staticmethod
- def _dask_postcompute(results, info, construct_direct_args):
- variables = {}
- results_iter = iter(results)
- for k, v, rebuild, rebuild_args in info:
- if v is None:
- variables[k] = rebuild(next(results_iter), *rebuild_args)
- else:
- variables[k] = v
-
- final = Dataset._construct_direct(variables, *construct_direct_args)
- return final
-
- @staticmethod
- def _dask_postpersist(dsk, info, construct_direct_args):
- from dask.optimization import cull
-
- variables = {}
- # postpersist is called in both dask.optimize and dask.persist
- # When persisting, we want to filter out unrelated keys for
- # each Variable's task graph.
- for k, v, dask_keys, rebuild, rebuild_args in info:
- if v is None:
- dsk2, _ = cull(dsk, dask_keys)
- variables[k] = rebuild(dsk2, *rebuild_args)
- else:
- variables[k] = v
-
- return Dataset._construct_direct(variables, *construct_direct_args)
def compute(self, **kwargs) -> "Dataset":
"""Manually trigger loading and/or computation of this dataset's data
@@ -3906,6 +3917,7 @@ def merge(
compat: str = "no_conflicts",
join: str = "outer",
fill_value: Any = dtypes.NA,
+ combine_attrs: str = "override",
) -> "Dataset":
"""Merge the arrays of two datasets into a single dataset.
@@ -3934,7 +3946,6 @@ def merge(
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
-
join : {"outer", "inner", "left", "right", "exact"}, optional
Method for joining ``self`` and ``other`` along shared dimensions:
@@ -3946,6 +3957,18 @@ def merge(
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names (including coordinates) to fill values.
+ combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
+ "override"}, default: "override"
+ String indicating how to combine attrs of the objects being merged:
+
+ - "drop": empty attrs on returned Dataset.
+ - "identical": all attrs must be the same on every object.
+ - "no_conflicts": attrs from all objects are combined, any that have
+ the same name must also have the same value.
+ - "drop_conflicts": attrs from all objects are combined, any that have
+ the same name but different values are dropped.
+ - "override": skip comparing and copy attrs from the first dataset to
+ the result.
Returns
-------
@@ -3965,6 +3988,7 @@ def merge(
compat=compat,
join=join,
fill_value=fill_value,
+ combine_attrs=combine_attrs,
)
return self._replace(**merge_result._asdict())
@@ -4665,6 +4689,12 @@ def reduce(
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
"""
+ if "axis" in kwargs:
+ raise ValueError(
+ "passing 'axis' to Dataset reduce methods is ambiguous."
+ " Please use 'dim' instead."
+ )
+
if dim is None or dim is ...:
dims = set(self.dims)
elif isinstance(dim, str) or not isinstance(dim, Iterable):
@@ -6369,6 +6399,8 @@ def polyfit(
See Also
--------
numpy.polyfit
+ numpy.polyval
+ xarray.polyval
"""
variables = {}
skipna_da = skipna
@@ -6854,7 +6886,7 @@ def idxmax(
)
)
- def argmin(self, dim=None, axis=None, **kwargs):
+ def argmin(self, dim=None, **kwargs):
"""Indices of the minima of the member variables.
If there are multiple minima, the indices of the first one found will be
@@ -6868,9 +6900,6 @@ def argmin(self, dim=None, axis=None, **kwargs):
this is deprecated, in future will be an error, since DataArray.argmin will
return a dict with indices for all dimensions, which does not make sense for
a Dataset.
- axis : int, optional
- Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
- can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
@@ -6888,28 +6917,25 @@ def argmin(self, dim=None, axis=None, **kwargs):
See Also
--------
DataArray.argmin
-
"""
- if dim is None and axis is None:
+ if dim is None:
warnings.warn(
- "Once the behaviour of DataArray.argmin() and Variable.argmin() with "
- "neither dim nor axis argument changes to return a dict of indices of "
- "each dimension, for consistency it will be an error to call "
- "Dataset.argmin() with no argument, since we don't return a dict of "
- "Datasets.",
+ "Once the behaviour of DataArray.argmin() and Variable.argmin() without "
+ "dim changes to return a dict of indices of each dimension, for "
+ "consistency it will be an error to call Dataset.argmin() with no argument,"
+ "since we don't return a dict of Datasets.",
DeprecationWarning,
stacklevel=2,
)
if (
dim is None
- or axis is not None
or (not isinstance(dim, Sequence) and dim is not ...)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
argmin_func = getattr(duck_array_ops, "argmin")
- return self.reduce(argmin_func, dim=dim, axis=axis, **kwargs)
+ return self.reduce(argmin_func, dim=dim, **kwargs)
else:
raise ValueError(
"When dim is a sequence or ..., DataArray.argmin() returns a dict. "
@@ -6917,7 +6943,7 @@ def argmin(self, dim=None, axis=None, **kwargs):
"Dataset.argmin() with a sequence or ... for dim"
)
- def argmax(self, dim=None, axis=None, **kwargs):
+ def argmax(self, dim=None, **kwargs):
"""Indices of the maxima of the member variables.
If there are multiple maxima, the indices of the first one found will be
@@ -6931,9 +6957,6 @@ def argmax(self, dim=None, axis=None, **kwargs):
this is deprecated, in future will be an error, since DataArray.argmax will
return a dict with indices for all dimensions, which does not make sense for
a Dataset.
- axis : int, optional
- Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments
- can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
@@ -6953,26 +6976,24 @@ def argmax(self, dim=None, axis=None, **kwargs):
DataArray.argmax
"""
- if dim is None and axis is None:
+ if dim is None:
warnings.warn(
- "Once the behaviour of DataArray.argmax() and Variable.argmax() with "
- "neither dim nor axis argument changes to return a dict of indices of "
- "each dimension, for consistency it will be an error to call "
- "Dataset.argmax() with no argument, since we don't return a dict of "
- "Datasets.",
+ "Once the behaviour of DataArray.argmin() and Variable.argmin() without "
+ "dim changes to return a dict of indices of each dimension, for "
+ "consistency it will be an error to call Dataset.argmin() with no argument,"
+ "since we don't return a dict of Datasets.",
DeprecationWarning,
stacklevel=2,
)
if (
dim is None
- or axis is not None
or (not isinstance(dim, Sequence) and dim is not ...)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
argmax_func = getattr(duck_array_ops, "argmax")
- return self.reduce(argmax_func, dim=dim, axis=axis, **kwargs)
+ return self.reduce(argmax_func, dim=dim, **kwargs)
else:
raise ValueError(
"When dim is a sequence or ..., DataArray.argmin() returns a dict. "
diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py
index 1cac5e89906..82810908bea 100644
--- a/xarray/core/indexing.py
+++ b/xarray/core/indexing.py
@@ -513,7 +513,7 @@ def __getitem__(self, key):
return result
-class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):
+class LazilyIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make basic and outer indexing lazy."""
__slots__ = ("array", "key")
@@ -619,10 +619,10 @@ def _updated_key(self, new_key):
return _combine_indexers(self.key, self.shape, new_key)
def __getitem__(self, indexer):
- # If the indexed array becomes a scalar, return LazilyOuterIndexedArray
+ # If the indexed array becomes a scalar, return LazilyIndexedArray
if all(isinstance(ind, integer_types) for ind in indexer.tuple):
key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))
- return LazilyOuterIndexedArray(self.array, key)
+ return LazilyIndexedArray(self.array, key)
return type(self)(self.array, self._updated_key(indexer))
def transpose(self, order):
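
The rename above from ``LazilyOuterIndexedArray`` to ``LazilyIndexedArray`` is mechanical; the wrapper still records indexers lazily. A quick sketch mirroring the pattern used in the updated tests further down::

    import numpy as np

    from xarray import Variable
    from xarray.core import indexing

    data = np.random.rand(10, 20)
    lazy = indexing.LazilyIndexedArray(indexing.NumpyIndexingAdapter(data))
    var = Variable(("x", "y"), lazy)

    sub = var[:3, 0]
    print(type(sub._data))   # still a lazy wrapper; the indexer is only recorded
    print(sub.values.shape)  # (3,), materialised when .values is accessed
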
diff --git a/xarray/core/merge.py b/xarray/core/merge.py
index 14beeff3db5..ec95563bda9 100644
--- a/xarray/core/merge.py
+++ b/xarray/core/merge.py
@@ -893,6 +893,7 @@ def dataset_merge_method(
compat: str,
join: str,
fill_value: Any,
+ combine_attrs: str,
) -> _MergeResult:
"""Guts of the Dataset.merge method."""
# we are locked into supporting overwrite_vars for the Dataset.merge
@@ -922,7 +923,12 @@ def dataset_merge_method(
priority_arg = 2
return merge_core(
- objs, compat, join, priority_arg=priority_arg, fill_value=fill_value
+ objs,
+ compat,
+ join,
+ priority_arg=priority_arg,
+ fill_value=fill_value,
+ combine_attrs=combine_attrs,
)
diff --git a/xarray/core/options.py b/xarray/core/options.py
index d421b4c4f17..129698903c4 100644
--- a/xarray/core/options.py
+++ b/xarray/core/options.py
@@ -85,6 +85,7 @@ class set_options:
- ``display_width``: maximum display width for ``repr`` on xarray objects.
Default: ``80``.
+ - ``display_max_rows``: maximum number of rows shown in ``repr`` on xarray
+ objects. Default: ``12``.
- ``arithmetic_join``: DataArray/Dataset alignment in binary operations.
Default: ``'inner'``.
- ``file_cache_maxsize``: maximum number of open files to hold in xarray's
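
The ``display_max_rows`` entry added above caps how many rows the text ``repr`` prints; a quick sketch::

    import xarray as xr

    big = xr.Dataset({f"var{i}": i for i in range(50)})

    with xr.set_options(display_max_rows=12):
        print(big)  # the data-variables listing is truncated to 12 rows
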
diff --git a/xarray/core/utils.py b/xarray/core/utils.py
index 9648458ec6d..f9c8523306c 100644
--- a/xarray/core/utils.py
+++ b/xarray/core/utils.py
@@ -4,7 +4,6 @@
import functools
import io
import itertools
-import os.path
import re
import warnings
from enum import Enum
@@ -671,11 +670,6 @@ def read_magic_number(filename_or_obj, count=8):
return magic_number
-def is_grib_path(path: str) -> bool:
- _, ext = os.path.splitext(path)
- return ext in [".grib", ".grb", ".grib2", ".grb2"]
-
-
def is_uniform_spaced(arr, **kwargs) -> bool:
"""Return True if values of an array are uniformly spaced and sorted.
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index 52bcb2c161c..272d7c355ea 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -179,7 +179,7 @@ def _maybe_wrap_data(data):
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
- NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
+ NumpyArrayAdapter, PandasIndexAdapter and LazilyIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
@@ -531,22 +531,15 @@ def __dask_scheduler__(self):
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
- return (
- self._dask_finalize,
- (array_func, array_args, self._dims, self._attrs, self._encoding),
- )
+ return self._dask_finalize, (array_func,) + array_args
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
- return (
- self._dask_finalize,
- (array_func, array_args, self._dims, self._attrs, self._encoding),
- )
+ return self._dask_finalize, (array_func,) + array_args
- @staticmethod
- def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):
- data = array_func(results, *array_args)
- return Variable(dims, data, attrs=attrs, encoding=encoding)
+ def _dask_finalize(self, results, array_func, *args, **kwargs):
+ data = array_func(results, *args, **kwargs)
+ return Variable(self._dims, data, attrs=self._attrs, encoding=self._encoding)
@property
def values(self):
diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py
index 5510cf7f219..a83bc28e273 100644
--- a/xarray/plot/utils.py
+++ b/xarray/plot/utils.py
@@ -60,6 +60,9 @@ def _build_discrete_cmap(cmap, levels, extend, filled):
"""
import matplotlib as mpl
+ if len(levels) == 1:
+ levels = [levels[0], levels[0]]
+
if not filled:
# non-filled contour plots
extend = "max"
diff --git a/xarray/tests/test_backends_api.py b/xarray/tests/test_backends_api.py
index d19f5aab585..340495d4564 100644
--- a/xarray/tests/test_backends_api.py
+++ b/xarray/tests/test_backends_api.py
@@ -1,5 +1,3 @@
-import pytest
-
from xarray.backends.api import _get_default_engine
from . import requires_netCDF4, requires_scipy
@@ -14,8 +12,5 @@ def test__get_default_engine():
engine_gz = _get_default_engine("/example.gz")
assert engine_gz == "scipy"
- with pytest.raises(ValueError):
- _get_default_engine("/example.grib")
-
engine_default = _get_default_engine("/example")
assert engine_default == "netcdf4"
diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py
index 8220c8b83dc..908a959db45 100644
--- a/xarray/tests/test_dask.py
+++ b/xarray/tests/test_dask.py
@@ -1599,3 +1599,38 @@ def test_optimize():
arr = xr.DataArray(a).chunk(5)
(arr2,) = dask.optimize(arr)
arr2.compute()
+
+
+# The graph_manipulation module has been in dask since 2021.2, but it only became
+# usable with xarray in 2021.3
+@pytest.mark.skipif(LooseVersion(dask.__version__) <= "2021.02.0", reason="new module")
+def test_graph_manipulation():
+ """dask.graph_manipulation passes an optional parameter, "rename", to the rebuilder
+ function returned by __dask_postpersist__; also, the dsk passed to the rebuilder is
+ a HighLevelGraph whereas with dask.persist() and dask.optimize() it's a plain dict.
+ """
+ import dask.graph_manipulation as gm
+
+ v = Variable(["x"], [1, 2]).chunk(-1).chunk(1) * 2
+ da = DataArray(v)
+ ds = Dataset({"d1": v[0], "d2": v[1], "d3": ("x", [3, 4])})
+
+ v2, da2, ds2 = gm.clone(v, da, ds)
+
+ assert_equal(v2, v)
+ assert_equal(da2, da)
+ assert_equal(ds2, ds)
+
+ for a, b in ((v, v2), (da, da2), (ds, ds2)):
+ assert a.__dask_layers__() != b.__dask_layers__()
+ assert len(a.__dask_layers__()) == len(b.__dask_layers__())
+ assert a.__dask_graph__().keys() != b.__dask_graph__().keys()
+ assert len(a.__dask_graph__()) == len(b.__dask_graph__())
+ assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()
+ assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)
+
+ # Above we performed a slice operation; adding the two slices back together creates
+ # a diamond-shaped dependency graph, which in turn will trigger a collision in layer
+ # names if we were to use HighLevelGraph.cull() instead of
+ # HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__().
+ assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2)
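
The test above exercises ``dask.graph_manipulation.clone``; the more common route through the reworked ``Dataset._dask_postpersist`` is a plain ``dask.persist`` call, sketched here (dask is assumed to be installed)::

    import dask
    import xarray as xr

    ds = xr.Dataset({"a": ("x", list(range(6)))}).chunk({"x": 2})

    # persist goes through Dataset.__dask_postpersist__(), which now culls each
    # variable's task graph individually before rebuilding the Dataset
    (persisted,) = dask.persist(ds)
    print(persisted.chunks)
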
diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py
index 089785f5a12..4a5adf84fb7 100644
--- a/xarray/tests/test_dataarray.py
+++ b/xarray/tests/test_dataarray.py
@@ -7,6 +7,7 @@
import numpy as np
import pandas as pd
import pytest
+from pandas.tseries.frequencies import to_offset
import xarray as xr
from xarray import (
@@ -2990,9 +2991,13 @@ def test_resample(self):
actual = array.resample(time="24H").reduce(np.mean)
assert_identical(expected, actual)
+ # Our use of `loffset` may change if we align our API with pandas' changes.
+ # ref https://github.com/pydata/xarray/pull/4537
actual = array.resample(time="24H", loffset="-12H").mean()
- expected = DataArray(array.to_series().resample("24H", loffset="-12H").mean())
- assert_identical(expected, actual)
+ expected_ = array.to_series().resample("24H").mean()
+ expected_.index += to_offset("-12H")
+ expected = DataArray.from_series(expected_)
+ assert_identical(actual, expected)
with raises_regex(ValueError, "index must be monotonic"):
array[[2, 0, 1]].resample(time="1D")
diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py
index 2118bc8b780..df7f54ed30a 100644
--- a/xarray/tests/test_dataset.py
+++ b/xarray/tests/test_dataset.py
@@ -9,6 +9,7 @@
import pandas as pd
import pytest
from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.tseries.frequencies import to_offset
import xarray as xr
from xarray import (
@@ -187,7 +188,7 @@ def get_variables(self):
def lazy_inaccessible(k, v):
if k in self._indexvars:
return v
- data = indexing.LazilyOuterIndexedArray(InaccessibleArray(v.values))
+ data = indexing.LazilyIndexedArray(InaccessibleArray(v.values))
return Variable(v.dims, data, v.attrs)
return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()}
@@ -3899,11 +3900,13 @@ def test_resample_loffset(self):
)
ds.attrs["dsmeta"] = "dsdata"
- actual = ds.resample(time="24H", loffset="-12H").mean("time").time
- expected = xr.DataArray(
- ds.bar.to_series().resample("24H", loffset="-12H").mean()
- ).time
- assert_identical(expected, actual)
+ # Our use of `loffset` may change if we align our API with pandas' changes.
+ # ref https://github.com/pydata/xarray/pull/4537
+ actual = ds.resample(time="24H", loffset="-12H").mean().bar
+ expected_ = ds.bar.to_series().resample("24H").mean()
+ expected_.index += to_offset("-12H")
+ expected = DataArray.from_series(expected_)
+ assert_identical(actual, expected)
def test_resample_by_mean_discarding_attrs(self):
times = pd.date_range("2000-01-01", freq="6H", periods=10)
@@ -4746,6 +4749,9 @@ def test_reduce(self):
assert_equal(data.mean(dim=[]), data)
+ with pytest.raises(ValueError):
+ data.mean(axis=0)
+
def test_reduce_coords(self):
# regression test for GH1470
data = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"b": 4})
@@ -4926,9 +4932,6 @@ def mean_only_one_axis(x, axis):
with raises_regex(TypeError, "missing 1 required positional argument: 'axis'"):
ds.reduce(mean_only_one_axis)
- with raises_regex(TypeError, "non-integer axis"):
- ds.reduce(mean_only_one_axis, axis=["x", "y"])
-
def test_reduce_no_axis(self):
def total_sum(x):
return np.sum(x.flatten())
@@ -4938,9 +4941,6 @@ def total_sum(x):
actual = ds.reduce(total_sum)
assert_identical(expected, actual)
- with raises_regex(TypeError, "unexpected keyword argument 'axis'"):
- ds.reduce(total_sum, axis=0)
-
with raises_regex(TypeError, "unexpected keyword argument 'axis'"):
ds.reduce(total_sum, dim="x")
diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py
index 4ef7536e1f2..10641ff54e9 100644
--- a/xarray/tests/test_indexing.py
+++ b/xarray/tests/test_indexing.py
@@ -224,7 +224,7 @@ def test_lazily_indexed_array(self):
original = np.random.rand(10, 20, 30)
x = indexing.NumpyIndexingAdapter(original)
v = Variable(["i", "j", "k"], original)
- lazy = indexing.LazilyOuterIndexedArray(x)
+ lazy = indexing.LazilyIndexedArray(x)
v_lazy = Variable(["i", "j", "k"], lazy)
arr = ReturnItem()
# test orthogonally applied indexers
@@ -244,9 +244,7 @@ def test_lazily_indexed_array(self):
]:
assert expected.shape == actual.shape
assert_array_equal(expected, actual)
- assert isinstance(
- actual._data, indexing.LazilyOuterIndexedArray
- )
+ assert isinstance(actual._data, indexing.LazilyIndexedArray)
# make sure actual.key is appropriate type
if all(
@@ -282,18 +280,18 @@ def test_lazily_indexed_array(self):
actual._data,
(
indexing.LazilyVectorizedIndexedArray,
- indexing.LazilyOuterIndexedArray,
+ indexing.LazilyIndexedArray,
),
)
- assert isinstance(actual._data, indexing.LazilyOuterIndexedArray)
+ assert isinstance(actual._data, indexing.LazilyIndexedArray)
assert isinstance(actual._data.array, indexing.NumpyIndexingAdapter)
def test_vectorized_lazily_indexed_array(self):
original = np.random.rand(10, 20, 30)
x = indexing.NumpyIndexingAdapter(original)
v_eager = Variable(["i", "j", "k"], x)
- lazy = indexing.LazilyOuterIndexedArray(x)
+ lazy = indexing.LazilyIndexedArray(x)
v_lazy = Variable(["i", "j", "k"], lazy)
arr = ReturnItem()
@@ -306,7 +304,7 @@ def check_indexing(v_eager, v_lazy, indexers):
actual._data,
(
indexing.LazilyVectorizedIndexedArray,
- indexing.LazilyOuterIndexedArray,
+ indexing.LazilyIndexedArray,
),
)
assert_array_equal(expected, actual)
@@ -364,19 +362,19 @@ def test_index_scalar(self):
class TestMemoryCachedArray:
def test_wrapper(self):
- original = indexing.LazilyOuterIndexedArray(np.arange(10))
+ original = indexing.LazilyIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
assert_array_equal(wrapped, np.arange(10))
assert isinstance(wrapped.array, indexing.NumpyIndexingAdapter)
def test_sub_array(self):
- original = indexing.LazilyOuterIndexedArray(np.arange(10))
+ original = indexing.LazilyIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
child = wrapped[B[:5]]
assert isinstance(child, indexing.MemoryCachedArray)
assert_array_equal(child, np.arange(5))
assert isinstance(child.array, indexing.NumpyIndexingAdapter)
- assert isinstance(wrapped.array, indexing.LazilyOuterIndexedArray)
+ assert isinstance(wrapped.array, indexing.LazilyIndexedArray)
def test_setitem(self):
original = np.arange(10)
diff --git a/xarray/tests/test_merge.py b/xarray/tests/test_merge.py
index 27e2b10dcbc..5b84eccca14 100644
--- a/xarray/tests/test_merge.py
+++ b/xarray/tests/test_merge.py
@@ -418,3 +418,34 @@ def test_merge_dataarray(self):
da = xr.DataArray(data=1, name="b")
assert_identical(ds.merge(da), xr.merge([ds, da]))
+
+ @pytest.mark.parametrize(
+ ["combine_attrs", "attrs1", "attrs2", "expected_attrs", "expect_error"],
+ # don't need to test thoroughly
+ (
+ ("drop", {"a": 0, "b": 1, "c": 2}, {"a": 1, "b": 2, "c": 3}, {}, False),
+ (
+ "drop_conflicts",
+ {"a": 0, "b": 1, "c": 2},
+ {"b": 2, "c": 2, "d": 3},
+ {"a": 0, "c": 2, "d": 3},
+ False,
+ ),
+ ("override", {"a": 0, "b": 1}, {"a": 1, "b": 2}, {"a": 0, "b": 1}, False),
+ ("no_conflicts", {"a": 0, "b": 1}, {"a": 0, "b": 2}, None, True),
+ ("identical", {"a": 0, "b": 1}, {"a": 0, "b": 2}, None, True),
+ ),
+ )
+ def test_merge_combine_attrs(
+ self, combine_attrs, attrs1, attrs2, expected_attrs, expect_error
+ ):
+ ds1 = xr.Dataset(attrs=attrs1)
+ ds2 = xr.Dataset(attrs=attrs2)
+
+ if expect_error:
+ with pytest.raises(xr.MergeError):
+ ds1.merge(ds2, combine_attrs=combine_attrs)
+ else:
+ actual = ds1.merge(ds2, combine_attrs=combine_attrs)
+ expected = xr.Dataset(attrs=expected_attrs)
+ assert_identical(actual, expected)
diff --git a/xarray/tests/test_units.py b/xarray/tests/test_units.py
index 76dd830de23..8b7835e5da6 100644
--- a/xarray/tests/test_units.py
+++ b/xarray/tests/test_units.py
@@ -3972,35 +3972,6 @@ def test_repr(self, func, variant, dtype):
@pytest.mark.parametrize(
"func",
(
- function("all"),
- function("any"),
- pytest.param(
- function("argmax"),
- marks=pytest.mark.skip(
- reason="calling np.argmax as a function on xarray objects is not "
- "supported"
- ),
- ),
- pytest.param(
- function("argmin"),
- marks=pytest.mark.skip(
- reason="calling np.argmin as a function on xarray objects is not "
- "supported"
- ),
- ),
- function("max"),
- function("min"),
- function("mean"),
- pytest.param(
- function("median"),
- marks=pytest.mark.xfail(reason="median does not work with dataset yet"),
- ),
- function("sum"),
- function("prod"),
- function("std"),
- function("var"),
- function("cumsum"),
- function("cumprod"),
method("all"),
method("any"),
method("argmax", dim="x"),
diff --git a/xarray/tests/test_utils.py b/xarray/tests/test_utils.py
index 193c45f01cd..9d278a6cfb6 100644
--- a/xarray/tests/test_utils.py
+++ b/xarray/tests/test_utils.py
@@ -233,15 +233,6 @@ def test_is_remote_uri():
assert not utils.is_remote_uri("example.nc")
-def test_is_grib_path():
- assert not utils.is_grib_path("example.nc")
- assert not utils.is_grib_path("example.grib ")
- assert utils.is_grib_path("example.grib")
- assert utils.is_grib_path("example.grib2")
- assert utils.is_grib_path("example.grb")
- assert utils.is_grib_path("example.grb2")
-
-
class Test_is_uniform_and_sorted:
def test_sorted_uniform(self):
assert utils.is_uniform_spaced(np.arange(5))
diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py
index 1aaaff9b2d4..c951e7d3118 100644
--- a/xarray/tests/test_variable.py
+++ b/xarray/tests/test_variable.py
@@ -15,7 +15,7 @@
BasicIndexer,
CopyOnWriteArray,
DaskIndexingAdapter,
- LazilyOuterIndexedArray,
+ LazilyIndexedArray,
MemoryCachedArray,
NumpyIndexingAdapter,
OuterIndexer,
@@ -1173,9 +1173,9 @@ def test_repr(self):
assert expected == repr(v)
def test_repr_lazy_data(self):
- v = Variable("x", LazilyOuterIndexedArray(np.arange(2e5)))
+ v = Variable("x", LazilyIndexedArray(np.arange(2e5)))
assert "200000 values with dtype" in repr(v)
- assert isinstance(v._data, LazilyOuterIndexedArray)
+ assert isinstance(v._data, LazilyIndexedArray)
def test_detect_indexer_type(self):
""" Tests indexer type was correctly detected. """
@@ -2281,7 +2281,7 @@ def test_coarsen_2d(self):
class TestAsCompatibleData:
def test_unchanged_types(self):
- types = (np.asarray, PandasIndexAdapter, LazilyOuterIndexedArray)
+ types = (np.asarray, PandasIndexAdapter, LazilyIndexedArray)
for t in types:
for data in [
np.arange(3),
@@ -2464,19 +2464,19 @@ def test_NumpyIndexingAdapter(self):
dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d))
)
- def test_LazilyOuterIndexedArray(self):
- v = Variable(dims=("x", "y"), data=LazilyOuterIndexedArray(self.d))
+ def test_LazilyIndexedArray(self):
+ v = Variable(dims=("x", "y"), data=LazilyIndexedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"),
- data=LazilyOuterIndexedArray(LazilyOuterIndexedArray(self.d)),
+ data=LazilyIndexedArray(LazilyIndexedArray(self.d)),
)
self.check_orthogonal_indexing(v)
# hierarchical wrapping
v = Variable(
- dims=("x", "y"), data=LazilyOuterIndexedArray(NumpyIndexingAdapter(self.d))
+ dims=("x", "y"), data=LazilyIndexedArray(NumpyIndexingAdapter(self.d))
)
self.check_orthogonal_indexing(v)
@@ -2485,9 +2485,7 @@ def test_CopyOnWriteArray(self):
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
- v = Variable(
- dims=("x", "y"), data=CopyOnWriteArray(LazilyOuterIndexedArray(self.d))
- )
+ v = Variable(dims=("x", "y"), data=CopyOnWriteArray(LazilyIndexedArray(self.d)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)