Add eval method to Dataset #7163

Merged: 10 commits, Dec 6, 2023
Changes from 5 commits
doc/api.rst (1 change: 1 addition & 0 deletions)
@@ -192,6 +192,7 @@ Computation
Dataset.map_blocks
Dataset.polyfit
Dataset.curvefit
Dataset.eval

Aggregation
-----------
doc/whats-new.rst (4 changes: 4 additions & 0 deletions)
@@ -32,6 +32,10 @@ New Features
By `Sam Levang <https://github.com/slevang>`_.
- Allow the usage of h5py drivers (eg: ros3) via h5netcdf (:pull:`8360`).
By `Ezequiel Cimadevilla <https://github.com/zequihg50>`_.
- Add a :py:meth:`Dataset.eval` method, similar to the pandas method of the
  same name (:pull:`7163`). This is currently marked as experimental and
  doesn't yet support the ``numexpr`` engine.
  By `Maximilian Roos <https://github.com/max-sixty>`_.

Breaking changes
~~~~~~~~~~~~~~~~
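As context for the entry above, a minimal sketch of the pandas behaviour it
mirrors (standard ``pandas.DataFrame.eval``; nothing here is part of the diff):

import pandas as pd

df = pd.DataFrame({"a": range(5), "b": [0.0, 0.25, 0.5, 0.75, 1.0]})

# A plain expression returns a Series...
s = df.eval("a + b")

# ...while an assignment returns a new DataFrame with the extra column,
# mirroring the DataArray-vs-Dataset return types of ``Dataset.eval``.
df2 = df.eval("c = a + b")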
xarray/core/dataset.py (63 changes: 63 additions & 0 deletions)
@@ -98,6 +98,7 @@
Self,
T_ChunkDim,
T_Chunks,
T_DataArray,
T_DataArrayOrSet,
T_Dataset,
)
@@ -9510,6 +9511,68 @@ def argmax(self, dim: Hashable | None = None, **kwargs) -> Self:
"Dataset.argmin() with a sequence or ... for dim"
)

def eval(
self: T_Dataset,
statement: str,
*,
parser: QueryParserOptions = "pandas",
) -> T_Dataset | T_DataArray:
"""
Calculate an expression supplied as a string in the context of the dataset.

This is currently experimental; the API may change, particularly around
assignments, which currently return a ``Dataset`` with the additional variable.
Currently only the ``python`` engine is supported, which has the same
performance as executing in Python.

Parameters
----------
statement : str
String containing the Python-like expression to evaluate.

Returns
-------
result : Dataset or DataArray, depending on whether ``statement`` contains an
assignment.

Examples
--------
>>> ds = xr.Dataset(
... {"a": ("x", np.arange(0, 5, 1)), "b": ("x", np.linspace(0, 1, 5))}
... )
>>> ds
<xarray.Dataset>
Dimensions:  (x: 5)
Dimensions without coordinates: x
Data variables:
    a        (x) int64 0 1 2 3 4
    b        (x) float64 0.0 0.25 0.5 0.75 1.0

>>> ds.eval("a + b")
<xarray.DataArray (x: 5)>
array([0. , 1.25, 2.5 , 3.75, 5. ])
Dimensions without coordinates: x

>>> ds.eval("c = a + b")
<xarray.Dataset>
Dimensions:  (x: 5)
Dimensions without coordinates: x
Data variables:
    a        (x) int64 0 1 2 3 4
    b        (x) float64 0.0 0.25 0.5 0.75 1.0
    c        (x) float64 0.0 1.25 2.5 3.75 5.0

max-sixty (Collaborator, Author) commented on this example:

I'm not a great fan of the method returning different types. Possibly we could have a different method which does these assignments?

Or we don't accept them, and someone can pass ``ds.assign(c=ds.eval("a + b"))``, but that removes some of the power.
"""

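# `resolvers=[self]` lets bare variable names in `statement` resolve to
# this dataset's variables; `target=self` makes assignment statements
# return a copy of the dataset with the assigned variable added.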
return pd.eval(
statement,
resolvers=[self],
target=self,
parser=parser,
# Because numexpr returns a numpy array, using that engine results in
# different behavior. We'd be very open to a contribution handling this.
engine="python",
)

def query(
self,
queries: Mapping[Any, Any] | None = None,
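For reference, the assign-based alternative raised in the review comment above
would look like this (a sketch assuming the documented behaviour, where the
expression form of ``eval`` returns a DataArray):

import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", np.arange(5)), "b": ("x", np.linspace(0, 1, 5))})

# Assignment inside the statement: a new Dataset with "c" added.
with_assignment = ds.eval("c = a + b")

# Equivalent without assignment syntax: ``eval`` returns a DataArray and
# ``assign`` attaches it, so the return type is always a Dataset.
via_assign = ds.assign(c=ds.eval("a + b"))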
xarray/tests/test_dataset.py (68 changes: 68 additions & 0 deletions)
@@ -6680,6 +6680,74 @@ def test_query(self, backend, engine, parser) -> None:
with pytest.raises(UndefinedVariableError):
ds.query(x="spam > 50") # name not present

@pytest.mark.parametrize("parser", ["pandas", "python"])
@pytest.mark.parametrize(
"backend", ["numpy", pytest.param("dask", marks=[requires_dask])]
)
def test_eval(self, backend, parser) -> None:
"""Currently much more minimal testing that `query` above, and much of the setup
isn't used. But the risks are fairly low — `query` shares much of the code, and
the method is currently experimental."""

# setup test data
np.random.seed(42)
a = np.arange(0, 10, 1)
b = np.random.randint(0, 100, size=10)
c = np.linspace(0, 1, 20)
d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype(
object
)
e = np.arange(0, 10 * 20).reshape(10, 20)
f = np.random.normal(0, 1, size=(10, 20, 30))
if backend == "numpy":
ds = Dataset(
{
"a": ("x", a),
"b": ("x", b),
"c": ("y", c),
"d": ("z", d),
"e": (("x", "y"), e),
"f": (("x", "y", "z"), f),
},
coords={
"a2": ("x", a),
"b2": ("x", b),
"c2": ("y", c),
"d2": ("z", d),
"e2": (("x", "y"), e),
"f2": (("x", "y", "z"), f),
},
)
elif backend == "dask":
ds = Dataset(
{
"a": ("x", da.from_array(a, chunks=3)),
"b": ("x", da.from_array(b, chunks=3)),
"c": ("y", da.from_array(c, chunks=7)),
"d": ("z", da.from_array(d, chunks=12)),
"e": (("x", "y"), da.from_array(e, chunks=(3, 7))),
"f": (("x", "y", "z"), da.from_array(f, chunks=(3, 7, 12))),
},
coords={
"a2": ("x", a),
"b2": ("x", b),
"c2": ("y", c),
"d2": ("z", d),
"e2": (("x", "y"), e),
"f2": (("x", "y", "z"), f),
},
)

actual = ds.eval("a + 5", parser=parser)
expect = ds["a"] + 5
assert_identical(expect, actual)

# check pandas query syntax is supported
if parser == "pandas":
actual = ds.eval("(a2 > 5) and (b2 > 50)", parser=parser)
expect = (ds["a"] > 5) & (ds["b"] > 50)
assert_identical(expect, actual)
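# (The "python" parser does not rewrite `and` to `&`, so boolean
# operators on array data would raise; hence this check is pandas-only.)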


# pytest tests — new tests should go here, rather than in the class.
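# As an illustration of that convention, an assignment-form test might look
# like the following sketch (hypothetical; `test_eval_assignment` is not
# part of this PR):
@pytest.mark.parametrize("parser", ["pandas", "python"])
def test_eval_assignment(parser) -> None:
    # Assignment statements should return a new Dataset containing the
    # assigned variable, per the `Dataset.eval` docstring example.
    ds = Dataset({"a": ("x", np.arange(5)), "b": ("x", np.linspace(0, 1, 5))})
    actual = ds.eval("c = a + b", parser=parser)
    expect = ds.assign(c=ds["a"] + ds["b"])
    assert_identical(expect, actual)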
