Commit e4a6ec2

cleanups + add comments
1 parent e976ada commit e4a6ec2

File tree: 2 files changed (+16, -15 lines)

xarray/core/dataset.py

Lines changed: 8 additions & 10 deletions

@@ -4133,23 +4133,21 @@ def unstack(
                 # function requires.
                 # https://github.com/pydata/xarray/pull/4746#issuecomment-753282125
                 any(is_duck_dask_array(v.data) for v in self.variables.values())
-                # Sparse doesn't currently support (though we could special-case
-                # it)
-                # https://github.com/pydata/sparse/issues/422
+                # Sparse doesn't currently support advanced indexing
+                # https://github.com/pydata/sparse/issues/114
                 or any(
                     isinstance(v.data, sparse_array_type)
                     for v in self.variables.values()
                 )
-                # or sparse
                 # Until https://github.com/pydata/xarray/pull/4751 is resolved,
                 # we check explicitly whether it's a numpy array. Once that is
                 # resolved, explicitly exclude pint arrays.
-                # # pint doesn't implement `np.full_like` in a way that's
-                # # currently compatible.
-                # # https://github.com/pydata/xarray/pull/4746#issuecomment-753425173
-                # # or any(
-                # # isinstance(v.data, pint_array_type) for v in self.variables.values()
-                # # )
+                # pint doesn't implement `np.full_like` in a way that's
+                # currently compatible.
+                # https://github.com/pydata/xarray/pull/4746#issuecomment-753425173
+                # or any(
+                # isinstance(v.data, pint_array_type) for v in self.variables.values()
+                # )
                 or any(
                     not isinstance(v.data, np.ndarray) for v in self.variables.values()
                 )
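
A minimal, self-contained sketch of the dispatch idea in the condition above: fall back to the slower, reindex-based unstack whenever any variable is backed by something other than a plain numpy array (dask, sparse, pint, ...). The helper name `choose_unstack_path` is illustrative only, not an xarray API.

import numpy as np

def choose_unstack_path(arrays) -> str:
    """Pick the fast unstack path only when every array is a plain numpy array."""
    if all(isinstance(arr, np.ndarray) for arr in arrays):
        # Assignment by index and np.full_like behave as the fast path expects.
        return "fast"
    # dask / sparse / pint-backed arrays take the general, reindex-based path instead.
    return "full_reindex"

print(choose_unstack_path([np.arange(3), np.ones((2, 2))]))  # -> "fast"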

xarray/core/variable.py

Lines changed: 8 additions & 5 deletions

@@ -1565,22 +1565,22 @@ def _unstack_once(
         index: pd.MultiIndex,
         dim: Hashable,
         fill_value=dtypes.NA,
-        sparse=False,
+        sparse: bool = False,
     ) -> "Variable":
         """
         Unstacks this variable given an index to unstack and the name of the
         dimension to which the index refers.
         """

         reordered = self.transpose(..., dim)
-        shape = reordered.shape
+
         new_dim_sizes = [lev.size for lev in index.levels]
         new_dim_names = index.names
         indexer = index.codes

         # Potentially we could replace `len(other_dims)` with just `-1`
         other_dims = [d for d in self.dims if d != dim]
-        new_shape = tuple(list(shape[: len(other_dims)]) + new_dim_sizes)
+        new_shape = tuple(list(reordered.shape[: len(other_dims)]) + new_dim_sizes)
         new_dims = reordered.dims[: len(other_dims)] + new_dim_names

         if fill_value is dtypes.NA:

@@ -1594,14 +1594,17 @@ def _unstack_once(
             dtype = self.dtype

         if sparse:
+            # unstacking a dense multi-indexed array to a sparse array
+            # Use the sparse.COO constructor until sparse supports advanced indexing
+            # https://github.com/pydata/sparse/issues/114
             # TODO: how do we allow different sparse array types
             from sparse import COO

             codes = zip(*index.codes)
-            if not shape[:-1]:
+            if reordered.ndim == 1:
                 indexes = codes
             else:
-                sizes = itertools.product(range(*shape[:-1]))
+                sizes = itertools.product(range(*reordered.shape[:-1]))
                 tuple_indexes = itertools.product(sizes, codes)
                 indexes = map(lambda x: list(itertools.chain(*x)), tuple_indexes)  # type: ignore
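
The two hunks above stop caching `shape` in favor of `reordered.shape` and note why the sparse path builds a `sparse.COO` directly from the MultiIndex codes. Below is a minimal, self-contained sketch of that construction for the 1-D (`reordered.ndim == 1`) case; it illustrates the idea and is not xarray's actual `_unstack_once`.

import numpy as np
import pandas as pd
import sparse  # the pydata/sparse package

# A 1-D "stacked" array indexed by a 2-level MultiIndex with one combination missing.
index = pd.MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 1)], names=["x", "y"])
data = np.array([1.0, 2.0, 3.0])

# The unstacked shape comes from the MultiIndex level sizes (2 x 2 here),
# mirroring `new_dim_sizes = [lev.size for lev in index.levels]`.
new_shape = tuple(lev.size for lev in index.levels)

# Each element's position in the unstacked array is exactly its integer codes,
# so the codes can be handed to the COO constructor as coordinates.
coords = np.asarray(index.codes)  # shape (n_levels, n_elements)
result = sparse.COO(coords, data, shape=new_shape, fill_value=np.nan)

print(result.todense())
# [[ 1.  2.]
#  [nan  3.]]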
