
Commit 4e9d557

Add a benchmark to monitor performance for large dataset indexing (#9012)
1 parent dcf2ac4 · commit 4e9d557

File tree: 1 file changed (+10 −0 lines)


asv_bench/benchmarks/indexing.py

Lines changed: 10 additions & 0 deletions
@@ -12,6 +12,7 @@
 nt = 500
 
 basic_indexes = {
+    "1scalar": {"x": 0},
     "1slice": {"x": slice(0, 3)},
     "1slice-1scalar": {"x": 0, "y": slice(None, None, 3)},
     "2slicess-1scalar": {"x": slice(3, -3, 3), "y": 1, "t": slice(None, -3, 3)},
@@ -74,6 +75,10 @@ def setup(self, key):
                 "x_coords": ("x", np.linspace(1.1, 2.1, nx)),
             },
         )
+        # Benchmark how indexing is slowed down by adding many scalar variables
+        # to the dataset
+        # https://github.com/pydata/xarray/pull/9003
+        self.ds_large = self.ds.merge({f"extra_var{i}": i for i in range(400)})
 
 
 class Indexing(Base):
@@ -89,6 +94,11 @@ def time_indexing_outer(self, key):
     def time_indexing_vectorized(self, key):
         self.ds.isel(**vectorized_indexes[key]).load()
 
+    @parameterized(["key"], [list(basic_indexes.keys())])
+    def time_indexing_basic_ds_large(self, key):
+        # https://github.com/pydata/xarray/pull/9003
+        self.ds_large.isel(**basic_indexes[key]).load()
+
 
 class Assignment(Base):
     @parameterized(["key"], [list(basic_indexes.keys())])
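
For readers without an asv setup, here is a minimal standalone sketch of the effect this benchmark tracks. It is not part of the commit; the dataset shape, nx, and the timeit loop are illustrative assumptions, but the 400 merged scalar variables follow the same pattern as the benchmark's setup.

# Standalone sketch (assumed names/sizes, not from the commit): measure how
# isel() slows down once many scalar variables are merged into a Dataset.
import timeit

import numpy as np
import xarray as xr

nx = 300
ds = xr.Dataset({"var1": (("x",), np.random.randn(nx))})

# Same pattern as the benchmark's setup: 400 extra scalar variables.
ds_large = ds.merge({f"extra_var{i}": i for i in range(400)})

t_small = timeit.timeit(lambda: ds.isel(x=0), number=100)
t_large = timeit.timeit(lambda: ds_large.isel(x=0), number=100)
print(f"plain dataset: {t_small:.4f}s, with 400 scalars: {t_large:.4f}s")

Each isel() call has to walk every variable in the dataset, so the merged scalars dominate the timing even for a trivial index, which is exactly the overhead the linked PR (#9003) set out to reduce.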

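To compare just this benchmark between a baseline and a working branch, the usual airspeed velocity workflow from xarray's asv_bench directory should look roughly like the following (a command sketch: the upstream remote name and the -f tolerance factor are assumptions, not from the commit):

cd asv_bench
asv continuous -f 1.1 upstream/main HEAD -b time_indexing_basic_ds_large

asv continuous runs the selected benchmarks on both commits and reports only the results that changed by more than the given factor.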