     _check_inplace,
     _default,
     decode_numpy_dict_values,
+    drop_dims_from_indexers,
     either_dict_or_kwargs,
     hashable,
     infix_dims,
@@ -1767,7 +1768,7 @@ def maybe_chunk(name, var, chunks):
         return self._replace(variables)

     def _validate_indexers(
-        self, indexers: Mapping[Hashable, Any]
+        self, indexers: Mapping[Hashable, Any], missing_dims: str = "raise",
     ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]:
         """ Here we make sure
         + indexers have valid keys
@@ -1777,9 +1778,7 @@ def _validate_indexers(
         """
         from .dataarray import DataArray

-        invalid = indexers.keys() - self.dims.keys()
-        if invalid:
-            raise ValueError("dimensions %r do not exist" % invalid)
+        indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)

         # all indexers should be int, slice, np.ndarrays, or Variable
         for k, v in indexers.items():
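
The removed inline validation is replaced by a single call to the newly imported drop_dims_from_indexers helper, which implements the raise/warn/ignore policy in one place. The sketch below is a hypothetical reconstruction of that behaviour, inferred only from this diff and from the isel docstring further down; the helper's name comes from the import above, but the body shown here is an assumption, and the real implementation in xarray's utils module may differ.

import warnings
from typing import Any, Iterable, Mapping


def drop_dims_from_indexers_sketch(
    indexers: Mapping[Any, Any], dims: Iterable[Any], missing_dims: str
) -> Mapping[Any, Any]:
    # Hypothetical sketch of the imported helper, not the real implementation.
    invalid = indexers.keys() - set(dims)
    if missing_dims == "raise":
        if invalid:
            raise ValueError("dimensions %r do not exist" % invalid)
        return indexers
    if missing_dims == "warn":
        if invalid:
            warnings.warn("dimensions %r do not exist" % invalid)
        return {k: v for k, v in indexers.items() if k not in invalid}
    if missing_dims == "ignore":
        return {k: v for k, v in indexers.items() if k not in invalid}
    raise ValueError(
        "Unrecognised option %r for missing_dims argument" % missing_dims
    )

Centralising the check lets both isel code paths and _validate_indexers share one policy instead of duplicating the old invalid-keys check.
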
@@ -1875,6 +1874,7 @@ def isel(
         self,
         indexers: Mapping[Hashable, Any] = None,
         drop: bool = False,
+        missing_dims: str = "raise",
         **indexers_kwargs: Any,
     ) -> "Dataset":
         """Returns a new dataset with each array indexed along the specified
@@ -1896,6 +1896,12 @@ def isel(
         drop : bool, optional
             If ``drop=True``, drop coordinates variables indexed by integers
             instead of making them scalar.
+        missing_dims : {"raise", "warn", "ignore"}, default "raise"
+            What to do if dimensions that should be selected from are not present in the
+            Dataset:
+            - "raise": raise an exception
+            - "warn": raise a warning, and ignore the missing dimensions
+            - "ignore": ignore the missing dimensions
         **indexers_kwargs : {dim: indexer, ...}, optional
             The keyword arguments form of ``indexers``.
             One of indexers or indexers_kwargs must be provided.
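
For readers of the docstring above, a short usage sketch of the new argument; the dataset and variable names are invented for illustration and assume a build of xarray that includes this change.

import numpy as np
import xarray as xr

ds = xr.Dataset({"temperature": (("x", "y"), np.arange(6).reshape(2, 3))})

# Default is missing_dims="raise", i.e. the pre-existing behaviour:
# ds.isel(z=0) raises ValueError because "z" is not a dimension of ds.

# "ignore" silently drops the unknown dimension from the indexers.
subset = ds.isel(x=0, z=0, missing_dims="ignore")

# "warn" also drops it, but emits a warning naming the missing dimensions.
noisy = ds.isel(x=0, z=0, missing_dims="warn")

Because missing_dims is a named parameter declared before **indexers_kwargs, the keyword binds to the option rather than being treated as a dimension name.
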
@@ -1918,13 +1924,11 @@
         """
         indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
         if any(is_fancy_indexer(idx) for idx in indexers.values()):
-            return self._isel_fancy(indexers, drop=drop)
+            return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims)

         # Much faster algorithm for when all indexers are ints, slices, one-dimensional
         # lists, or zero or one-dimensional np.ndarray's
-        invalid = indexers.keys() - self.dims.keys()
-        if invalid:
-            raise ValueError("dimensions %r do not exist" % invalid)
+        indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)

         variables = {}
         dims: Dict[Hashable, Tuple[int, ...]] = {}
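
The dispatch above means both code paths now receive missing_dims: integers, slices and one-dimensional lists stay on the fast path, while indexers that is_fancy_indexer flags (for example a DataArray) are routed through _isel_fancy, which forwards the argument to _validate_indexers. A small illustrative sketch with invented data:

import numpy as np
import xarray as xr

ds = xr.Dataset({"v": (("x", "y"), np.arange(12).reshape(3, 4))})

# ints, slices and 1-d lists take the fast path shown above
fast = ds.isel(x=slice(0, 2), z=0, missing_dims="ignore")

# a DataArray indexer counts as fancy and goes through _isel_fancy,
# so the unknown dimension "z" is handled by the same policy there
points = xr.DataArray([0, 2], dims="points")
fancy = ds.isel(x=points, z=0, missing_dims="ignore")
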
@@ -1958,10 +1962,16 @@ def isel(
             file_obj=self._file_obj,
         )

-    def _isel_fancy(self, indexers: Mapping[Hashable, Any], *, drop: bool) -> "Dataset":
+    def _isel_fancy(
+        self,
+        indexers: Mapping[Hashable, Any],
+        *,
+        drop: bool,
+        missing_dims: str = "raise",
+    ) -> "Dataset":
         # Note: we need to preserve the original indexers variable in order to merge the
         # coords below
-        indexers_list = list(self._validate_indexers(indexers))
+        indexers_list = list(self._validate_indexers(indexers, missing_dims))

         variables: Dict[Hashable, Variable] = {}
         indexes: Dict[Hashable, pd.Index] = {}
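
Because _validate_indexers and _isel_fancy both default to missing_dims="raise", internal callers that do not pass the argument keep the old ValueError behaviour. The "warn" mode can be observed as below; this is a sketch with invented data, and since the diff does not pin down the warning category, the example simply records whatever is emitted.

import warnings

import numpy as np
import xarray as xr

ds = xr.Dataset({"v": ("x", np.arange(3))})

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ds.isel(x=0, z=0, missing_dims="warn")  # "z" is dropped, a warning is recorded

print(caught[0].message)  # the message names the missing dimension "z"
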