Commit 12a0dc4

jbrockmendel authored and datapythonista committed
STYLE: Avoid using backslash to continue code to the next line (#23073)
1 parent e6277be · commit 12a0dc4
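
The rule applied throughout is the PEP 8 recommendation to prefer Python's implicit line continuation inside parentheses, brackets and braces over explicit backslash continuations. As a rough, hand-written sketch of the recurring patterns (the function and variable names below are invented for illustration and are not pandas code), the commit wraps multi-line conditions in parentheses, splits chained expressions into intermediate assignments, and parenthesizes multi-value returns and conditional expressions:

# Hypothetical example (not pandas code) showing the patterns this commit applies.
def describe(values, labels=None):
    # Instead of continuing a long condition with a trailing backslash,
    # wrap it in parentheses so it may span several lines.
    if (labels is not None and
            len(labels) == len(values)):
        pairs = list(zip(labels, values))
    else:
        pairs = list(enumerate(values))

    # Instead of continuing a chained expression with ".\", split it
    # into an intermediate assignment.
    joined = ", ".join(str(v) for _, v in pairs)
    summary = joined.upper()

    # Multi-value returns are wrapped in parentheses rather than
    # continued with a backslash.
    return (pairs,
            summary,
            len(pairs))

pairs, summary, n = describe(["a", "b", "c"])
print(summary)  # prints "A, B, C"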

23 files changed: 106 additions and 102 deletions


pandas/compat/numpy/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -60,8 +60,8 @@ def np_array_datetime64_compat(arr, *args, **kwargs):
     if not _np_version_under1p11:

         # is_list_like
-        if hasattr(arr, '__iter__') and not \
-                isinstance(arr, string_and_binary_types):
+        if (hasattr(arr, '__iter__') and
+                not isinstance(arr, string_and_binary_types)):
             arr = [tz_replacer(s) for s in arr]
         else:
             arr = tz_replacer(arr)

pandas/core/algorithms.py

Lines changed: 2 additions & 2 deletions
@@ -275,8 +275,8 @@ def match(to_match, values, na_sentinel=-1):
         # replace but return a numpy array
         # use a Series because it handles dtype conversions properly
         from pandas import Series
-        result = Series(result.ravel()).replace(-1, na_sentinel).values.\
-            reshape(result.shape)
+        result = Series(result.ravel()).replace(-1, na_sentinel)
+        result = result.values.reshape(result.shape)

     return result

pandas/core/base.py

Lines changed: 2 additions & 2 deletions
@@ -395,8 +395,8 @@ def nested_renaming_depr(level=4):

                     elif isinstance(obj, ABCSeries):
                         nested_renaming_depr()
-                    elif isinstance(obj, ABCDataFrame) and \
-                            k not in obj.columns:
+                    elif (isinstance(obj, ABCDataFrame) and
+                            k not in obj.columns):
                         raise KeyError(
                             "Column '{col}' does not exist!".format(col=k))

pandas/core/generic.py

Lines changed: 2 additions & 2 deletions
@@ -5651,8 +5651,8 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
                 # fill in 2d chunks
                 result = {col: s.fillna(method=method, value=value)
                           for col, s in self.iteritems()}
-                new_obj = self._constructor.\
-                    from_dict(result).__finalize__(self)
+                prelim_obj = self._constructor.from_dict(result)
+                new_obj = prelim_obj.__finalize__(self)
                 new_data = new_obj._data

             else:

pandas/core/groupby/generic.py

Lines changed: 3 additions & 2 deletions
@@ -1027,8 +1027,9 @@ def nunique(self, dropna=True):
         try:
             sorter = np.lexsort((val, ids))
         except TypeError:  # catches object dtypes
-            assert val.dtype == object, \
-                'val.dtype must be object, got %s' % val.dtype
+            msg = ('val.dtype must be object, got {dtype}'
+                   .format(dtype=val.dtype))
+            assert val.dtype == object, msg
             val, _ = algorithms.factorize(val, sort=False)
             sorter = np.lexsort((val, ids))
         _isna = lambda a: a == -1

pandas/core/groupby/groupby.py

Lines changed: 8 additions & 6 deletions
@@ -578,8 +578,8 @@ def wrapper(*args, **kwargs):
             # a little trickery for aggregation functions that need an axis
             # argument
             kwargs_with_axis = kwargs.copy()
-            if 'axis' not in kwargs_with_axis or \
-                    kwargs_with_axis['axis'] is None:
+            if ('axis' not in kwargs_with_axis or
+                    kwargs_with_axis['axis'] is None):
                 kwargs_with_axis['axis'] = self.axis

             def curried_with_axis(x):
@@ -1490,8 +1490,10 @@ def nth(self, n, dropna=None):
         self._set_group_selection()

         if not dropna:
-            mask = np.in1d(self._cumcount_array(), nth_values) | \
-                np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
+            mask_left = np.in1d(self._cumcount_array(), nth_values)
+            mask_right = np.in1d(self._cumcount_array(ascending=False) + 1,
+                                 -nth_values)
+            mask = mask_left | mask_right

             out = self._selected_obj[mask]
             if not self.as_index:
@@ -1552,8 +1554,8 @@ def nth(self, n, dropna=None):
         result.loc[mask] = np.nan

         # reset/reindex to the original groups
-        if len(self.obj) == len(dropped) or \
-                len(result) == len(self.grouper.result_index):
+        if (len(self.obj) == len(dropped) or
+                len(result) == len(self.grouper.result_index)):
             result.index = self.grouper.result_index
         else:
             result = result.reindex(self.grouper.result_index)

pandas/core/groupby/grouper.py

Lines changed: 14 additions & 14 deletions
@@ -157,8 +157,8 @@ def _set_grouper(self, obj, sort=False):
         if self.key is not None:
             key = self.key
             # The 'on' is already defined
-            if getattr(self.grouper, 'name', None) == key and \
-                    isinstance(obj, ABCSeries):
+            if (getattr(self.grouper, 'name', None) == key and
+                    isinstance(obj, ABCSeries)):
                 ax = self._grouper.take(obj.index)
             else:
                 if key not in obj._info_axis:
@@ -530,9 +530,9 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
         except Exception:
             all_in_columns_index = False

-        if not any_callable and not all_in_columns_index and \
-                not any_arraylike and not any_groupers and \
-                match_axis_length and level is None:
+        if (not any_callable and not all_in_columns_index and
+                not any_arraylike and not any_groupers and
+                match_axis_length and level is None):
             keys = [com.asarray_tuplesafe(keys)]

     if isinstance(level, (tuple, list)):
@@ -593,15 +593,15 @@ def is_in_obj(gpr):

         # create the Grouping
         # allow us to passing the actual Grouping as the gpr
-        ping = Grouping(group_axis,
-                        gpr,
-                        obj=obj,
-                        name=name,
-                        level=level,
-                        sort=sort,
-                        observed=observed,
-                        in_axis=in_axis) \
-            if not isinstance(gpr, Grouping) else gpr
+        ping = (Grouping(group_axis,
+                         gpr,
+                         obj=obj,
+                         name=name,
+                         level=level,
+                         sort=sort,
+                         observed=observed,
+                         in_axis=in_axis)
+                if not isinstance(gpr, Grouping) else gpr)

         groupings.append(ping)

pandas/core/groupby/ops.py

Lines changed: 5 additions & 4 deletions
@@ -521,8 +521,8 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1,
             result = result.astype('float64')
             result[mask] = np.nan

-        if kind == 'aggregate' and \
-                self._filter_empty_groups and not counts.all():
+        if (kind == 'aggregate' and
+                self._filter_empty_groups and not counts.all()):
             if result.ndim == 2:
                 try:
                     result = lib.row_bool_subset(
@@ -743,8 +743,9 @@ def group_info(self):
         else:
             comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)

-        return comp_ids.astype('int64', copy=False), \
-            obs_group_ids.astype('int64', copy=False), ngroups
+        return (comp_ids.astype('int64', copy=False),
+                obs_group_ids.astype('int64', copy=False),
+                ngroups)

     @cache_readonly
     def ngroups(self):

pandas/core/indexing.py

Lines changed: 4 additions & 4 deletions
@@ -1836,8 +1836,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
         """Translate any partial string timestamp matches in key, returning the
         new key (GH 10331)"""
         if isinstance(labels, MultiIndex):
-            if isinstance(key, compat.string_types) and \
-                    labels.levels[0].is_all_dates:
+            if (isinstance(key, compat.string_types) and
+                    labels.levels[0].is_all_dates):
                 # Convert key '2016-01-01' to
                 # ('2016-01-01'[, slice(None, None, None)]+)
                 key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
@@ -1847,8 +1847,8 @@ def _get_partial_string_timestamp_match_key(self, key, labels):
             # (..., slice('2016-01-01', '2016-01-01', None), ...)
             new_key = []
             for i, component in enumerate(key):
-                if isinstance(component, compat.string_types) and \
-                        labels.levels[i].is_all_dates:
+                if (isinstance(component, compat.string_types) and
+                        labels.levels[i].is_all_dates):
                     new_key.append(slice(component, component, None))
                 else:
                     new_key.append(component)

pandas/core/internals/concat.py

Lines changed: 2 additions & 2 deletions
@@ -184,8 +184,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
                     if len(values) and values[0] is None:
                         fill_value = None

-                if getattr(self.block, 'is_datetimetz', False) or \
-                        is_datetimetz(empty_dtype):
+                if (getattr(self.block, 'is_datetimetz', False) or
+                        is_datetimetz(empty_dtype)):
                     if self.block is None:
                         array = empty_dtype.construct_array_type()
                         missing_arr = array([fill_value], dtype=empty_dtype)
