Skip to content

CLN: String formatting % -> f-strings #29518

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 11 commits into from
Nov 18, 2019
Prev Previous commit
Next Next commit
Flake8
  • Loading branch information
alimcmaster1 committed Nov 10, 2019
commit f773a0b4fd9b2510ddf4dba7caacba41144d6f08
29 changes: 19 additions & 10 deletions pandas/core/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -2083,7 +2083,7 @@ def to_stata(
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
**kwargs
**kwargs,
)
writer.write_file()

Expand All @@ -2107,7 +2107,7 @@ def to_parquet(
compression="snappy",
index=None,
partition_cols=None,
**kwargs
**kwargs,
):
"""
Write a DataFrame to the binary parquet format.
Expand Down Expand Up @@ -2187,7 +2187,7 @@ def to_parquet(
compression=compression,
index=index,
partition_cols=partition_cols,
**kwargs
**kwargs,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is this a black version thing?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Weirdly using 19.3b0 locally seems to add this for me - I've reverted

Copy link
Member Author

@alimcmaster1 alimcmaster1 Nov 11, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The Black code check in CI fails without this - I've added this back in

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Personally, I find it quite odd that you have to add a comma at the end.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Related to this - #29607

)

@Substitution(
Expand Down Expand Up @@ -2846,7 +2846,9 @@ def _getitem_bool_array(self, key):
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(f"Item wrong length {len(key)} instead of {len(self.index)}.")
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)

# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
Expand Down Expand Up @@ -2955,7 +2957,9 @@ def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(f"Item wrong length {len(key)} instead of {len(self.index)}!")
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
Expand Down Expand Up @@ -4125,7 +4129,7 @@ def fillna(
inplace=False,
limit=None,
downcast=None,
**kwargs
**kwargs,
):
return super().fillna(
value=value,
Expand All @@ -4134,7 +4138,7 @@ def fillna(
inplace=inplace,
limit=limit,
downcast=downcast,
**kwargs
**kwargs,
)

@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
Expand Down Expand Up @@ -4551,7 +4555,10 @@ def _maybe_casted_values(index, labels=None):

if not drop:
if isinstance(self.index, ABCMultiIndex):
names = [(n if n is not None else f"level_{i}") for i, n in enumerate(self.index.names)]
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
Expand Down Expand Up @@ -4869,7 +4876,9 @@ def sort_values(
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(f"Length of ascending ({len(ascending)}) != length of by ({len(by)})")
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer

Expand Down Expand Up @@ -6575,7 +6584,7 @@ def _gotitem(
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
**_shared_doc_kwargs
**_shared_doc_kwargs,
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, func, axis=0, *args, **kwargs):
Expand Down
32 changes: 21 additions & 11 deletions pandas/core/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -426,7 +426,9 @@ def _construct_axes_from_arguments(
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError(f"arguments are mutually exclusive for [{a},{alias}]")
raise TypeError(
f"arguments are mutually exclusive for [{a},{alias}]"
)
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
Expand Down Expand Up @@ -2057,7 +2059,7 @@ def __getstate__(self):
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta
**meta,
)

def __setstate__(self, state):
Expand Down Expand Up @@ -6795,7 +6797,10 @@ def replace(
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(f"Replacement lists must match in length. Expecting {len(to_replace)} got {len(value)} ")
raise ValueError(
f"Replacement lists must match in length."
f" Expecting {len(to_replace)} got {len(value)} "
)

new_data = self._data.replace_list(
src_list=to_replace,
Expand Down Expand Up @@ -7048,7 +7053,7 @@ def interpolate(
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs
**kwargs,
):
"""
Interpolate values according to different methods.
Expand Down Expand Up @@ -7122,7 +7127,7 @@ def interpolate(
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs
**kwargs,
)

if inplace:
Expand Down Expand Up @@ -7822,7 +7827,7 @@ def groupby(
group_keys=True,
squeeze=False,
observed=False,
**kwargs
**kwargs,
):
"""
Group DataFrame or Series using a mapper or by a Series of columns.
Expand Down Expand Up @@ -7948,7 +7953,7 @@ def groupby(
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
**kwargs
**kwargs,
)

def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
Expand Down Expand Up @@ -9518,7 +9523,8 @@ def tshift(self, periods=1, freq=None, axis=0):
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = (f"Given freq {freq.rule_code} does not match PeriodIndex freq {orig_freq.rule_code}")
msg = f"Given freq {freq.rule_code} does not match" \
f" PeriodIndex freq {orig_freq.rule_code}"
raise ValueError(msg)
else:
new_data = self._data.copy()
Expand Down Expand Up @@ -9710,7 +9716,9 @@ def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(f"{ax_name} is not a valid DatetimeIndex or PeriodIndex")
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
Expand Down Expand Up @@ -9872,7 +9880,9 @@ def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(f"{ax_name} is not a valid DatetimeIndex or PeriodIndex")
raise TypeError(
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
else:
Expand Down Expand Up @@ -11568,7 +11578,7 @@ def stat_func(
level=None,
numeric_only=None,
min_count=0,
**kwargs
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
Expand Down
14 changes: 10 additions & 4 deletions pandas/core/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@ def __new__(
name=None,
fastpath=None,
tupleize_cols=True,
**kwargs
**kwargs,
) -> "Index":

from .range import RangeIndex
Expand Down Expand Up @@ -961,7 +961,7 @@ def __repr__(self):
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = f",{space}".join(f'{k}={v}' for k, v in attrs)
prepr = f",{space}".join(f"{k}={v}" for k, v in attrs)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

let's try to minimize what we execute in f-space (better name for this?)


# no data provided, just attributes
if data is None:
Expand Down Expand Up @@ -1471,7 +1471,10 @@ def _validate_index_level(self, level):
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(f"Too many levels: Index has only 1 level, {level} is not a valid level number")
raise IndexError(
f"Too many levels: Index has only 1 level,"
f" {level} is not a valid level number"
)
elif level > 0:
raise IndexError(
"Too many levels: Index has only 1 level, not %d" % (level + 1)
Expand Down Expand Up @@ -5064,7 +5067,10 @@ def get_slice_bound(self, label, side, kind):
assert kind in ["ix", "loc", "getitem", None]

if side not in ("left", "right"):
raise ValueError(f"Invalid value for side kwarg, must be either 'left' or 'right': {side}")
raise ValueError(
f"Invalid value for side kwarg, must be either"
f" 'left' or 'right': {side}"
)

original_label = label

Expand Down
2 changes: 1 addition & 1 deletion pandas/core/indexes/category.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,7 @@ def _format_attrs(self):
]
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
attrs.append(('dtype', f"'{self.dtype.name}'"))
attrs.append(("dtype", f"'{self.dtype.name}'"))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/indexes/datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -1422,7 +1422,7 @@ def date_range(
normalize=False,
name=None,
closed=None,
**kwargs
**kwargs,
):
"""
Return a fixed frequency DatetimeIndex.
Expand Down Expand Up @@ -1572,7 +1572,7 @@ def date_range(
tz=tz,
normalize=normalize,
closed=closed,
**kwargs
**kwargs,
)
return DatetimeIndex._simple_new(dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name)

Expand All @@ -1588,7 +1588,7 @@ def bdate_range(
weekmask=None,
holidays=None,
closed=None,
**kwargs
**kwargs,
):
"""
Return a fixed frequency DatetimeIndex, with business day as the default
Expand Down Expand Up @@ -1681,7 +1681,7 @@ def bdate_range(
normalize=normalize,
name=name,
closed=closed,
**kwargs
**kwargs,
)


Expand Down
23 changes: 17 additions & 6 deletions pandas/core/indexes/multi.py
Original file line number Diff line number Diff line change
Expand Up @@ -956,7 +956,7 @@ def copy(
codes=None,
deep=False,
_set_identity=False,
**kwargs
**kwargs,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
Expand Down Expand Up @@ -1020,7 +1020,7 @@ def _shallow_copy_with_infer(self, values, **kwargs):
return MultiIndex(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
**kwargs
**kwargs,
)
return self._shallow_copy(values, **kwargs)

Expand Down Expand Up @@ -1328,7 +1328,9 @@ def inferred_type(self):
def _get_level_number(self, level):
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(f"The name {level} occurs multiple times, use a level number")
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError:
Expand All @@ -1338,7 +1340,10 @@ def _get_level_number(self, level):
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(f"Too many levels: Index has only {self.nlevels} levels, {orig_level} is not a valid level number")
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels,"
f" {orig_level} is not a valid level number"
)
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
Expand Down Expand Up @@ -2284,7 +2289,10 @@ def reorder_levels(self, order):
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(f"Length of order must be same as number of levels ({self.nlevels}), got {len(order)}")
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}),"
f" got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
Expand Down Expand Up @@ -2594,7 +2602,10 @@ def slice_locs(self, start=None, end=None, step=None, kind=None):

def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth ({self.lexsort_depth})")
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth"
f" ({self.lexsort_depth})"
)

n = len(tup)
start, end = 0, len(self)
Expand Down
Loading