Skip to content

Commit

Permalink
CLN: replace %s syntax with .format in pandas.core.reshape (pandas-dev#17252)

Browse files Browse the repository at this point in the history

Replaced %s syntax with .format in pandas.core.reshape. Additionally, made some of the existing positional .format code more explicit.
  • Loading branch information
jschendel authored and jowens committed Sep 20, 2017
1 parent b49446e commit 536b761
Show file tree
Hide file tree
Showing 5 changed files with 67 additions and 58 deletions.
29 changes: 15 additions & 14 deletions pandas/core/reshape/concat.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{0}"'.format(type(objs).__name__))
'"{name}"'.format(name=type(objs).__name__))

if join == 'outer':
self.intersect = False
Expand Down Expand Up @@ -309,8 +309,8 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,

self._is_series = isinstance(sample, Series)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {0}, "
"input was {1}".format(sample.ndim, axis))
raise AssertionError("axis must be between 0 and {ndim}, input was"
" {axis}".format(ndim=sample.ndim, axis=axis))

# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
Expand Down Expand Up @@ -431,8 +431,8 @@ def _get_new_axes(self):
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must not be "
"equal to {0}".format(ndim - 1))
raise AssertionError("length of join_axes must not be equal "
"to {length}".format(length=ndim - 1))

# ufff...
indices = compat.lrange(ndim)
Expand All @@ -451,7 +451,8 @@ def _get_comb_axis(self, i):
intersect=self.intersect)
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of %s" % types)
raise TypeError("Cannot concatenate list of {types}"
.format(types=types))

def _get_concat_axis(self):
"""
Expand All @@ -470,8 +471,8 @@ def _get_concat_axis(self):
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type "
"%r" % type(x).__name__)
"with object of type {type!r}"
.format(type=type(x).__name__))
if x.name is not None:
names[i] = x.name
has_names = True
Expand Down Expand Up @@ -505,8 +506,8 @@ def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: %s'
% str(overlap))
raise ValueError('Indexes have overlapping values: '
'{overlap!s}'.format(overlap=overlap))


def _concat_indexes(indexes):
Expand Down Expand Up @@ -547,8 +548,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key %s not in level %s'
% (str(key), str(level)))
raise ValueError('Key {key!s} not in level {level!s}'
.format(key=key, level=level))

to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
Expand Down Expand Up @@ -597,8 +598,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None):

mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: %s'
% str(hlevel[mask]))
raise ValueError('Values not found in passed level: {hlevel!s}'
.format(hlevel=hlevel[mask]))

new_labels.append(np.repeat(mapped, n))

Expand Down
53 changes: 28 additions & 25 deletions pandas/core/reshape/merge.py
Original file line number Diff line number Diff line change
Expand Up @@ -534,28 +534,27 @@ def __init__(self, left, right, how='inner', on=None,
'indicator option can only accept boolean or string arguments')

if not isinstance(left, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(left)))
raise ValueError('can not merge DataFrame with instance of '
'type {left}'.format(left=type(left)))
if not isinstance(right, DataFrame):
raise ValueError(
'can not merge DataFrame with instance of '
'type {0}'.format(type(right)))
raise ValueError('can not merge DataFrame with instance of '
'type {right}'.format(right=type(right)))

if not is_bool(left_index):
raise ValueError(
'left_index parameter must be of type bool, not '
'{0}'.format(type(left_index)))
'{left_index}'.format(left_index=type(left_index)))
if not is_bool(right_index):
raise ValueError(
'right_index parameter must be of type bool, not '
'{0}'.format(type(right_index)))
'{right_index}'.format(right_index=type(right_index)))

# warn user when merging between different levels
if left.columns.nlevels != right.columns.nlevels:
msg = ('merging between different levels can give an unintended '
'result ({0} levels on the left, {1} on the right)')
msg = msg.format(left.columns.nlevels, right.columns.nlevels)
'result ({left} levels on the left, {right} on the right)'
).format(left=left.columns.nlevels,
right=right.columns.nlevels)
warnings.warn(msg, UserWarning)

self._validate_specification()
Expand Down Expand Up @@ -613,7 +612,8 @@ def _indicator_pre_merge(self, left, right):
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
raise ValueError("Cannot use `indicator=True` option when "
"data contains a column named {}".format(i))
"data contains a column named {name}"
.format(name=i))
if self.indicator_name in columns:
raise ValueError(
"Cannot use name of an existing column for indicator column")
Expand Down Expand Up @@ -717,7 +717,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
if name in result:
result[name] = key_col
else:
result.insert(i, name or 'key_%d' % i, key_col)
result.insert(i, name or 'key_{i}'.format(i=i), key_col)

def _get_join_indexers(self):
""" return the join indexers """
Expand Down Expand Up @@ -952,8 +952,8 @@ def _validate_specification(self):
if len(common_cols) == 0:
raise MergeError('No common columns to perform merge on')
if not common_cols.is_unique:
raise MergeError("Data columns not unique: %s"
% repr(common_cols))
raise MergeError("Data columns not unique: {common!r}"
.format(common=common_cols))
self.left_on = self.right_on = common_cols
elif self.on is not None:
if self.left_on is not None or self.right_on is not None:
Expand Down Expand Up @@ -1119,12 +1119,14 @@ def get_result(self):


def _asof_function(direction, on_type):
return getattr(libjoin, 'asof_join_%s_%s' % (direction, on_type), None)
name = 'asof_join_{dir}_{on}'.format(dir=direction, on=on_type)
return getattr(libjoin, name, None)


def _asof_by_function(direction, on_type, by_type):
return getattr(libjoin, 'asof_join_%s_%s_by_%s' %
(direction, on_type, by_type), None)
name = 'asof_join_{dir}_{on}_by_{by}'.format(
dir=direction, on=on_type, by=by_type)
return getattr(libjoin, name, None)


_type_casters = {
Expand Down Expand Up @@ -1153,7 +1155,7 @@ def _get_cython_type(dtype):
type_name = _get_dtype(dtype).name
ctype = _cython_types.get(type_name, 'object')
if ctype == 'error':
raise MergeError('unsupported type: ' + type_name)
raise MergeError('unsupported type: {type}'.format(type=type_name))
return ctype


Expand Down Expand Up @@ -1235,7 +1237,8 @@ def _validate_specification(self):

# check 'direction' is valid
if self.direction not in ['backward', 'forward', 'nearest']:
raise MergeError('direction invalid: ' + self.direction)
raise MergeError('direction invalid: {direction}'
.format(direction=self.direction))

@property
def _asof_key(self):
Expand Down Expand Up @@ -1264,7 +1267,7 @@ def _get_merge_keys(self):
lt = left_join_keys[-1]

msg = "incompatible tolerance, must be compat " \
"with type {0}".format(type(lt))
"with type {lt}".format(lt=type(lt))

if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
if not isinstance(self.tolerance, Timedelta):
Expand All @@ -1283,8 +1286,8 @@ def _get_merge_keys(self):

# validate allow_exact_matches
if not is_bool(self.allow_exact_matches):
raise MergeError("allow_exact_matches must be boolean, "
"passed {0}".format(self.allow_exact_matches))
msg = "allow_exact_matches must be boolean, passed {passed}"
raise MergeError(msg.format(passed=self.allow_exact_matches))

return left_join_keys, right_join_keys, join_names

Expand All @@ -1306,11 +1309,11 @@ def flip(xs):
tolerance = self.tolerance

# we required sortedness in the join keys
msg = " keys must be sorted"
msg = "{side} keys must be sorted"
if not Index(left_values).is_monotonic:
raise ValueError('left' + msg)
raise ValueError(msg.format(side='left'))
if not Index(right_values).is_monotonic:
raise ValueError('right' + msg)
raise ValueError(msg.format(side='right'))

# initial type conversion as needed
if needs_i8_conversion(left_values):
Expand Down
8 changes: 4 additions & 4 deletions pandas/core/reshape/pivot.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,18 +145,18 @@ def _add_margins(table, data, values, rows, cols, aggfunc,
if not isinstance(margins_name, compat.string_types):
raise ValueError('margins_name argument must be a string')

exception_msg = 'Conflicting name "{0}" in margins'.format(margins_name)
msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
for level in table.index.names:
if margins_name in table.index.get_level_values(level):
raise ValueError(exception_msg)
raise ValueError(msg)

grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)

# could be passed a Series object with no 'columns'
if hasattr(table, 'columns'):
for level in table.columns.names[1:]:
if margins_name in table.columns.get_level_values(level):
raise ValueError(exception_msg)
raise ValueError(msg)

if len(rows) > 1:
key = (margins_name,) + ('',) * (len(rows) - 1)
Expand Down Expand Up @@ -553,7 +553,7 @@ def _get_names(arrs, names, prefix='row'):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
names.append('%s_%d' % (prefix, i))
names.append('{prefix}_{i}'.format(prefix=prefix, i=i))
else:
if len(names) != len(arrs):
raise AssertionError('arrays and names must have the same length')
Expand Down
31 changes: 18 additions & 13 deletions pandas/core/reshape/reshape.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas.compat import range, text_type, zip
from pandas import compat
import itertools
import re
Expand Down Expand Up @@ -91,8 +91,8 @@ def __init__(self, values, index, level=-1, value_columns=None,

if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
msg = ("Ambiguous reference to {level}. The index "
"names are not unique.".format(level=level))
raise ValueError(msg)

self.level = self.index._get_level_number(level)
Expand Down Expand Up @@ -229,7 +229,7 @@ def get_new_values(self):
sorted_values = sorted_values.astype(name, copy=False)

# fill in our values & mask
f = getattr(_reshape, "unstack_{}".format(name))
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
Expand Down Expand Up @@ -516,8 +516,8 @@ def factorize(index):
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
msg = ("Ambiguous reference to {level}. The column "
"names are not unique.".format(level=level))
raise ValueError(msg)

# Will also convert negative level numbers and check if out of bounds.
Expand Down Expand Up @@ -747,7 +747,7 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None,
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i
var_name = ['variable_{i}'.format(i=i)
for i in range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
Expand Down Expand Up @@ -1027,7 +1027,8 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'):
in a typicaly case.
"""
def get_var_names(df, stub, sep, suffix):
regex = "^{0}{1}{2}".format(re.escape(stub), re.escape(sep), suffix)
regex = "^{stub}{sep}{suffix}".format(
stub=re.escape(stub), sep=re.escape(sep), suffix=suffix)
return df.filter(regex=regex).columns.tolist()

def melt_stub(df, stub, i, j, value_vars, sep):
Expand Down Expand Up @@ -1180,13 +1181,14 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,

# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did not match the length of "
"the columns being encoded ({2}).")
len_msg = ("Length of '{name}' ({len_item}) did not match the "
"length of the columns being encoded ({len_enc}).")

if is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
len_msg = len_msg.format(name=name, len_item=len(item),
len_enc=len(columns_to_encode))
raise ValueError(len_msg)

check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
Expand Down Expand Up @@ -1253,7 +1255,10 @@ def get_empty_Frame(data, sparse):
number_of_cols = len(levels)

if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels]
dummy_strs = [u'{prefix}{sep}{level}' if isinstance(v, text_type)
else '{prefix}{sep}{level}' for v in levels]
dummy_cols = [dummy_str.format(prefix=prefix, sep=prefix_sep, level=v)
for dummy_str, v in zip(dummy_strs, levels)]
else:
dummy_cols = levels

Expand Down
4 changes: 2 additions & 2 deletions pandas/core/reshape/tile.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,9 +229,9 @@ def _bins_to_cuts(x, bins, right=True, labels=None,
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {}.\nYou "
raise ValueError("Bin edges must be unique: {bins!r}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(repr(bins)))
"the 'duplicates' kwarg".format(bins=bins))
else:
bins = unique_bins

Expand Down

0 comments on commit 536b761

Please sign in to comment.