Skip to content

Commit f795173

Browse files
alokpcmoritz
authored and committed
Use flake8-comprehensions (ray-project#1976)
* Add flake8 to Travis * Add flake8-comprehensions [flake8 plugin](https://github.com/adamchainz/flake8-comprehensions) that checks for useless constructions. * Use generators instead of lists where appropriate A lot of the builtins can take in generators instead of lists. This commit applies `flake8-comprehensions` to find them. * Fix lint error * Fix some string formatting The rest can be fixed in another PR * Fix compound literals syntax This should probably be merged after ray-project#1963. * dict() -> {} * Use dict literal syntax dict(...) -> {...} * Rewrite nested dicts * Fix hanging indent * Add missing import * Add missing quote * fmt * Add missing whitespace * rm duplicate pip install This is already installed in another file. * Fix indent * move `merge_dicts` into utils * Bring up to date with `master` * Add automatic syntax upgrade * rm pyupgrade In case users want to still use it on their own, the upgrade-syn.sh script was left in the `.travis` dir.
1 parent 99ae74e commit f795173

37 files changed

+330
-273
lines changed

.travis/install-dependencies.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ elif [[ "$LINT" == "1" ]]; then
7777
bash miniconda.sh -b -p $HOME/miniconda
7878
export PATH="$HOME/miniconda/bin:$PATH"
7979
# Install Python linting tools.
80-
pip install -q flake8
80+
pip install -q flake8 flake8-comprehensions
8181
elif [[ "$LINUX_WHEELS" == "1" ]]; then
8282
sudo apt-get install docker
8383
sudo usermod -a -G docker travis

.travis/upgrade-syn.sh

+27
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
#!/usr/bin/env bash
2+
3+
# Cause the script to exit if a single command fails
4+
set -eo pipefail
5+
6+
# this stops git rev-parse from failing if we run this from the .git directory
7+
builtin cd "$(dirname "${BASH_SOURCE:-$0}")"
8+
9+
ROOT="$(git rev-parse --show-toplevel)"
10+
builtin cd "$ROOT"
11+
12+
find \
13+
python test \
14+
-name '*.py' -type f \
15+
-not -path 'python/ray/cloudpickle/*' \
16+
-not -path 'python/ray/dataframe/*' \
17+
-exec python -m pyupgrade {} +
18+
19+
if ! git diff --quiet; then
20+
echo 'Reformatted staged files. Please review and stage the changes.'
21+
echo 'Files updated:'
22+
echo
23+
24+
git --no-pager diff --name-only
25+
26+
exit 1
27+
fi

examples/cython/cython_main.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ def run_func(func, *args, **kwargs):
2626
return result
2727

2828

29-
@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
29+
@click.group(context_settings={'help_option_names': ['-h', '--help']})
3030
def cli():
3131
"""Working with Cython actors and functions in Ray"""
3232

examples/resnet/resnet_main.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ def train():
220220
while True:
221221
all_weights = ray.get([actor.compute_steps.remote(weight_id)
222222
for actor in train_actors])
223-
mean_weights = {k: (sum([weights[k] for weights in all_weights]) /
223+
mean_weights = {k: (sum(weights[k] for weights in all_weights) /
224224
num_gpus)
225225
for k in all_weights[0]}
226226
weight_id = ray.put(mean_weights)

examples/streaming/streaming.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def next(self):
9292
article_index = 0
9393
while True:
9494
print("article index = {}".format(article_index))
95-
wordcounts = dict()
95+
wordcounts = {}
9696
counts = ray.get([reducer.next_reduce_result.remote(article_index)
9797
for reducer in reducers])
9898
for count in counts:

python/ray/actor.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -491,8 +491,8 @@ def pred(x):
491491
# Extract the signatures of each of the methods. This will be used
492492
# to catch some errors if the methods are called with inappropriate
493493
# arguments.
494-
self._method_signatures = dict()
495-
self._actor_method_num_return_vals = dict()
494+
self._method_signatures = {}
495+
self._actor_method_num_return_vals = {}
496496
for method_name, method in self._actor_methods:
497497
# Print a warning message if the method signature is not
498498
# supported. We don't raise an exception because if the actor

python/ray/autoscaler/aws/config.py

+4-6
Original file line numberDiff line numberDiff line change
@@ -145,10 +145,8 @@ def _configure_key_pair(config):
145145
def _configure_subnet(config):
146146
ec2 = _resource("ec2", config)
147147
subnets = sorted(
148-
[
149-
s for s in ec2.subnets.all()
150-
if s.state == "available" and s.map_public_ip_on_launch
151-
],
148+
(s for s in ec2.subnets.all()
149+
if s.state == "available" and s.map_public_ip_on_launch),
152150
reverse=True, # sort from Z-A
153151
key=lambda subnet: subnet.availability_zone)
154152
if not subnets:
@@ -293,11 +291,11 @@ def _get_key(key_name, config):
293291

294292

295293
def _client(name, config):
296-
boto_config = Config(retries=dict(max_attempts=BOTO_MAX_RETRIES))
294+
boto_config = Config(retries={'max_attempts': BOTO_MAX_RETRIES})
297295
return boto3.client(name, config["provider"]["region"], config=boto_config)
298296

299297

300298
def _resource(name, config):
301-
boto_config = Config(retries=dict(max_attempts=BOTO_MAX_RETRIES))
299+
boto_config = Config(retries={'max_attempts': BOTO_MAX_RETRIES})
302300
return boto3.resource(
303301
name, config["provider"]["region"], config=boto_config)

python/ray/autoscaler/aws/node_provider.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
class AWSNodeProvider(NodeProvider):
1414
def __init__(self, provider_config, cluster_name):
1515
NodeProvider.__init__(self, provider_config, cluster_name)
16-
config = Config(retries=dict(max_attempts=BOTO_MAX_RETRIES))
16+
config = Config(retries={'max_attempts': BOTO_MAX_RETRIES})
1717
self.ec2 = boto3.resource(
1818
"ec2", region_name=provider_config["region"], config=config)
1919

python/ray/dataframe/concat.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -38,17 +38,17 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
3838
"and ray.dataframe.DataFrame objs are "
3939
"valid", type(type_check))
4040

41-
all_series = all([isinstance(obj, pandas.Series)
42-
for obj in objs])
41+
all_series = all(isinstance(obj, pandas.Series)
42+
for obj in objs)
4343
if all_series:
4444
return pandas.concat(objs, axis, join, join_axes,
4545
ignore_index, keys, levels, names,
4646
verify_integrity, copy)
4747

4848
if isinstance(objs, dict):
4949
raise NotImplementedError(
50-
"Obj as dicts not implemented. To contribute to "
51-
"Pandas on Ray, please visit github.com/ray-project/ray.")
50+
"Obj as dicts not implemented. To contribute to "
51+
"Pandas on Ray, please visit github.com/ray-project/ray.")
5252

5353
axis = pandas.DataFrame()._get_axis_number(axis)
5454

python/ray/dataframe/dataframe.py

+28-16
Original file line numberDiff line numberDiff line change
@@ -668,7 +668,7 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
668668
mismatch = len(by) != len(self) if axis == 0 \
669669
else len(by) != len(self.columns)
670670

671-
if all([obj in self for obj in by]) and mismatch:
671+
if all(obj in self for obj in by) and mismatch:
672672
raise NotImplementedError(
673673
"Groupby with lists of columns not yet supported.")
674674
elif mismatch:
@@ -2194,7 +2194,7 @@ def idxmax(self, axis=0, skipna=True):
21942194
A Series with the index for each maximum value for the axis
21952195
specified.
21962196
"""
2197-
if not all([d != np.dtype('O') for d in self.dtypes]):
2197+
if not all(d != np.dtype('O') for d in self.dtypes):
21982198
raise TypeError(
21992199
"reduction operation 'argmax' not allowed for this dtype")
22002200

@@ -2216,7 +2216,7 @@ def idxmin(self, axis=0, skipna=True):
22162216
A Series with the index for each minimum value for the axis
22172217
specified.
22182218
"""
2219-
if not all([d != np.dtype('O') for d in self.dtypes]):
2219+
if not all(d != np.dtype('O') for d in self.dtypes):
22202220
raise TypeError(
22212221
"reduction operation 'argmax' not allowed for this dtype")
22222222

@@ -3196,9 +3196,9 @@ def quantile_helper(df, base_object):
31963196
"""
31973197
# This if call prevents ValueErrors with object only partitions
31983198
if (numeric_only and
3199-
all([dtype == np.dtype('O') or
3200-
is_timedelta64_dtype(dtype)
3201-
for dtype in df.dtypes])):
3199+
all(dtype == np.dtype('O') or
3200+
is_timedelta64_dtype(dtype)
3201+
for dtype in df.dtypes)):
32023202
return base_object
32033203
else:
32043204
return df.quantile(q=q, axis=axis, numeric_only=numeric_only,
@@ -4224,16 +4224,28 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep="", float_format=None,
42244224
tupleize_cols=None, date_format=None, doublequote=True,
42254225
escapechar=None, decimal="."):
42264226

4227-
kwargs = dict(
4228-
path_or_buf=path_or_buf, sep=sep, na_rep=na_rep,
4229-
float_format=float_format, columns=columns, header=header,
4230-
index=index, index_label=index_label, mode=mode,
4231-
encoding=encoding, compression=compression, quoting=quoting,
4232-
quotechar=quotechar, line_terminator=line_terminator,
4233-
chunksize=chunksize, tupleize_cols=tupleize_cols,
4234-
date_format=date_format, doublequote=doublequote,
4235-
escapechar=escapechar, decimal=decimal
4236-
)
4227+
kwargs = {
4228+
'path_or_buf': path_or_buf,
4229+
'sep': sep,
4230+
'na_rep': na_rep,
4231+
'float_format': float_format,
4232+
'columns': columns,
4233+
'header': header,
4234+
'index': index,
4235+
'index_label': index_label,
4236+
'mode': mode,
4237+
'encoding': encoding,
4238+
'compression': compression,
4239+
'quoting': quoting,
4240+
'quotechar': quotechar,
4241+
'line_terminator': line_terminator,
4242+
'chunksize': chunksize,
4243+
'tupleize_cols': tupleize_cols,
4244+
'date_format': date_format,
4245+
'doublequote': doublequote,
4246+
'escapechar': escapechar,
4247+
'decimal': decimal
4248+
}
42374249

42384250
if compression is not None:
42394251
warnings.warn("Defaulting to Pandas implementation",

python/ray/dataframe/io.py

+55-54
Original file line numberDiff line numberDiff line change
@@ -208,60 +208,61 @@ def read_csv(filepath_or_buffer,
208208
kwargs: Keyword arguments in pandas::from_csv
209209
"""
210210

211-
kwargs = dict(
212-
sep=sep,
213-
delimiter=delimiter,
214-
header=header,
215-
names=names,
216-
index_col=index_col,
217-
usecols=usecols,
218-
squeeze=squeeze,
219-
prefix=prefix,
220-
mangle_dupe_cols=mangle_dupe_cols,
221-
dtype=dtype,
222-
engine=engine,
223-
converters=converters,
224-
true_values=true_values,
225-
false_values=false_values,
226-
skipinitialspace=skipinitialspace,
227-
skiprows=skiprows,
228-
nrows=nrows,
229-
na_values=na_values,
230-
keep_default_na=keep_default_na,
231-
na_filter=na_filter,
232-
verbose=verbose,
233-
skip_blank_lines=skip_blank_lines,
234-
parse_dates=parse_dates,
235-
infer_datetime_format=infer_datetime_format,
236-
keep_date_col=keep_date_col,
237-
date_parser=date_parser,
238-
dayfirst=dayfirst,
239-
iterator=iterator,
240-
chunksize=chunksize,
241-
compression=compression,
242-
thousands=thousands,
243-
decimal=decimal,
244-
lineterminator=lineterminator,
245-
quotechar=quotechar,
246-
quoting=quoting,
247-
escapechar=escapechar,
248-
comment=comment,
249-
encoding=encoding,
250-
dialect=dialect,
251-
tupleize_cols=tupleize_cols,
252-
error_bad_lines=error_bad_lines,
253-
warn_bad_lines=warn_bad_lines,
254-
skipfooter=skipfooter,
255-
skip_footer=skip_footer,
256-
doublequote=doublequote,
257-
delim_whitespace=delim_whitespace,
258-
as_recarray=as_recarray,
259-
compact_ints=compact_ints,
260-
use_unsigned=use_unsigned,
261-
low_memory=low_memory,
262-
buffer_lines=buffer_lines,
263-
memory_map=memory_map,
264-
float_precision=float_precision)
211+
kwargs = {
212+
'sep': sep,
213+
'delimiter': delimiter,
214+
'header': header,
215+
'names': names,
216+
'index_col': index_col,
217+
'usecols': usecols,
218+
'squeeze': squeeze,
219+
'prefix': prefix,
220+
'mangle_dupe_cols': mangle_dupe_cols,
221+
'dtype': dtype,
222+
'engine': engine,
223+
'converters': converters,
224+
'true_values': true_values,
225+
'false_values': false_values,
226+
'skipinitialspace': skipinitialspace,
227+
'skiprows': skiprows,
228+
'nrows': nrows,
229+
'na_values': na_values,
230+
'keep_default_na': keep_default_na,
231+
'na_filter': na_filter,
232+
'verbose': verbose,
233+
'skip_blank_lines': skip_blank_lines,
234+
'parse_dates': parse_dates,
235+
'infer_datetime_format': infer_datetime_format,
236+
'keep_date_col': keep_date_col,
237+
'date_parser': date_parser,
238+
'dayfirst': dayfirst,
239+
'iterator': iterator,
240+
'chunksize': chunksize,
241+
'compression': compression,
242+
'thousands': thousands,
243+
'decimal': decimal,
244+
'lineterminator': lineterminator,
245+
'quotechar': quotechar,
246+
'quoting': quoting,
247+
'escapechar': escapechar,
248+
'comment': comment,
249+
'encoding': encoding,
250+
'dialect': dialect,
251+
'tupleize_cols': tupleize_cols,
252+
'error_bad_lines': error_bad_lines,
253+
'warn_bad_lines': warn_bad_lines,
254+
'skipfooter': skipfooter,
255+
'skip_footer': skip_footer,
256+
'doublequote': doublequote,
257+
'delim_whitespace': delim_whitespace,
258+
'as_recarray': as_recarray,
259+
'compact_ints': compact_ints,
260+
'use_unsigned': use_unsigned,
261+
'low_memory': low_memory,
262+
'buffer_lines': buffer_lines,
263+
'memory_map': memory_map,
264+
'float_precision': float_precision,
265+
}
265266

266267
# Default to Pandas read_csv for non-serializable objects
267268
if not isinstance(filepath_or_buffer, str) or \

python/ray/dataframe/test/test_dataframe.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1783,7 +1783,7 @@ def test_fillna_dtype_conversion(num_partitions=2):
17831783
)
17841784

17851785
# equiv of replace
1786-
df = pd.DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
1786+
df = pd.DataFrame({'A': [1, np.nan], 'B': [1., 2.]})
17871787
ray_df = from_pandas(df, num_partitions)
17881788
for v in ['', 1, np.nan, 1.0]:
17891789
assert ray_df_equals_pandas(

python/ray/dataframe/utils.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from . import get_npartitions
1010

1111

12-
_NAN_BLOCKS = dict()
12+
_NAN_BLOCKS = {}
1313

1414

1515
def _get_nan_block_id(n_row=1, n_col=1, transpose=False):
@@ -225,7 +225,7 @@ def _map_partitions(func, partitions, *argslists):
225225
return [_deploy_func.remote(func, part, argslists[0])
226226
for part in partitions]
227227
else:
228-
assert(all([len(args) == len(partitions) for args in argslists]))
228+
assert(all(len(args) == len(partitions) for args in argslists))
229229
return [_deploy_func.remote(func, *args)
230230
for args in zip(partitions, *argslists)]
231231

python/ray/experimental/array/distributed/core.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ def subblocks(a, *ranges):
241241
result = DistArray(shape)
242242
for index in np.ndindex(*result.num_blocks):
243243
result.objectids[index] = a.objectids[tuple(
244-
[ranges[i][index[i]] for i in range(a.ndim)])]
244+
ranges[i][index[i]] for i in range(a.ndim))]
245245
return result
246246

247247

0 commit comments

Comments
 (0)