Skip to content

Commit f795173

Browse files
alokpcmoritz
authored and committed
Use flake8-comprehensions (#1976)
* Add flake8 to Travis * Add flake8-comprehensions [flake8 plugin](https://github.com/adamchainz/flake8-comprehensions) that checks for useless constructions. * Use generators instead of lists where appropriate A lot of the builtins can take in generators instead of lists. This commit applies `flake8-comprehensions` to find them. * Fix lint error * Fix some string formatting The rest can be fixed in another PR * Fix compound literals syntax This should probably be merged after #1963. * dict() -> {} * Use dict literal syntax dict(...) -> {...} * Rewrite nested dicts * Fix hanging indent * Add missing import * Add missing quote * fmt * Add missing whitespace * rm duplicate pip install This is already installed in another file. * Fix indent * move `merge_dicts` into utils * Bring up to date with `master` * Add automatic syntax upgrade * rm pyupgrade In case users want to still use it on their own, the upgrade-syn.sh script was left in the `.travis` dir.
1 parent 99ae74e commit f795173

37 files changed

+330
-273
lines changed

.travis/install-dependencies.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ elif [[ "$LINT" == "1" ]]; then
7777
bash miniconda.sh -b -p $HOME/miniconda
7878
export PATH="$HOME/miniconda/bin:$PATH"
7979
# Install Python linting tools.
80-
pip install -q flake8
80+
pip install -q flake8 flake8-comprehensions
8181
elif [[ "$LINUX_WHEELS" == "1" ]]; then
8282
sudo apt-get install docker
8383
sudo usermod -a -G docker travis

.travis/upgrade-syn.sh

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
#!/usr/bin/env bash
2+
3+
# Cause the script to exit if a single command fails
4+
set -eo pipefail
5+
6+
# this stops git rev-parse from failing if we run this from the .git directory
7+
builtin cd "$(dirname "${BASH_SOURCE:-$0}")"
8+
9+
ROOT="$(git rev-parse --show-toplevel)"
10+
builtin cd "$ROOT"
11+
12+
find \
13+
python test \
14+
-name '*.py' -type f \
15+
-not -path 'python/ray/cloudpickle/*' \
16+
-not -path 'python/ray/dataframe/*' \
17+
-exec python -m pyupgrade {} +
18+
19+
if ! git diff --quiet; then
20+
echo 'Reformatted staged files. Please review and stage the changes.'
21+
echo 'Files updated:'
22+
echo
23+
24+
git --no-pager diff --name-only
25+
26+
exit 1
27+
fi

examples/cython/cython_main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ def run_func(func, *args, **kwargs):
2626
return result
2727

2828

29-
@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
29+
@click.group(context_settings={'help_option_names': ['-h', '--help']})
3030
def cli():
3131
"""Working with Cython actors and functions in Ray"""
3232

examples/resnet/resnet_main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ def train():
220220
while True:
221221
all_weights = ray.get([actor.compute_steps.remote(weight_id)
222222
for actor in train_actors])
223-
mean_weights = {k: (sum([weights[k] for weights in all_weights]) /
223+
mean_weights = {k: (sum(weights[k] for weights in all_weights) /
224224
num_gpus)
225225
for k in all_weights[0]}
226226
weight_id = ray.put(mean_weights)

examples/streaming/streaming.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ def next(self):
9292
article_index = 0
9393
while True:
9494
print("article index = {}".format(article_index))
95-
wordcounts = dict()
95+
wordcounts = {}
9696
counts = ray.get([reducer.next_reduce_result.remote(article_index)
9797
for reducer in reducers])
9898
for count in counts:

python/ray/actor.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -491,8 +491,8 @@ def pred(x):
491491
# Extract the signatures of each of the methods. This will be used
492492
# to catch some errors if the methods are called with inappropriate
493493
# arguments.
494-
self._method_signatures = dict()
495-
self._actor_method_num_return_vals = dict()
494+
self._method_signatures = {}
495+
self._actor_method_num_return_vals = {}
496496
for method_name, method in self._actor_methods:
497497
# Print a warning message if the method signature is not
498498
# supported. We don't raise an exception because if the actor

python/ray/autoscaler/aws/config.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -145,10 +145,8 @@ def _configure_key_pair(config):
145145
def _configure_subnet(config):
146146
ec2 = _resource("ec2", config)
147147
subnets = sorted(
148-
[
149-
s for s in ec2.subnets.all()
150-
if s.state == "available" and s.map_public_ip_on_launch
151-
],
148+
(s for s in ec2.subnets.all()
149+
if s.state == "available" and s.map_public_ip_on_launch),
152150
reverse=True, # sort from Z-A
153151
key=lambda subnet: subnet.availability_zone)
154152
if not subnets:
@@ -293,11 +291,11 @@ def _get_key(key_name, config):
293291

294292

295293
def _client(name, config):
296-
boto_config = Config(retries=dict(max_attempts=BOTO_MAX_RETRIES))
294+
boto_config = Config(retries={'max_attempts': BOTO_MAX_RETRIES})
297295
return boto3.client(name, config["provider"]["region"], config=boto_config)
298296

299297

300298
def _resource(name, config):
301-
boto_config = Config(retries=dict(max_attempts=BOTO_MAX_RETRIES))
299+
boto_config = Config(retries={'max_attempts': BOTO_MAX_RETRIES})
302300
return boto3.resource(
303301
name, config["provider"]["region"], config=boto_config)

python/ray/autoscaler/aws/node_provider.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
class AWSNodeProvider(NodeProvider):
1414
def __init__(self, provider_config, cluster_name):
1515
NodeProvider.__init__(self, provider_config, cluster_name)
16-
config = Config(retries=dict(max_attempts=BOTO_MAX_RETRIES))
16+
config = Config(retries={'max_attempts': BOTO_MAX_RETRIES})
1717
self.ec2 = boto3.resource(
1818
"ec2", region_name=provider_config["region"], config=config)
1919

python/ray/dataframe/concat.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,17 +38,17 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
3838
"and ray.dataframe.DataFrame objs are "
3939
"valid", type(type_check))
4040

41-
all_series = all([isinstance(obj, pandas.Series)
42-
for obj in objs])
41+
all_series = all(isinstance(obj, pandas.Series)
42+
for obj in objs)
4343
if all_series:
4444
return pandas.concat(objs, axis, join, join_axes,
4545
ignore_index, keys, levels, names,
4646
verify_integrity, copy)
4747

4848
if isinstance(objs, dict):
4949
raise NotImplementedError(
50-
"Obj as dicts not implemented. To contribute to "
51-
"Pandas on Ray, please visit github.com/ray-project/ray.")
50+
"Obj as dicts not implemented. To contribute to "
51+
"Pandas on Ray, please visit github.com/ray-project/ray.")
5252

5353
axis = pandas.DataFrame()._get_axis_number(axis)
5454

python/ray/dataframe/dataframe.py

Lines changed: 28 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -668,7 +668,7 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
668668
mismatch = len(by) != len(self) if axis == 0 \
669669
else len(by) != len(self.columns)
670670

671-
if all([obj in self for obj in by]) and mismatch:
671+
if all(obj in self for obj in by) and mismatch:
672672
raise NotImplementedError(
673673
"Groupby with lists of columns not yet supported.")
674674
elif mismatch:
@@ -2194,7 +2194,7 @@ def idxmax(self, axis=0, skipna=True):
21942194
A Series with the index for each maximum value for the axis
21952195
specified.
21962196
"""
2197-
if not all([d != np.dtype('O') for d in self.dtypes]):
2197+
if not all(d != np.dtype('O') for d in self.dtypes):
21982198
raise TypeError(
21992199
"reduction operation 'argmax' not allowed for this dtype")
22002200

@@ -2216,7 +2216,7 @@ def idxmin(self, axis=0, skipna=True):
22162216
A Series with the index for each minimum value for the axis
22172217
specified.
22182218
"""
2219-
if not all([d != np.dtype('O') for d in self.dtypes]):
2219+
if not all(d != np.dtype('O') for d in self.dtypes):
22202220
raise TypeError(
22212221
"reduction operation 'argmax' not allowed for this dtype")
22222222

@@ -3196,9 +3196,9 @@ def quantile_helper(df, base_object):
31963196
"""
31973197
# This if call prevents ValueErrors with object only partitions
31983198
if (numeric_only and
3199-
all([dtype == np.dtype('O') or
3200-
is_timedelta64_dtype(dtype)
3201-
for dtype in df.dtypes])):
3199+
all(dtype == np.dtype('O') or
3200+
is_timedelta64_dtype(dtype)
3201+
for dtype in df.dtypes)):
32023202
return base_object
32033203
else:
32043204
return df.quantile(q=q, axis=axis, numeric_only=numeric_only,
@@ -4224,16 +4224,28 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep="", float_format=None,
42244224
tupleize_cols=None, date_format=None, doublequote=True,
42254225
escapechar=None, decimal="."):
42264226

4227-
kwargs = dict(
4228-
path_or_buf=path_or_buf, sep=sep, na_rep=na_rep,
4229-
float_format=float_format, columns=columns, header=header,
4230-
index=index, index_label=index_label, mode=mode,
4231-
encoding=encoding, compression=compression, quoting=quoting,
4232-
quotechar=quotechar, line_terminator=line_terminator,
4233-
chunksize=chunksize, tupleize_cols=tupleize_cols,
4234-
date_format=date_format, doublequote=doublequote,
4235-
escapechar=escapechar, decimal=decimal
4236-
)
4227+
kwargs = {
4228+
'path_or_buf': path_or_buf,
4229+
'sep': sep,
4230+
'na_rep': na_rep,
4231+
'float_format': float_format,
4232+
'columns': columns,
4233+
'header': header,
4234+
'index': index,
4235+
'index_label': index_label,
4236+
'mode': mode,
4237+
'encoding': encoding,
4238+
'compression': compression,
4239+
'quoting': quoting,
4240+
'quotechar': quotechar,
4241+
'line_terminator': line_terminator,
4242+
'chunksize': chunksize,
4243+
'tupleize_cols': tupleize_cols,
4244+
'date_format': date_format,
4245+
'doublequote': doublequote,
4246+
'escapechar': escapechar,
4247+
'decimal': decimal
4248+
}
42374249

42384250
if compression is not None:
42394251
warnings.warn("Defaulting to Pandas implementation",

0 commit comments

Comments (0)