Commit ba02fc0

robertnishihara authored and pcmoritz committed
Run flake8 in Travis and make code PEP8 compliant. (ray-project#387)
1 parent 083e7a2 commit ba02fc0

54 files changed (+2390 -1312 lines)

.travis.yml (+4)
@@ -35,6 +35,10 @@ matrix:
       - cd doc
       - pip install -r requirements-doc.txt
       - sphinx-build -W -b html -d _build/doctrees source _build/html
+      - cd ..
+      # Run Python linting.
+      - flake8 --ignore=E111,E114
+        --exclude=python/ray/core/src/common/flatbuffers_ep-prefix/,python/ray/core/generated/,src/numbuf/thirdparty/,src/common/format/,examples/,doc/source/conf.py
   - os: linux
     dist: trusty
     env: VALGRIND=1 PYTHON=2.7
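
The lint step added above can be reproduced locally before pushing. Below is a minimal sketch (not part of the commit) that shells out to flake8 with the same ignore and exclude lists; it assumes flake8 is installed in the current environment and that the script is run from the repository root.

# Sketch: run the same flake8 check that Travis runs, from the repo root.
# Assumes `pip install flake8` has already been done.
import subprocess
import sys

EXCLUDES = ",".join([
    "python/ray/core/src/common/flatbuffers_ep-prefix/",
    "python/ray/core/generated/",
    "src/numbuf/thirdparty/",
    "src/common/format/",
    "examples/",
    "doc/source/conf.py",
])

# Mirror the Travis invocation: ignore E111/E114 (two-space indentation) and
# skip generated/vendored code.
result = subprocess.run(
    ["flake8", "--ignore=E111,E114", "--exclude=" + EXCLUDES],
    check=False)
sys.exit(result.returncode)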

.travis/install-dependencies.sh (+3)
@@ -64,6 +64,9 @@ elif [[ "$LINT" == "1" ]]; then
   # Install miniconda.
   wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
   bash miniconda.sh -b -p $HOME/miniconda
+  export PATH="$HOME/miniconda/bin:$PATH"
+  # Install Python linting tools.
+  pip install flake8
 else
   echo "Unrecognized environment."
   exit 1

python/ray/__init__.py (+20 -10)
@@ -2,19 +2,29 @@
 from __future__ import division
 from __future__ import print_function

+from ray.worker import (register_class, error_info, init, connect, disconnect,
+                        get, put, wait, remote, log_event, log_span,
+                        flush_log)
+from ray.actor import actor
+from ray.actor import get_gpu_ids
+from ray.worker import EnvironmentVariable, env
+from ray.worker import SCRIPT_MODE, WORKER_MODE, PYTHON_MODE, SILENT_MODE
+
 # Ray version string
 __version__ = "0.01"

+__all__ = ["register_class", "error_info", "init", "connect", "disconnect",
+           "get", "put", "wait", "remote", "log_event", "log_span",
+           "flush_log", "actor", "get_gpu_ids", "EnvironmentVariable", "env",
+           "SCRIPT_MODE", "WORKER_MODE", "PYTHON_MODE", "SILENT_MODE",
+           "__version__"]
+
 import ctypes
 # Windows only
 if hasattr(ctypes, "windll"):
-  # Makes sure that all child processes die when we die
-  # Also makes sure that fatal crashes result in process termination rather than an error dialog (the latter is annoying since we have a lot of processes)
-  # This is done by associating all child processes with a "job" object that imposes this behavior.
-  (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32)
-
-from ray.worker import register_class, error_info, init, connect, disconnect, get, put, wait, remote, log_event, log_span, flush_log
-from ray.actor import actor
-from ray.actor import get_gpu_ids
-from ray.worker import EnvironmentVariable, env
-from ray.worker import SCRIPT_MODE, WORKER_MODE, PYTHON_MODE, SILENT_MODE
+  # Makes sure that all child processes die when we die. Also makes sure that
+  # fatal crashes result in process termination rather than an error dialog
+  # (the latter is annoying since we have a lot of processes). This is done by
+  # associating all child processes with a "job" object that imposes this
+  # behavior.
+  (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32)  # noqa: E501
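
The pattern above, moving the re-exports to the top of the module, listing them in __all__, and tagging the one unwrappable line with # noqa: E501, is what keeps flake8 quiet: pyflakes (the engine behind flake8's F-checks) generally treats names listed in __all__ as intentional re-exports rather than unused imports. A small, self-contained sketch of the same pattern, using stdlib names purely for illustration:

# Hypothetical module illustrating the same PEP8-friendly layout:
# wrapped imports, an explicit __all__, and one targeted per-line noqa.
from os.path import (join, split, basename,
                     dirname)

__version__ = "0.01"

# Listing the imported names here marks them as deliberate re-exports.
__all__ = ["join", "split", "basename", "dirname", "__version__"]

# A line that cannot reasonably be wrapped is silenced individually.
LONG_EXAMPLE = "x" * 60 + "  -- this literal deliberately pushes the line well past seventy-nine characters"  # noqa: E501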

python/ray/actor.py (+60 -21)
@@ -18,6 +18,7 @@
 # the worker is currently allowed to use.
 gpu_ids = []

+
 def get_gpu_ids():
   """Get the IDs of the GPU that are available to the worker.

@@ -26,12 +27,15 @@ def get_gpu_ids():
   """
   return gpu_ids

+
 def random_string():
   return np.random.bytes(20)

+
 def random_actor_id():
   return ray.local_scheduler.ObjectID(random_string())

+
 def get_actor_method_function_id(attr):
   """Get the function ID corresponding to an actor method.

@@ -47,10 +51,14 @@ def get_actor_method_function_id(attr):
   assert len(function_id) == 20
   return ray.local_scheduler.ObjectID(function_id)

+
 def fetch_and_register_actor(key, worker):
   """Import an actor."""
-  driver_id, actor_id_str, actor_name, module, pickled_class, assigned_gpu_ids, actor_method_names = \
-      worker.redis_client.hmget(key, ["driver_id", "actor_id", "name", "module", "class", "gpu_ids", "actor_method_names"])
+  (driver_id, actor_id_str, actor_name,
+   module, pickled_class, assigned_gpu_ids,
+   actor_method_names) = worker.redis_client.hmget(
+      key, ["driver_id", "actor_id", "name", "module", "class", "gpu_ids",
+            "actor_method_names"])
   actor_id = ray.local_scheduler.ObjectID(actor_id_str)
   actor_name = actor_name.decode("ascii")
   module = module.decode("ascii")
@@ -64,12 +72,14 @@ def fetch_and_register_actor(key, worker):
   class TemporaryActor(object):
     pass
   worker.actors[actor_id_str] = TemporaryActor()
+
   def temporary_actor_method(*xs):
     raise Exception("The actor with name {} failed to be imported, and so "
                     "cannot execute this method".format(actor_name))
   for actor_method_name in actor_method_names:
     function_id = get_actor_method_function_id(actor_method_name).id()
-    worker.functions[driver_id][function_id] = (actor_method_name, temporary_actor_method)
+    worker.functions[driver_id][function_id] = (actor_method_name,
+                                                temporary_actor_method)

   try:
     unpickled_class = pickling.loads(pickled_class)
@@ -84,11 +94,15 @@ def temporary_actor_method(*xs):
   # TODO(pcm): Why is the below line necessary?
   unpickled_class.__module__ = module
   worker.actors[actor_id_str] = unpickled_class.__new__(unpickled_class)
-  for (k, v) in inspect.getmembers(unpickled_class, predicate=(lambda x: inspect.isfunction(x) or inspect.ismethod(x))):
+  for (k, v) in inspect.getmembers(
+      unpickled_class, predicate=(lambda x: (inspect.isfunction(x) or
+                                             inspect.ismethod(x)))):
     function_id = get_actor_method_function_id(k).id()
     worker.functions[driver_id][function_id] = (k, v)
-  # We do not set worker.function_properties[driver_id][function_id] because
-  # we currently do need the actor worker to submit new tasks for the actor.
+  # We do not set worker.function_properties[driver_id][function_id]
+  # because we currently do need the actor worker to submit new tasks for
+  # the actor.
+

 def select_local_scheduler(local_schedulers, num_gpus, worker):
   """Select a local scheduler to assign this actor to.
@@ -119,26 +133,33 @@ def select_local_scheduler(local_schedulers, num_gpus, worker):
   # Loop through all of the local schedulers.
   for local_scheduler in local_schedulers:
     # See if there are enough available GPUs on this local scheduler.
-    local_scheduler_total_gpus = int(float(local_scheduler[b"num_gpus"].decode("ascii")))
-    gpus_in_use = worker.redis_client.hget(local_scheduler[b"ray_client_id"], b"gpus_in_use")
+    local_scheduler_total_gpus = int(float(
+        local_scheduler[b"num_gpus"].decode("ascii")))
+    gpus_in_use = worker.redis_client.hget(local_scheduler[b"ray_client_id"],
+                                           b"gpus_in_use")
     gpus_in_use = 0 if gpus_in_use is None else int(gpus_in_use)
     if gpus_in_use + num_gpus <= local_scheduler_total_gpus:
       # Attempt to reserve some GPUs for this actor.
-      new_gpus_in_use = worker.redis_client.hincrby(local_scheduler[b"ray_client_id"], b"gpus_in_use", num_gpus)
+      new_gpus_in_use = worker.redis_client.hincrby(
+          local_scheduler[b"ray_client_id"], b"gpus_in_use", num_gpus)
       if new_gpus_in_use > local_scheduler_total_gpus:
         # If we failed to reserve the GPUs, undo the increment.
-        worker.redis_client.hincrby(local_scheduler[b"ray_client_id"], b"gpus_in_use", num_gpus)
+        worker.redis_client.hincrby(local_scheduler[b"ray_client_id"],
+                                    b"gpus_in_use", num_gpus)
       else:
         # We succeeded at reserving the GPUs, so we are done.
         local_scheduler_id = local_scheduler[b"ray_client_id"]
         gpu_ids = list(range(new_gpus_in_use - num_gpus, new_gpus_in_use))
         break
   if local_scheduler_id is None:
     raise Exception("Could not find a node with enough GPUs to create this "
-                    "actor. The local scheduler information is {}.".format(local_schedulers))
+                    "actor. The local scheduler information is {}."
+                    .format(local_schedulers))
   return local_scheduler_id, gpu_ids

-def export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus, worker):
+
+def export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus,
+                 worker):
   """Export an actor to redis.

   Args:
@@ -158,13 +179,16 @@ def export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus, worker
   driver_id = worker.task_driver_id.id()
   for actor_method_name in actor_method_names:
     function_id = get_actor_method_function_id(actor_method_name).id()
-    worker.function_properties[driver_id][function_id] = (1, num_cpus, num_gpus)
+    worker.function_properties[driver_id][function_id] = (1, num_cpus,
+                                                          num_gpus)

   # Select a local scheduler for the actor.
   local_schedulers = state.get_local_schedulers(worker)
-  local_scheduler_id, gpu_ids = select_local_scheduler(local_schedulers, num_gpus, worker)
+  local_scheduler_id, gpu_ids = select_local_scheduler(local_schedulers,
+                                                       num_gpus, worker)

-  worker.redis_client.publish("actor_notifications", actor_id.id() + local_scheduler_id)
+  worker.redis_client.publish("actor_notifications",
+                              actor_id.id() + local_scheduler_id)

   d = {"driver_id": driver_id,
        "actor_id": actor_id.id(),
@@ -176,6 +200,7 @@ def export_actor(actor_id, Class, actor_method_names, num_cpus, num_gpus, worker
   worker.redis_client.hmset(key, d)
   worker.redis_client.rpush("Exports", key)

+
 def actor(*args, **kwargs):
   def make_actor_decorator(num_cpus=1, num_gpus=0):
     def make_actor(Class):
@@ -189,7 +214,8 @@ def actor_method_call(actor_id, attr, *args, **kwargs):
           raise Exception("Actors currently do not support **kwargs.")
         function_id = get_actor_method_function_id(attr)
         # TODO(pcm): Extend args with keyword args.
-        object_ids = ray.worker.global_worker.submit_task(function_id, "", args,
+        object_ids = ray.worker.global_worker.submit_task(function_id, "",
+                                                          args,
                                                           actor_id=actor_id)
         if len(object_ids) == 1:
           return object_ids[0]
@@ -199,24 +225,34 @@ def actor_method_call(actor_id, attr, *args, **kwargs):
       class NewClass(object):
         def __init__(self, *args, **kwargs):
           self._ray_actor_id = random_actor_id()
-          self._ray_actor_methods = {k: v for (k, v) in inspect.getmembers(Class, predicate=(lambda x: inspect.isfunction(x) or inspect.ismethod(x)))}
-          export_actor(self._ray_actor_id, Class, self._ray_actor_methods.keys(), num_cpus, num_gpus, ray.worker.global_worker)
+          self._ray_actor_methods = {
+              k: v for (k, v) in inspect.getmembers(
+                  Class, predicate=(lambda x: (inspect.isfunction(x) or
+                                               inspect.ismethod(x))))}
+          export_actor(self._ray_actor_id, Class,
+                       self._ray_actor_methods.keys(), num_cpus, num_gpus,
+                       ray.worker.global_worker)
           # Call __init__ as a remote function.
           if "__init__" in self._ray_actor_methods.keys():
             actor_method_call(self._ray_actor_id, "__init__", *args, **kwargs)
           else:
             print("WARNING: this object has no __init__ method.")
+
         # Make tab completion work.
         def __dir__(self):
           return self._ray_actor_methods
+
         def __getattribute__(self, attr):
           # The following is needed so we can still access self.actor_methods.
           if attr in ["_ray_actor_id", "_ray_actor_methods"]:
             return super(NewClass, self).__getattribute__(attr)
           if attr in self._ray_actor_methods.keys():
-            return lambda *args, **kwargs: actor_method_call(self._ray_actor_id, attr, *args, **kwargs)
+            return lambda *args, **kwargs: actor_method_call(
+                self._ray_actor_id, attr, *args, **kwargs)
           # There is no method with this name, so raise an exception.
-          raise AttributeError("'{}' Actor object has no attribute '{}'".format(Class, attr))
+          raise AttributeError("'{}' Actor object has no attribute '{}'"
+                               .format(Class, attr))
+
         def __repr__(self):
           return "Actor(" + self._ray_actor_id.hex() + ")"

@@ -230,7 +266,9 @@ def __repr__(self):
     return make_actor_decorator(num_cpus=1, num_gpus=0)(Class)

   # In this case, the actor decorator is something like @ray.actor(num_gpus=1).
-  if len(args) == 0 and len(kwargs) > 0 and all([key in ["num_cpus", "num_gpus"] for key in kwargs.keys()]):
+  if len(args) == 0 and len(kwargs) > 0 and all([key
+                                                 in ["num_cpus", "num_gpus"]
+                                                 for key in kwargs.keys()]):
     num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs.keys() else 1
     num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs.keys() else 0
     return make_actor_decorator(num_cpus=num_cpus, num_gpus=num_gpus)
@@ -240,4 +278,5 @@ def __repr__(self):
                     "some of the arguments 'num_cpus' or 'num_gpus' as in "
                     "'ray.actor(num_gpus=1)'.")

+
 ray.worker.global_worker.fetch_and_register["Actor"] = fetch_and_register_actor
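
The GPU bookkeeping in select_local_scheduler above is an optimistic reserve-then-check against Redis: HINCRBY the node's gpus_in_use field, and if the new total exceeds the node's capacity, roll the reservation back and move on to the next node. The sketch below shows that pattern in isolation; the key and field names are hypothetical, it assumes a local Redis server and the redis-py package, and it undoes a failed reservation by decrementing the counter.

# Sketch: optimistic GPU reservation against Redis, similar in spirit to
# select_local_scheduler above. Key/field names here are hypothetical.
import redis


def try_reserve_gpus(client, node_key, num_gpus, total_gpus):
    """Try to reserve num_gpus on a node; return the reserved IDs or None."""
    # Optimistically claim the GPUs by incrementing the shared counter.
    new_in_use = client.hincrby(node_key, "gpus_in_use", num_gpus)
    if new_in_use > total_gpus:
        # Over-subscribed: roll the reservation back and report failure.
        client.hincrby(node_key, "gpus_in_use", -num_gpus)
        return None
    # The reserved GPU IDs are the last num_gpus slots of the new total.
    return list(range(new_in_use - num_gpus, new_in_use))


if __name__ == "__main__":
    client = redis.StrictRedis(host="localhost", port=6379)
    client.hset("node:1", "gpus_in_use", 0)
    print(try_reserve_gpus(client, "node:1", num_gpus=2, total_gpus=4))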
