
Commit e0867c8

robertnishihara authored and pcmoritz committed
Switch Python indentation from 2 spaces to 4 spaces. (ray-project#726)
* 4 space indentation for actor.py.
* 4 space indentation for worker.py.
* 4 space indentation for more files.
* 4 space indentation for some test files.
* Check indentation in Travis.
* 4 space indentation for some rl files.
* Fix failure test.
* Fix multi_node_test.
* 4 space indentation for more files.
* 4 space indentation for remaining files.
* Fixes.
1 parent 310ba82 commit e0867c8


100 files changed: +18055 -17558 lines


.gitignore (+3)

@@ -17,6 +17,9 @@
 # Python byte code files
 *.pyc

+# Backup files
+*.bak
+
 # Emacs temporary files
 *~
 *#

.travis.yml (+1, -2)

@@ -37,8 +37,7 @@ matrix:
         - sphinx-build -W -b html -d _build/doctrees source _build/html
         - cd ..
         # Run Python linting.
-        - flake8 --ignore=E111,E114
-          --exclude=python/ray/core/src/common/flatbuffers_ep-prefix/,python/ray/core/generated/,src/numbuf/thirdparty/,src/common/format/,doc/source/conf.py
+        - flake8 --exclude=python/ray/core/src/common/flatbuffers_ep-prefix/,python/ray/core/generated/,src/numbuf/thirdparty/,src/common/format/,doc/source/conf.py
     - os: linux
       dist: trusty
       env: VALGRIND=1 PYTHON=2.7
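For context, E111 ("indentation is not a multiple of four") and E114 (the same check applied to comment lines) are the pycodestyle codes that were previously ignored; with --ignore=E111,E114 removed, flake8 now rejects the old 2-space style. A minimal sketch of what the linter flags (hypothetical file, not part of this commit):

# lint_example.py -- hypothetical file, used only to illustrate E111/E114.


def two_space(x):
  # Indented by 2 spaces: flake8 reports E114 for this comment line
  # and E111 for the statement below it, now that the codes are enforced.
  return x + 1


def four_space(x):
    # Indented by 4 spaces: passes the linter.
    return x + 1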

examples/hyperopt/hyperopt_adaptive.py (+125, -121)

@@ -27,124 +27,128 @@
 
 
 if __name__ == "__main__":
-  args = parser.parse_args()
-
-  ray.init(redis_address=args.redis_address)
-
-  # The number of training passes over the dataset to use for network.
-  steps = args.steps_per_segment
-
-  # Load the mnist data and turn the data into remote objects.
-  print("Downloading the MNIST dataset. This may take a minute.")
-  mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
-  train_images = ray.put(mnist.train.images)
-  train_labels = ray.put(mnist.train.labels)
-  validation_images = ray.put(mnist.validation.images)
-  validation_labels = ray.put(mnist.validation.labels)
-
-  # Keep track of the accuracies that we've seen at different numbers of
-  # iterations.
-  accuracies_by_num_steps = defaultdict(lambda: [])
-
-  # Define a method to determine if an experiment looks promising or not.
-  def is_promising(experiment_info):
-    accuracies = experiment_info["accuracies"]
-    total_num_steps = experiment_info["total_num_steps"]
-    comparable_accuracies = accuracies_by_num_steps[total_num_steps]
-    if len(comparable_accuracies) == 0:
-      if len(accuracies) == 1:
-        # This means that we haven't seen anything finish yet, so keep running
-        # this experiment.
-        return True
-      else:
-        # The experiment is promising if the second half of the accuracies are
-        # better than the first half of the accuracies.
-        return (np.mean(accuracies[:len(accuracies) // 2]) <
-                np.mean(accuracies[len(accuracies) // 2:]))
-    # Otherwise, continue running the experiment if it is in the top half of
-    # experiments we've seen so far at this point in time.
-    return np.mean(accuracy > np.array(comparable_accuracies)) > 0.5
-
-  # Keep track of all of the experiment segments that we're running. This
-  # dictionary uses the object ID of the experiment as the key.
-  experiment_info = {}
-  # Keep track of the curently running experiment IDs.
-  remaining_ids = []
-
-  # Keep track of the best hyperparameters and the best accuracy.
-  best_hyperparameters = None
-  best_accuracy = 0
-
-  # A function for generating random hyperparameters.
-  def generate_hyperparameters():
-    return {"learning_rate": 10 ** np.random.uniform(-5, 5),
-            "batch_size": np.random.randint(1, 100),
-            "dropout": np.random.uniform(0, 1),
-            "stddev": 10 ** np.random.uniform(-5, 5)}
-
-  # Launch some initial experiments.
-  for _ in range(args.num_starting_segments):
-    hyperparameters = generate_hyperparameters()
-    experiment_id = objective.train_cnn_and_compute_accuracy.remote(
-        hyperparameters, steps, train_images, train_labels, validation_images,
-        validation_labels)
-    experiment_info[experiment_id] = {"hyperparameters": hyperparameters,
-                                      "total_num_steps": steps,
-                                      "accuracies": []}
-    remaining_ids.append(experiment_id)
-
-  for _ in range(args.num_segments):
-    # Wait for a segment of an experiment to finish.
-    ready_ids, remaining_ids = ray.wait(remaining_ids, num_returns=1)
-    experiment_id = ready_ids[0]
-    # Get the accuracy and the weights.
-    accuracy, weights = ray.get(experiment_id)
-    # Update the experiment info.
-    previous_info = experiment_info[experiment_id]
-    previous_info["accuracies"].append(accuracy)
-
-    # Update the best accuracy and best hyperparameters.
-    if accuracy > best_accuracy:
-      best_hyperparameters = hyperparameters
-      best_accuracy = accuracy
-
-    if is_promising(previous_info):
-      # If the experiment still looks promising, then continue running it.
-      print("Continuing to run the experiment with hyperparameters {}.".format(
-          previous_info["hyperparameters"]))
-      new_hyperparameters = previous_info["hyperparameters"]
-      new_info = {"hyperparameters": new_hyperparameters,
-                  "total_num_steps": previous_info["total_num_steps"] + steps,
-                  "accuracies": previous_info["accuracies"][:]}
-      starting_weights = weights
-    else:
-      # If the experiment does not look promising, start a new experiment.
-      print("Ending the experiment with hyperparameters {}.".format(
-          previous_info["hyperparameters"]))
-      new_hyperparameters = generate_hyperparameters()
-      new_info = {"hyperparameters": new_hyperparameters,
-                  "total_num_steps": steps,
-                  "accuracies": []}
-      starting_weights = None
-
-    # Start running the next segment.
-    new_experiment_id = objective.train_cnn_and_compute_accuracy.remote(
-        new_hyperparameters, steps, train_images, train_labels,
-        validation_images, validation_labels, weights=starting_weights)
-    experiment_info[new_experiment_id] = new_info
-    remaining_ids.append(new_experiment_id)
-
-    # Update the set of all accuracies that we've seen.
-    accuracies_by_num_steps[previous_info["total_num_steps"]].append(accuracy)
-
-  # Record the best performing set of hyperparameters.
-  print("""Best accuracy was {:.3} with
-      learning_rate: {:.2}
-      batch_size: {}
-      dropout: {:.2}
-      stddev: {:.2}
-      """.format(100 * best_accuracy,
-                 best_hyperparameters["learning_rate"],
-                 best_hyperparameters["batch_size"],
-                 best_hyperparameters["dropout"],
-                 best_hyperparameters["stddev"]))
+    args = parser.parse_args()
+
+    ray.init(redis_address=args.redis_address)
+
+    # The number of training passes over the dataset to use for network.
+    steps = args.steps_per_segment
+
+    # Load the mnist data and turn the data into remote objects.
+    print("Downloading the MNIST dataset. This may take a minute.")
+    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
+    train_images = ray.put(mnist.train.images)
+    train_labels = ray.put(mnist.train.labels)
+    validation_images = ray.put(mnist.validation.images)
+    validation_labels = ray.put(mnist.validation.labels)
+
+    # Keep track of the accuracies that we've seen at different numbers of
+    # iterations.
+    accuracies_by_num_steps = defaultdict(lambda: [])
+
+    # Define a method to determine if an experiment looks promising or not.
+    def is_promising(experiment_info):
+        accuracies = experiment_info["accuracies"]
+        total_num_steps = experiment_info["total_num_steps"]
+        comparable_accuracies = accuracies_by_num_steps[total_num_steps]
+        if len(comparable_accuracies) == 0:
+            if len(accuracies) == 1:
+                # This means that we haven't seen anything finish yet, so keep
+                # running this experiment.
+                return True
+            else:
+                # The experiment is promising if the second half of the
+                # accuracies are better than the first half of the accuracies.
+                return (np.mean(accuracies[:len(accuracies) // 2]) <
+                        np.mean(accuracies[len(accuracies) // 2:]))
+        # Otherwise, continue running the experiment if it is in the top half
+        # of experiments we've seen so far at this point in time.
+        return np.mean(accuracy > np.array(comparable_accuracies)) > 0.5
+
+    # Keep track of all of the experiment segments that we're running. This
+    # dictionary uses the object ID of the experiment as the key.
+    experiment_info = {}
+    # Keep track of the curently running experiment IDs.
+    remaining_ids = []
+
+    # Keep track of the best hyperparameters and the best accuracy.
+    best_hyperparameters = None
+    best_accuracy = 0
+
+    # A function for generating random hyperparameters.
+    def generate_hyperparameters():
+        return {"learning_rate": 10 ** np.random.uniform(-5, 5),
+                "batch_size": np.random.randint(1, 100),
+                "dropout": np.random.uniform(0, 1),
+                "stddev": 10 ** np.random.uniform(-5, 5)}
+
+    # Launch some initial experiments.
+    for _ in range(args.num_starting_segments):
+        hyperparameters = generate_hyperparameters()
+        experiment_id = objective.train_cnn_and_compute_accuracy.remote(
+            hyperparameters, steps, train_images, train_labels,
+            validation_images, validation_labels)
+        experiment_info[experiment_id] = {"hyperparameters": hyperparameters,
+                                          "total_num_steps": steps,
+                                          "accuracies": []}
+        remaining_ids.append(experiment_id)
+
+    for _ in range(args.num_segments):
+        # Wait for a segment of an experiment to finish.
+        ready_ids, remaining_ids = ray.wait(remaining_ids, num_returns=1)
+        experiment_id = ready_ids[0]
+        # Get the accuracy and the weights.
+        accuracy, weights = ray.get(experiment_id)
+        # Update the experiment info.
+        previous_info = experiment_info[experiment_id]
+        previous_info["accuracies"].append(accuracy)
+
+        # Update the best accuracy and best hyperparameters.
+        if accuracy > best_accuracy:
+            best_hyperparameters = hyperparameters
+            best_accuracy = accuracy
+
+        if is_promising(previous_info):
+            # If the experiment still looks promising, then continue running
+            # it.
+            print("Continuing to run the experiment with hyperparameters {}."
+                  .format(previous_info["hyperparameters"]))
+            new_hyperparameters = previous_info["hyperparameters"]
+            new_info = {"hyperparameters": new_hyperparameters,
+                        "total_num_steps": (previous_info["total_num_steps"] +
+                                            steps),
+                        "accuracies": previous_info["accuracies"][:]}
+            starting_weights = weights
+        else:
+            # If the experiment does not look promising, start a new
+            # experiment.
+            print("Ending the experiment with hyperparameters {}."
+                  .format(previous_info["hyperparameters"]))
+            new_hyperparameters = generate_hyperparameters()
+            new_info = {"hyperparameters": new_hyperparameters,
+                        "total_num_steps": steps,
+                        "accuracies": []}
+            starting_weights = None
+
+        # Start running the next segment.
+        new_experiment_id = objective.train_cnn_and_compute_accuracy.remote(
+            new_hyperparameters, steps, train_images, train_labels,
+            validation_images, validation_labels, weights=starting_weights)
+        experiment_info[new_experiment_id] = new_info
+        remaining_ids.append(new_experiment_id)
+
+        # Update the set of all accuracies that we've seen.
+        accuracies_by_num_steps[previous_info["total_num_steps"]].append(
+            accuracy)
+
+    # Record the best performing set of hyperparameters.
+    print("""Best accuracy was {:.3} with
+      learning_rate: {:.2}
+      batch_size: {}
+      dropout: {:.2}
+      stddev: {:.2}
+      """.format(100 * best_accuracy,
+                 best_hyperparameters["learning_rate"],
+                 best_hyperparameters["batch_size"],
+                 best_hyperparameters["dropout"],
+                 best_hyperparameters["stddev"]))
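Aside from the reindentation, the structure of the example above is unchanged: it launches training segments as Ray tasks and uses ray.wait to react to whichever segment finishes first. A minimal standalone sketch of that pattern, with a toy objective standing in for the MNIST training task and a local ray.init() assumed (not part of this commit):

import random

import ray

ray.init()


@ray.remote
def train_segment(learning_rate):
    # Stand-in for one training segment; returns a fake "accuracy".
    return random.uniform(0, 1) * learning_rate


# Launch a few segments with random hyperparameters.
remaining_ids = [train_segment.remote(random.uniform(0.1, 1.0))
                 for _ in range(4)]

# Repeatedly take whichever segment finishes first and launch a replacement,
# mirroring the ray.wait loop in hyperopt_adaptive.py.
for _ in range(8):
    ready_ids, remaining_ids = ray.wait(remaining_ids, num_returns=1)
    accuracy = ray.get(ready_ids[0])
    print("Segment finished with accuracy {:.3f}".format(accuracy))
    remaining_ids.append(train_segment.remote(random.uniform(0.1, 1.0)))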
