[tune][minor] formatting examples, fix travis (ray-project#5869)
* formatting

* formatting
richardliaw authored and ericl committed Oct 9, 2019
1 parent a851d7e commit 1181924
Showing 6 changed files with 70 additions and 80 deletions.
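The recurring change across the tune examples below is replacing a `**{...}` dictionary unpacked into `run`/`tune.run` with direct keyword arguments. The two call styles are equivalent at runtime, so only readability changes; passing keywords directly also lets linters and editors check the call site, which a `**`-unpacked dict hides. A minimal sketch of the pattern, using a hypothetical `demo` function as a stand-in for the keyword interface of `tune.run`:

    def demo(stop=None, num_samples=1, config=None):
        # Stand-in for tune.run's keyword parameters (hypothetical).
        return stop, num_samples, config

    # Old style: build a dict and unpack it into the call.
    old = demo(**{
        "stop": {"training_iteration": 10},
        "num_samples": 4,
        "config": {"lr": 0.01},
    })

    # New style: pass the same values as direct keyword arguments.
    new = demo(stop={"training_iteration": 10}, num_samples=4, config={"lr": 0.01})

    assert old == new  # identical behavior, clearer call site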
5 changes: 4 additions & 1 deletion doc/examples/plot_hyperparameter.py
@@ -70,6 +70,7 @@ def get_data_loaders(batch_size):
         shuffle=True)
     return train_loader, test_loader
 
+
 #######################################################################
 # Setup: Defining the Neural Network
 # ----------------------------------
@@ -130,6 +131,7 @@ def test(model, test_loader, device=torch.device("cpu")):
 
     return correct / total
 
+
 #######################################################################
 # Evaluating the Hyperparameters
 # -------------------------------
@@ -141,6 +143,7 @@ def test(model, test_loader, device=torch.device("cpu")):
 #
 # The ``@ray.remote`` decorator defines a remote process.
 
+
 @ray.remote
 def evaluate_hyperparameters(config):
     model = ConvNet()
@@ -152,14 +155,14 @@ def evaluate_hyperparameters(config):
         train(model, optimizer, train_loader)
     return test(model, test_loader)
 
+
 #######################################################################
 # Synchronous Evaluation of Randomly Generated Hyperparameters
 # ------------------------------------------------------------
 #
 # We will create multiple sets of random hyperparameters for our neural
 # network that will be evaluated in parallel.
 
-
 # Keep track of the best hyperparameters and the best accuracy.
 best_hyperparameters = None
 best_accuracy = 0
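The blank lines added in plot_hyperparameter.py follow PEP 8's two-blank-line rule around top-level definitions; flake8 reports violations as E302/E305, and a lint failure of this kind is plausibly what the "fix travis" part of the commit title refers to (an assumption based on the diff, not stated in it). A minimal sketch of the convention being applied:

    def get_data_loaders(batch_size):
        # Body elided in this sketch; see the file above for the real code.
        return None, None


    # E302: a top-level definition needs two blank lines above it.
    def train(model):
        pass


    # E305 applies the same two-blank-line rule after the last definition.
    best_accuracy = 0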
23 changes: 9 additions & 14 deletions python/ray/tune/examples/async_hyperband_example.py
@@ -70,18 +70,13 @@ def _restore(self, checkpoint_path):
 run(MyTrainableClass,
     name="asynchyperband_test",
     scheduler=ahb,
-    **{
-        "stop": {
-            "training_iteration": 1 if args.smoke_test else 99999
-        },
-        "num_samples": 20,
-        "resources_per_trial": {
-            "cpu": 1,
-            "gpu": 0
-        },
-        "config": {
-            "width": sample_from(
-                lambda spec: 10 + int(90 * random.random())),
-            "height": sample_from(lambda spec: int(100 * random.random())),
-        },
+    stop={"training_iteration": 1 if args.smoke_test else 99999},
+    num_samples=20,
+    resources_per_trial={
+        "cpu": 1,
+        "gpu": 0
+    },
+    config={
+        "width": sample_from(lambda spec: 10 + int(90 * random.random())),
+        "height": sample_from(lambda spec: int(100 * random.random())),
     })
32 changes: 15 additions & 17 deletions python/ray/tune/examples/mnist_pytorch_trainable.py
@@ -69,23 +69,21 @@ def _restore(self, checkpoint_path):
 analysis = tune.run(
     TrainMNIST,
     scheduler=sched,
-    **{
-        "stop": {
-            "mean_accuracy": 0.95,
-            "training_iteration": 3 if args.smoke_test else 20,
-        },
-        "resources_per_trial": {
-            "cpu": 3,
-            "gpu": int(args.use_gpu)
-        },
-        "num_samples": 1 if args.smoke_test else 20,
-        "checkpoint_at_end": True,
-        "checkpoint_freq": 3,
-        "config": {
-            "args": args,
-            "lr": tune.uniform(0.001, 0.1),
-            "momentum": tune.uniform(0.1, 0.9),
-        }
+    stop={
+        "mean_accuracy": 0.95,
+        "training_iteration": 3 if args.smoke_test else 20,
+    },
+    resources_per_trial={
+        "cpu": 3,
+        "gpu": int(args.use_gpu)
+    },
+    num_samples=1 if args.smoke_test else 20,
+    checkpoint_at_end=True,
+    checkpoint_freq=3,
+    config={
+        "args": args,
+        "lr": tune.uniform(0.001, 0.1),
+        "momentum": tune.uniform(0.1, 0.9),
     })

print("Best config is:", analysis.get_best_config(metric="mean_accuracy"))
20 changes: 9 additions & 11 deletions python/ray/tune/examples/pbt_example.py
@@ -112,15 +112,13 @@ def reset_config(self, new_config):
     scheduler=pbt,
     reuse_actors=True,
     verbose=False,
-    **{
-        "stop": {
-            "training_iteration": 2000,
-        },
-        "num_samples": 4,
-        "config": {
-            "lr": 0.0001,
-            # note: this parameter is perturbed but has no effect on
-            # the model training in this example
-            "some_other_factor": 1,
-        },
+    stop={
+        "training_iteration": 2000,
+    },
+    num_samples=4,
+    config={
+        "lr": 0.0001,
+        # note: this parameter is perturbed but has no effect on
+        # the model training in this example
+        "some_other_factor": 1,
     })
40 changes: 19 additions & 21 deletions python/ray/tune/examples/pbt_ppo_example.py
@@ -53,26 +53,24 @@ def explore(config):
     "PPO",
     name="pbt_humanoid_test",
     scheduler=pbt,
-    **{
-        "num_samples": 8,
-        "config": {
-            "env": "Humanoid-v1",
-            "kl_coeff": 1.0,
-            "num_workers": 8,
-            "num_gpus": 1,
-            "model": {
-                "free_log_std": True
-            },
-            # These params are tuned from a fixed starting value.
-            "lambda": 0.95,
-            "clip_param": 0.2,
-            "lr": 1e-4,
-            # These params start off randomly drawn from a set.
-            "num_sgd_iter": sample_from(
-                lambda spec: random.choice([10, 20, 30])),
-            "sgd_minibatch_size": sample_from(
-                lambda spec: random.choice([128, 512, 2048])),
-            "train_batch_size": sample_from(
-                lambda spec: random.choice([10000, 20000, 40000]))
+    num_samples=8,
+    config={
+        "env": "Humanoid-v1",
+        "kl_coeff": 1.0,
+        "num_workers": 8,
+        "num_gpus": 1,
+        "model": {
+            "free_log_std": True
+        },
+        # These params are tuned from a fixed starting value.
+        "lambda": 0.95,
+        "clip_param": 0.2,
+        "lr": 1e-4,
+        # These params start off randomly drawn from a set.
+        "num_sgd_iter": sample_from(
+            lambda spec: random.choice([10, 20, 30])),
+        "sgd_minibatch_size": sample_from(
+            lambda spec: random.choice([128, 512, 2048])),
+        "train_batch_size": sample_from(
+            lambda spec: random.choice([10000, 20000, 40000]))
     })
30 changes: 14 additions & 16 deletions python/ray/tune/examples/tune_cifar10_gluon.py
@@ -206,20 +206,18 @@ def test():
     name=args.expname,
     verbose=2,
     scheduler=sched,
-    **{
-        "stop": {
-            "mean_accuracy": 0.98,
-            "training_iteration": 1 if args.smoke_test else args.epochs
-        },
-        "resources_per_trial": {
-            "cpu": int(args.num_workers),
-            "gpu": int(args.num_gpus)
-        },
-        "num_samples": 1 if args.smoke_test else args.num_samples,
-        "config": {
-            "lr": tune.sample_from(
-                lambda spec: np.power(10.0, np.random.uniform(-4, -1))),
-            "momentum": tune.sample_from(
-                lambda spec: np.random.uniform(0.85, 0.95)),
-        }
+    stop={
+        "mean_accuracy": 0.98,
+        "training_iteration": 1 if args.smoke_test else args.epochs
+    },
+    resources_per_trial={
+        "cpu": int(args.num_workers),
+        "gpu": int(args.num_gpus)
+    },
+    num_samples=1 if args.smoke_test else args.num_samples,
+    config={
+        "lr": tune.sample_from(
+            lambda spec: np.power(10.0, np.random.uniform(-4, -1))),
+        "momentum": tune.sample_from(
+            lambda spec: np.random.uniform(0.85, 0.95)),
     })
