[NOMERG] PPO-Myo #1514

Draft: wants to merge 18 commits into base: main

14 changes: 13 additions & 1 deletion examples/distributed/collectors/single_machine/generic.py
@@ -32,6 +32,7 @@
 from torchrl.collectors.distributed import DistributedDataCollector
 from torchrl.envs import EnvCreator, ParallelEnv
 from torchrl.envs.libs.gym import GymEnv
+from torchrl.envs.libs.robohive import RoboHiveEnv

 parser = ArgumentParser()
 parser.add_argument(
@@ -80,6 +81,16 @@
     default="ALE/Pong-v5",
     help="Gym environment to be run.",
 )
+LIBS = {
+    "gym": GymEnv,
+    "robohive": RoboHiveEnv,
+}
+parser.add_argument(
+    "--lib",
+    default="gym",
+    help="Lib backend",
+    choices=list(LIBS.keys()),
+)
 if __name__ == "__main__":
     args = parser.parse_args()
     num_workers = args.num_workers
@@ -89,7 +100,8 @@

     device_count = torch.cuda.device_count()

-    make_env = EnvCreator(lambda: GymEnv(args.env))
+    lib = LIBS[args.lib]
+    make_env = EnvCreator(lambda: lib(args.env))
     if args.worker_parallelism == "collector" or num_workers == 1:
         action_spec = make_env().action_spec
     else:
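
The same LIBS lookup and --lib flag recur verbatim in rpc.py and sync.py below. As a minimal standalone sketch of the pattern (assuming gym and robohive are both importable; the env ID is illustrative, not from the PR):

# Sketch of the backend-selection pattern added in this PR.
# "CartPole-v1" is an illustrative env ID, not one used by the examples.
from torchrl.envs import EnvCreator
from torchrl.envs.libs.gym import GymEnv
from torchrl.envs.libs.robohive import RoboHiveEnv

LIBS = {"gym": GymEnv, "robohive": RoboHiveEnv}

def make_env_fn(lib_name, env_name):
    # EnvCreator defers construction, so each collector worker
    # builds its own environment instance.
    lib = LIBS[lib_name]
    return EnvCreator(lambda: lib(env_name))

if __name__ == "__main__":
    make_env = make_env_fn("gym", "CartPole-v1")
    print(make_env().action_spec)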
11 changes: 11 additions & 0 deletions examples/distributed/collectors/single_machine/rpc.py
@@ -27,6 +27,7 @@
 from torchrl.collectors.distributed import RPCDataCollector
 from torchrl.envs import EnvCreator, ParallelEnv
 from torchrl.envs.libs.gym import GymEnv
+from torchrl.envs.libs.robohive import RoboHiveEnv

 parser = ArgumentParser()
 parser.add_argument(
@@ -63,6 +64,16 @@
     default="ALE/Pong-v5",
     help="Gym environment to be run.",
 )
+LIBS = {
+    "gym": GymEnv,
+    "robohive": RoboHiveEnv,
+}
+parser.add_argument(
+    "--lib",
+    default="gym",
+    help="Lib backend",
+    choices=list(LIBS.keys()),
+)
 if __name__ == "__main__":
     args = parser.parse_args()
     num_workers = args.num_workers
11 changes: 11 additions & 0 deletions examples/distributed/collectors/single_machine/sync.py
@@ -32,6 +32,7 @@
 from torchrl.collectors.distributed import DistributedSyncDataCollector
 from torchrl.envs import EnvCreator, ParallelEnv
 from torchrl.envs.libs.gym import GymEnv
+from torchrl.envs.libs.robohive import RoboHiveEnv

 parser = ArgumentParser()
 parser.add_argument(
@@ -75,6 +76,16 @@
     default="ALE/Pong-v5",
     help="Gym environment to be run.",
 )
+LIBS = {
+    "gym": GymEnv,
+    "robohive": RoboHiveEnv,
+}
+parser.add_argument(
+    "--lib",
+    default="gym",
+    help="Lib backend",
+    choices=list(LIBS.keys()),
+)
 if __name__ == "__main__":
     args = parser.parse_args()
     num_workers = args.num_workers
46 changes: 0 additions & 46 deletions examples/ppo/config.yaml

This file was deleted.

35 changes: 35 additions & 0 deletions examples/ppo/config_atari.yaml
@@ -0,0 +1,35 @@
+# Environment
+env:
+  env_name: PongNoFrameskip-v4
+
+# collector
+collector:
+  frames_per_batch: 4096
+  total_frames: 40_000_000
+
+# logger
+logger:
+  backend: wandb
+  exp_name: Atari_Schulman17
+  test_interval: 40_000_000
+  num_test_episodes: 3
+
+# Optim
+optim:
+  lr: 2.5e-4
+  eps: 1.0e-6
+  weight_decay: 0.0
+  max_grad_norm: 0.5
+  anneal_lr: True
+
+# loss
+loss:
+  gamma: 0.99
+  mini_batch_size: 1024
+  ppo_epochs: 3
+  gae_lambda: 0.95
+  clip_epsilon: 0.1
+  anneal_clip_epsilon: True
+  critic_coef: 1.0
+  entropy_coef: 0.01
+  loss_critic_type: l2
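
A quick sanity check on what these collector and loss settings imply for the update loop (plain arithmetic, not code from the PR):

# Update-loop arithmetic implied by config_atari.yaml.
frames_per_batch = 4096
total_frames = 40_000_000
mini_batch_size = 1024
ppo_epochs = 3

minibatches_per_epoch = frames_per_batch // mini_batch_size  # 4
grad_steps_per_batch = ppo_epochs * minibatches_per_epoch    # 12
collection_batches = total_frames // frames_per_batch        # 9765
print(minibatches_per_epoch, grad_steps_per_batch, collection_batches)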
43 changes: 0 additions & 43 deletions examples/ppo/config_example2.yaml

This file was deleted.

32 changes: 32 additions & 0 deletions examples/ppo/config_mujoco.yaml
@@ -0,0 +1,32 @@
+# task and env
+env:
+  env_name: HalfCheetah-v3
+
+# collector
+collector:
+  frames_per_batch: 2048
+  total_frames: 1_000_000
+
+# logger
+logger:
+  backend: wandb
+  exp_name: Mujoco_Schulman17
+  test_interval: 1_000_000
+  num_test_episodes: 5
+
+# Optim
+optim:
+  lr: 3e-4
+  weight_decay: 0.0
+  anneal_lr: False
+
+# loss
+loss:
+  gamma: 0.99
+  mini_batch_size: 64
+  ppo_epochs: 10
+  gae_lambda: 0.95
+  clip_epsilon: 0.2
+  critic_coef: 0.25
+  entropy_coef: 0.0
+  loss_critic_type: l2
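
For reference, here is how the loss block above would wire into TorchRL's PPO objects. A sketch only: the toy one-layer networks stand in for the PR's actual models, and only the hyperparameter values come from the config:

# Map config_mujoco.yaml's loss block onto ClipPPOLoss / GAE.
# The linear actor and critic are placeholders, not the PR's networks.
import torch.nn as nn
from tensordict.nn import NormalParamExtractor, TensorDictModule
from torchrl.modules import ProbabilisticActor, TanhNormal, ValueOperator
from torchrl.objectives import ClipPPOLoss
from torchrl.objectives.value import GAE

obs_dim, act_dim = 17, 6  # HalfCheetah-v3 observation/action sizes
policy_net = nn.Sequential(nn.Linear(obs_dim, 2 * act_dim), NormalParamExtractor())
policy_module = TensorDictModule(
    policy_net, in_keys=["observation"], out_keys=["loc", "scale"]
)
actor = ProbabilisticActor(
    policy_module,
    in_keys=["loc", "scale"],
    distribution_class=TanhNormal,
    return_log_prob=True,
)
critic = ValueOperator(nn.Linear(obs_dim, 1), in_keys=["observation"])

adv_module = GAE(gamma=0.99, lmbda=0.95, value_network=critic)  # gamma / gae_lambda
loss_module = ClipPPOLoss(
    actor,
    critic,
    clip_epsilon=0.2,       # loss.clip_epsilon
    entropy_coef=0.0,       # loss.entropy_coef
    critic_coef=0.25,       # loss.critic_coef
    loss_critic_type="l2",  # loss.loss_critic_type
)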
33 changes: 33 additions & 0 deletions examples/ppo/config_myo.yaml
@@ -0,0 +1,33 @@
+# task and env
+env:
+  env_name: myoHandReachRandom-v0
+
+# collector
+collector:
+  frames_per_batch: 2048
+  total_frames: 1_000_000
+  num_envs: 1
+
+# logger
+logger:
+  backend: wandb
+  exp_name: myo_hand_reach
+  test_interval: 1_000_000
+  num_test_episodes: 5
+
+# Optim
+optim:
+  lr: 3e-4
+  weight_decay: 0.0
+  anneal_lr: False
+
+# loss
+loss:
+  gamma: 0.99
+  mini_batch_size: 64
+  ppo_epochs: 10
+  gae_lambda: 0.95
+  clip_epsilon: 0.2
+  critic_coef: 0.25
+  entropy_coef: 0.0
+  loss_critic_type: l2
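
This config points at a MyoSuite task served through the RoboHive backend added to the collector examples above. A minimal sketch of building that environment from the config (assuming robohive and its myo suite are installed):

# Instantiate the env named in config_myo.yaml via the RoboHive backend.
from torchrl.envs.libs.robohive import RoboHiveEnv

env = RoboHiveEnv("myoHandReachRandom-v0")  # env.env_name from the config
td = env.reset()
print(env.action_spec)
print(td)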