diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..13566b8 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,8 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml diff --git a/.idea/Unity ML Agents - Python API - Examples.iml b/.idea/Unity ML Agents - Python API - Examples.iml new file mode 100644 index 0000000..83f5ef8 --- /dev/null +++ b/.idea/Unity ML Agents - Python API - Examples.iml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000..0aa4e7f --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,31 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 0000000..105ce2d --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..670dc26 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..a1468a5 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/other.xml b/.idea/other.xml new file mode 100644 index 0000000..68993fb --- /dev/null +++ b/.idea/other.xml @@ -0,0 +1,6 @@ + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..94a25f7 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..4cf123e --- /dev/null +++ b/README.md @@ -0,0 +1,107 @@ +[//]: # (Image References) + +[image1]: https://user-images.githubusercontent.com/10624937/42386929-76f671f0-8106-11e8-9376-f17da2ae852e.png "Kernel" +# Reinforcement Learning Project + +This project was created to make it easier to get started with Reinforcement Learning. It now contains: +- An implementation of the [DDPG Algorithm](https://arxiv.org/abs/1509.02971) in Python, which works for both single-agent environments and multi-agent environments. +- Single and parallel environments in [Unity ML agents](https://unity.com/products/machine-learning-agents) using the [Python API](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Python-API.md). +- Two Jupyter notebooks: + - [3DBall.ipynb](notebooks/3DBall.ipynb): This is a simple example to get started with Unity ML Agents & the DDPG Algorithm. + - [3DBall_parallel_environment.ipynb](notebooks/3DBall_parallel_environment.ipynb): The same, but now for an environment run in parallel. + +# Getting Started + +## Install Basic Dependencies + +To set up your python environment to run the code in the notebooks, follow the instructions below. + +- If you're on Windows I recommend installing [Miniforge](https://github.com/conda-forge/miniforge). It's a minimal installer for Conda. I also recommend using the [Mamba](https://github.com/mamba-org/mamba) package manager instead of [Conda](https://docs.conda.io/). It works almost the same as Conda, but only faster. 
There's a [cheatsheet](https://docs.conda.io/projects/conda/en/latest/user-guide/cheatsheet.html) of Conda commands which also work in Mamba. To install Mamba, use this command: +```bash +conda install mamba -n base -c conda-forge +``` +- Create (and activate) a new environment with Python 3.6 or later. I recommend using Python 3.9: + + - __Linux__ or __Mac__: + ```bash + mamba create --name rl39 python=3.9 numpy + source activate rl39 + ``` + - __Windows__: + ```bash + mamba create --name rl39 python=3.9 numpy + activate rl39 + ``` +- Install PyTorch by following instructions on [Pytorch.org](https://pytorch.org/). For example, to install PyTorch on + Windows with GPU support, use this command: + +```bash +mamba install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch +``` + +- Install additional packages: +```bash +mamba install jupyter notebook matplotlib +``` + +- Create an [IPython kernel](http://ipython.readthedocs.io/en/stable/install/kernel_install.html) for the `rl39` environment in Jupyter. + +```bash +python -m ipykernel install --user --name rl39 --display-name "rl39" +``` + +- Change the kernel to match the `rl39` environment by using the drop-down menu `Kernel` -> `Change kernel` inside Jupyter Notebook. + +## Install Unity Machine Learning Agents + +**Note**: +In order to run the notebooks on **Windows**, it's not necessary to install the Unity Editor, because I have provided the [standalone executables](notebooks/README.md) of the environments for you. + +[Unity ML Agents](https://unity.com/products/machine-learning-agents) is the software that we use for the environments. The agents that we create in Python can interact with these environments. Unity ML Agents consists of several parts: +- [The Unity Editor](https://unity.com/) is used for creating environments. To install: + - Install [Unity Hub](https://unity.com/download). + - Install the latest version of Unity by clicking on the green button `Unity Hub` on the [download page](https://unity3d.com/get-unity/download/archive). + + To start the Unity editor you must first have a project: + + - Start the Unity Hub. + - Click on "Projects" + - Create a new dummy project. + - Click on the project you've just added in the Unity Hub. The Unity Editor should start now. + +- [The Unity ML-Agents Toolkit](https://github.com/Unity-Technologies/ml-agents#unity-ml-agents-toolkit). Download [the latest release](https://github.com/Unity-Technologies/ml-agents/releases) of the source code or use the [Git](https://git-scm.com/downloads/guis) command: `git clone --branch release_18 https://github.com/Unity-Technologies/ml-agents.git`. +- The Unity ML Agents package is used inside the Unity Editor. Please read [the instructions for installation](https://github.com/Unity-Technologies/ml-agents/blob/release_18_docs/docs/Installation.md#install-the-comunityml-agents-unity-package). +- The `mlagents` Python package is used as a bridge between Python and the Unity editor (or standalone executable). To install, use this command: `python -m pip install mlagents==0.27.0`. +Please note that there's no conda package available for this. + +## Install an IDE for Python + +For Windows, I would recommend using [PyCharm](https://www.jetbrains.com/pycharm/) (my choice), or [Visual Studio Code](https://code.visualstudio.com/). +Inside those IDEs you can use the Conda environment you have just created. 
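Once everything is installed, you can run a quick smoke test to confirm that the `mlagents` package can talk to a Unity environment. The snippet below is only a sketch based on the [Python API](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Python-API.md): pass `file_name=None` to connect to the Unity Editor (press Play after starting the script), or point it at one of the standalone executables; the path in the comment is just an example.

```python
from mlagents_envs.environment import UnityEnvironment

# Use file_name=None to connect to the Unity Editor, or pass a built executable,
# e.g. "3DBall_Windows_x86_64/UnityEnvironment.exe" (example path).
env = UnityEnvironment(file_name=None, seed=1, no_graphics=False)
env.reset()

behavior_name = list(env.behavior_specs)[0]
spec = env.behavior_specs[behavior_name]
print("Behavior:", behavior_name)
print("Observation shapes:", [obs.shape for obs in spec.observation_specs])
print("Continuous action size:", spec.action_spec.continuous_size)

# Take a few random actions to verify that stepping the simulation works.
for _ in range(10):
    decision_steps, terminal_steps = env.get_steps(behavior_name)
    env.set_actions(behavior_name, spec.action_spec.random_action(len(decision_steps)))
    env.step()

env.close()
```

If this prints the behavior name and runs without errors, the Python side of the setup is working.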
+ +## Creating a custom Unity executable + +### Load the examples project +[The Unity ML-Agents Toolkit](https://github.com/Unity-Technologies/ml-agents#unity-ml-agents-toolkit) contains several [example environments](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md). Here we will load them all inside the Unity editor: +- Start the Unity Hub. +- Click on "Projects" +- Add a project by navigating to the `Project` folder inside the toolkit. +- Click on the project you've just added in the Unity Hub. The Unity Editor should start now. + +### Create a 3D Ball executable +The 3D Ball example contains 12 environments in one, but this doesn't work very well in the Python API. The main problem is that there's no way to reset each environment individually. Therefore, we will remove the other 11 environments in the editor: +- Load the 3D Ball scene, by going to the project window and navigating to `Examples` -> `3DBall` -> `Scenes`-> `3DBall` +- In the Hierarchy window select the other 11 3DBall objects and delete them, so that only the `3DBall` object remains. + +Next, we will build the executable: +- Go to `File` -> `Build Settings` +- In the Build Settings window, click `Build` +- Navigate to `notebooks` folder and add `3DBall` to the folder name that is used for the build. + + +## Instructions for running the notebooks + +1. [Download](notebooks/README.md) the Unity executables for Windows. In case you're not on Windows, you have to build the executables yourself by following the instructions above. +2. Place the Unity executable folders in the same folder as the notebooks. +3. Load a notebook with Jupyter notebook. (The command to start Jupyter notebook is `jupyter notebook`) +4. Follow further instructions in the notebook. diff --git a/Report.md b/Report.md new file mode 100644 index 0000000..1baec2e --- /dev/null +++ b/Report.md @@ -0,0 +1,37 @@ +[//]: # (Image References) + +[image1]: ./plot.png + +# Project 3: Collaboration and Competition +## Learning Algorithm +The learning algorithm used for this project is [Deep Deterministic Policy Gradient (DDPG)](https://arxiv.org/abs/1509.02971). DDPG is known as an Actor-Critic method, and it can be used for continuous action spaces. Just like DQN (from project 1) it uses [Experience Replay](https://paperswithcode.com/method/experience-replay) and a [Target Network](https://towardsdatascience.com/deep-q-network-dqn-ii-b6bf911b6b2c). The Actor learns a deterministic policy function, and the Critic learns a Q value function. They both interact with each other when learning. The Critic uses the deterministic action from the Actor when calculating the Q value. Because the Actor learns a deterministic policy, some noise must be added to the action values, to help with exploration. This algorithm uses a noise decay, so that the noise at the start of the learning process is high and much lower at the end of it. + +Two types of neural networks are used in this project, one for the Actor and one for the Critic. They both have two hidden layers with 256 and 128 linear units. The Actor network has 24 inputs, and 2 outputs. That's because each state has 24 parameters and there are 2 action parameters. The Critic has 26 (24 + 2) inputs and only one output, the Q value. + +In this project there are two agents, so there is an Actor and a Critic neural network for each agent. Both agents learn independently of each other. 
The Critic only uses the state that the agent sees and not the global state like in the [MADDPG](https://proceedings.neurips.cc/paper/2017/file/68a9750337a418a86fe06c1991a1d64c-Paper.pdf) algorithm. + +The hyperparameters used for this algorithm are: + +- `buffer_size=100000` replay buffer size +- `batch_size=1000` minibatch size +- `gamma=0.99` discount factor +- `tau=1e-3` for soft update of the target network parameters +- `lr_actor=1e-4` learning rate of the actor +- `lr_critic=1e-3` learning rate of the critic +- `weight_decay=0.0` L2 weight decay +- `update_every=20` how often to update the networks +- `noise_decay=3e-6` the noise decay used for the action values + +## Plot of Rewards +![plot][image1] + +The environment was solved in 23746 episodes. + +## Ideas for Future Work +The performance of the agent could be improved in several ways: + +- [MADDPG](https://proceedings.neurips.cc/paper/2017/file/68a9750337a418a86fe06c1991a1d64c-Paper.pdf) +- [Twin Delayed DDPG](https://spinningup.openai.com/en/latest/algorithms/td3.html) +- [Soft Actor Critic (SAC)](https://spinningup.openai.com/en/latest/algorithms/sac.html) +- [Prioritized Experience Replay](https://arxiv.org/abs/1511.05952) + diff --git a/ddpg_agent.py b/ddpg_agent.py new file mode 100644 index 0000000..6838460 --- /dev/null +++ b/ddpg_agent.py @@ -0,0 +1,115 @@ +from model import Actor, Critic +from pytorch_device import pytorch_device +import torch +import torch.nn.functional as f +import torch.optim as optim +from typing import Tuple, List +import copy + + +class DDPGAgent: + """Interacts with and learns from the environment.""" + + def __init__(self, actor: Actor, critic: Critic, gamma=0.99, tau=1e-3, + lr_actor=1e-4, lr_critic=1e-3, weight_decay=1e-2): + """Initialize a DDPG Agent object. + + :param actor: + :param critic: + :param gamma: discount factor + :param tau: for soft update of target parameters + :param lr_actor: learning rate of the actor + :param lr_critic: learning rate of the critic + :param weight_decay: L2 weight decay + """ + self.action_size = actor.action_size + self.gamma = gamma + self.tau = tau + + # Actor Network (w/ Target Network) + self.actor = actor.to(pytorch_device) + self.actor_target = copy.deepcopy(actor).to(pytorch_device) + self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr_actor) + + # Critic Network (w/ Target Network) + self.critic = critic.to(pytorch_device) + self.critic_target = copy.deepcopy(critic).to(pytorch_device) + self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr_critic, weight_decay=weight_decay) + + def act(self, state) -> torch.Tensor: + self.actor.eval() + with torch.no_grad(): + action = self.actor(state) + self.actor.train() + return action + + def step(self, samples: Tuple[torch.Tensor, ...]): + """Update policy and value parameters using given batch of experience tuples. 
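        One call performs a single DDPG update: the critic is first fit to the TD target below, then the actor is updated to maximize the critic's Q-value. The target networks are moved separately via update_target_networks().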
+ Q_targets = r + γ * critic_target(next_state, actor_target(next_state)) + where: + actor_target(state) -> action + critic_target(state, action) -> Q-value + + :param samples: tuple of (s, a, r, s', done) + """ + states, actions, rewards, next_states, dones = samples + + # ---------------------------- update critic ---------------------------- # + with torch.no_grad(): + # Get predicted next-state actions and Q values from target models + actions_next = self.actor_target(next_states) # + \ + # (torch.rand(*actions.shape, device=pytorch_device) * 0.1 - 0.05) + # torch.clamp_(actions_next, min=-1.0, max=1.0) + q_targets_next = self.critic_target(next_states, actions_next) + # Compute Q targets for current states + q_targets = rewards + (self.gamma * q_targets_next * (1 - dones)) + # Compute critic loss + q_expected = self.critic(states, actions) + critic_loss = f.mse_loss(q_expected, q_targets) + # Minimize the loss + self.critic_optimizer.zero_grad() + critic_loss.backward() + # torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1) + self.critic_optimizer.step() + + # ---------------------------- update actor ---------------------------- # + # Compute actor loss + actions_pred = self.actor(states) # + \ + # (torch.rand(*actions.shape, device=pytorch_device) * 0.1 - 0.05) + # torch.clamp_(actions_pred, min=-1.0, max=1.0) + actor_loss = -self.critic(states, actions_pred).mean() + # Minimize the loss + self.actor_optimizer.zero_grad() + actor_loss.backward() + # torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1) + self.actor_optimizer.step() + + def update_target_networks(self): + soft_update(self.critic, self.critic_target, self.tau) + soft_update(self.actor, self.actor_target, self.tau) + + def get_state_dicts(self): + return {'actor_params': self.actor.state_dict(), + 'actor_optim_params': self.actor_optimizer.state_dict(), + 'critic_params': self.critic.state_dict(), + 'critic_optim_params': self.critic_optimizer.state_dict()} + + def load_state_dicts(self, state_dicts): + self.actor.load_state_dict(state_dicts['actor_params']) + self.actor_optimizer.load_state_dict(state_dicts['actor_optim_params']) + self.critic.load_state_dict(state_dicts['critic_params']) + self.critic_optimizer.load_state_dict(state_dicts['critic_optim_params']) + + +def soft_update(local_model, target_model, tau): + """Soft update model parameters. 
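    Moving the target network only a small fraction tau toward the local network on each call keeps the bootstrapped targets slowly changing, which stabilizes training.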
+ θ_target = τ*θ_local + (1 - τ)*θ_target + + Params + ====== + local_model: PyTorch model (weights will be copied from) + target_model: PyTorch model (weights will be copied to) + tau (float): interpolation parameter + """ + for target_param, local_param in zip(target_model.parameters(), local_model.parameters()): + target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data) diff --git a/ddpg_agents.py b/ddpg_agents.py new file mode 100644 index 0000000..c5ed53d --- /dev/null +++ b/ddpg_agents.py @@ -0,0 +1,62 @@ +from ddpg_agent import DDPGAgent +from utilities import convert_to_numpy +import torch +import numpy as np +from typing import List, Tuple + + +class DDPGAgents: + def __init__(self, ddpg_agents: List[DDPGAgent]): + self.ddpg_agents = ddpg_agents + self.num_agents = len(ddpg_agents) + + def act(self, agent_states: torch.Tensor, noise_scale: float) -> np.ndarray: + """ Get actions from all agents + + :param agent_states: states for each agent -> tensor[num_agents, batch_size, state_size] + :param noise_scale: the amount of noise to add to action values + :return: np.ndarray[num_agents, batch_size, action_size] + """ + actions = [] + for i, ddpg_agent in enumerate(self.ddpg_agents): + states = agent_states[i] + noise = np.random.normal(scale=noise_scale, size=ddpg_agent.action_size) + action = convert_to_numpy(ddpg_agent.act(states)) + noise + actions.append(action) + return np.stack(actions) + + def step(self, samples: List[Tuple[torch.Tensor, ...]]): + """ + :param samples: list[num_agents] of tuple(states, actions, rewards, next_states, dones). + Each element in the tuple is a tensor[num_samples, num_agents, *] + """ + for i, ddpg_agent, samples_for_agent in zip(range(len(self)), self.ddpg_agents, samples): + # transpose samples_for_agent to tuple of tensor[num_agents, num_samples, *]: + samples_for_agent = tuple(torch.transpose(t, 0, 1) for t in samples_for_agent) + # convert samples_for_agent to tuple of tensor[num_samples, *]: + samples_for_agent = tuple(t[i] for t in samples_for_agent) + ddpg_agent.step(samples_for_agent) + + def update_target_networks(self): + for ddpg_agent in self.ddpg_agents: + ddpg_agent.update_target_networks() + + def save_checkpoint(self, filename: str): + state_dicts_list = [] + for ddpg_agent in self.ddpg_agents: + state_dicts = ddpg_agent.get_state_dicts() + state_dicts_list.append(state_dicts) + torch.save(state_dicts_list, filename) + + def load_checkpoint(self, filename): + state_dicts_list = torch.load(filename) + for ddpg_agent, state_dicts in zip(self.ddpg_agents, state_dicts_list): + ddpg_agent.load_state_dicts(state_dicts) + + def __len__(self): + """Return number of agents.""" + return self.num_agents +# +# class GaussianNoise: +# def sample(self, output_shape, noise_scale): +# return np.random.normal(scale=noise_scale, size=output_shape) diff --git a/model.py b/model.py new file mode 100644 index 0000000..ed92fd7 --- /dev/null +++ b/model.py @@ -0,0 +1,120 @@ +import torch +import torch.nn as nn +import torch.nn.functional as f +import numpy as np +from typing import List + + +def hidden_init(layer): + """ see https://arxiv.org/abs/1509.02971 Section 7 for details: + (CONTINUOUS CONTROL WITH DEEP REINFORCEMENT LEARNING) + """ + fan_in = layer.weight.data.size()[0] + lim = 1. 
/ np.sqrt(fan_in) + return -lim, lim + + +class Actor(nn.Module): + """Actor (Policy) Model.""" + + def __init__(self, state_size: int, action_size: int, hidden_layer_sizes: List[int], activation_func=f.relu): + """Initialize parameters and build model. + + :param state_size: Dimension of each state + :param action_size: Dimension of each action + :param hidden_layer_sizes: Number of nodes in hidden layers + :param activation_func: Activation function + """ + super(Actor, self).__init__() + self.action_size = action_size + self.input_norm = nn.BatchNorm1d(state_size) + self.activation_func = activation_func + self.input_layer = nn.Linear(state_size, hidden_layer_sizes[0]) + self.hidden_layers = nn.ModuleList() + self.hidden_input_norms = nn.ModuleList() + for i in range(len(hidden_layer_sizes) - 1): + hidden_layer = nn.Linear(hidden_layer_sizes[i], hidden_layer_sizes[i + 1]) + self.hidden_layers.append(hidden_layer) + self.hidden_input_norms.append(nn.BatchNorm1d(hidden_layer_sizes[i])) + self.hidden_input_norms.append(nn.BatchNorm1d(hidden_layer_sizes[-1])) + self.output_layer = nn.Linear(hidden_layer_sizes[-1], action_size) + self.reset_parameters() + + def reset_parameters(self): + self.input_layer.weight.data.uniform_(*hidden_init(self.input_layer)) + for hidden_layer in self.hidden_layers: + hidden_layer.weight.data.uniform_(*hidden_init(hidden_layer)) + self.output_layer.weight.data.uniform_(-3e-3, 3e-3) + + def forward(self, state: torch.Tensor): + """Build an actor (policy) network that maps states -> actions. + Note: Do not call this function directly. Instead, use: actor(state) + """ + x = self.input_norm(state) + x = self.activation_func(self.input_layer(x)) + for i, hidden_layer in enumerate(self.hidden_layers): + x = self.hidden_input_norms[i](x) + x = self.activation_func(hidden_layer(x)) + x = self.hidden_input_norms[-1](x) + # this outputs action values in the range -1 to 1 : + return torch.tanh(self.output_layer(x)) + + def __call__(self, state: torch.Tensor) -> torch.Tensor: + return super().__call__(state) + + +class Critic(nn.Module): + """Critic (Value) Model.""" + + def __init__(self, state_size, action_size, hidden_layer_sizes: List[int], activation_func=f.relu, inject_layer=0): + """Initialize parameters and build model. 
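        Following the DDPG paper, the state alone feeds the input layer; the action vector is concatenated into the hidden layer selected by inject_layer rather than into the first layer.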
+ + :param state_size: Dimension of each state + :param action_size: Dimension of each action + :param hidden_layer_sizes: Number of nodes in hidden layers + :param activation_func: Activation function + :param inject_layer: The number of the hidden layer to inject action values into + """ + super(Critic, self).__init__() + if inject_layer < 0 or inject_layer >= len(hidden_layer_sizes) - 1: + raise ValueError() + self.inject_layer = inject_layer + self.input_norm = nn.BatchNorm1d(state_size) + self.activation_func = activation_func + self.input_layer = nn.Linear(state_size, hidden_layer_sizes[0]) + self.hidden_layers = nn.ModuleList() + self.hidden_input_norms = nn.ModuleList() + for i in range(len(hidden_layer_sizes) - 1): + in_features = hidden_layer_sizes[i] + # insert the action parameters in hidden layer: + if i == inject_layer: + in_features += action_size + hidden_layer = nn.Linear(in_features, hidden_layer_sizes[i + 1]) + self.hidden_layers.append(hidden_layer) + self.hidden_input_norms.append(nn.BatchNorm1d(hidden_layer_sizes[i])) + # There's only one Q-value as output, because the input is a state-action pair now (compared to DQN): + self.output_layer = nn.Linear(hidden_layer_sizes[-1], 1) + self.reset_parameters() + + def reset_parameters(self): + self.input_layer.weight.data.uniform_(*hidden_init(self.input_layer)) + for hidden_layer in self.hidden_layers: + hidden_layer.weight.data.uniform_(*hidden_init(hidden_layer)) + self.output_layer.weight.data.uniform_(-3e-3, 3e-3) + + def forward(self, state, action): + """Build a critic (value) network that maps (state, action) pairs -> Q-values. + Note: Do not call this function directly. Instead, use: critic(state, action) + """ + x = self.input_norm(state) + x = self.activation_func(self.input_layer(x)) + for i, hidden_layer in enumerate(self.hidden_layers): + x = self.hidden_input_norms[i](x) + # insert the action parameters in hidden layer: + if i == self.inject_layer: + x = torch.cat((x, action), dim=1) + x = self.activation_func(hidden_layer(x)) + return self.output_layer(x) + + def __call__(self, state: torch.Tensor, action: torch.Tensor) -> torch.Tensor: + return super().__call__(state, action) diff --git a/my_unity_environment.py b/my_unity_environment.py new file mode 100644 index 0000000..74903c8 --- /dev/null +++ b/my_unity_environment.py @@ -0,0 +1,99 @@ +import numpy as np +from mlagents_envs.base_env import ActionTuple +from mlagents_envs.environment import UnityEnvironment +from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel +from typing import Tuple, List, Optional + + +class MyUnityEnvironment: + def __init__(self, file_name=None, no_graphics=False, seed=1, worker_id=0): + """ + :param file_name: The filename of the Unity executable, or None when using the Unity editor + (press Play to connect). + :param no_graphics: Whether to use a graphics window or not. + :param seed: The seed used for a pseudo random number generator. + :param worker_id: The id of the Unity thread to create. You cannot create threads with the same id. 
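            Every environment instance that runs at the same time needs its own worker_id; each id claims a separate local port for the Python-to-Unity connection.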
+ """ + self.engine_configuration_channel = EngineConfigurationChannel() + side_channels = [self.engine_configuration_channel] + self.env = UnityEnvironment(file_name=file_name, no_graphics=no_graphics, seed=seed, worker_id=worker_id, + side_channels=side_channels) + self.env.reset() + self.behavior_names = sorted(self.env.behavior_specs.keys()) + self.behavior_specs = [self.env.behavior_specs[behavior_name] for behavior_name in self.behavior_names] + self.num_agents_list = [] # number of agents for each behavior + for behavior_name in self.behavior_names: + decision_steps, _ = self.env.get_steps(behavior_name) + self.num_agents_list.append(len(decision_steps)) + + def set_timescale(self, time_scale: float): + """ Set the timescale at which the physics simulation runs. + + :param time_scale: a value of 1.0 means the simulation runs in realtime. + """ + self.engine_configuration_channel.set_configuration_parameters(time_scale=time_scale) + + def set_display_size(self, width: int, height: int): + self.engine_configuration_channel.set_configuration_parameters(width=width, height=height) + + def reset(self): + self.env.reset() + + def get_observations(self, behavior_index: int) -> np.ndarray: + """ Get observations for behavior. + Agents can have different behaviors. For example: Two strikers, and a goalie in the soccer example. + + :return: np.ndarray[num_agents, observation_size] + """ + num_agents = self.num_agents_list[behavior_index] + behavior_spec = self.behavior_specs[behavior_index] + behavior_name = self.behavior_names[behavior_index] + observations = np.ndarray((num_agents, *behavior_spec.observation_specs[0].shape)) + decision_steps, terminal_steps = self.env.get_steps(behavior_name) + for agent_id in decision_steps: + observations[agent_id] = decision_steps[agent_id].obs[0] + return observations + + def set_actions(self, behavior_index: int, continuous: Optional[np.ndarray] = None, + discrete: Optional[np.ndarray] = None): + """ Set actions for behavior. + + :param behavior_index: + :param continuous: ndarray[num_agents, *] + :param discrete: + """ + + behavior_name = self.behavior_names[behavior_index] + action_tuple = ActionTuple(continuous=continuous, discrete=discrete) + self.env.set_actions(behavior_name, action_tuple) + + def step(self): + """ Step forward in environment. """ + self.env.step() + + def get_experiences(self, behavior_index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ Get experiences for all agents with behavior %behavior_index. + + :param behavior_index: + :return: Tuple of (observations, rewards, dones). 
Each element is ndarray[num_agents, *] + """ + num_agents = self.num_agents_list[behavior_index] + behavior_spec = self.behavior_specs[behavior_index] + behavior_name = self.behavior_names[behavior_index] + # TODO: implement stacked observations: + observations = np.ndarray((num_agents, *behavior_spec.observation_specs[0].shape)) + rewards = np.ndarray((num_agents, 1)) + dones = np.ndarray((num_agents, 1)) + decision_steps, terminal_steps = self.env.get_steps(behavior_name) + for agent_id in decision_steps: + observations[agent_id] = decision_steps[agent_id].obs[0] + rewards[agent_id] = decision_steps[agent_id].reward + dones[agent_id] = False + for agent_id in terminal_steps: + observations[agent_id] = terminal_steps[agent_id].obs[0] + rewards[agent_id] = terminal_steps[agent_id].reward + dones[agent_id] = not terminal_steps[agent_id].interrupted + return observations, rewards, dones + + def close(self): + self.env.close() diff --git a/notebooks/3DBall.ipynb b/notebooks/3DBall.ipynb new file mode 100644 index 0000000..9555609 --- /dev/null +++ b/notebooks/3DBall.ipynb @@ -0,0 +1,542 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3D Balance Ball\n", + "\n", + "In this notebook, we will run the [3D Balance Ball example](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md#3dball-3d-balance-ball) from [Unity ML Agents](https://unity.com/products/machine-learning-agents). Please check the README file to setup this project.\n", + "\n", + "### 1. Start the Environment\n", + "\n", + "We begin by importing the necessary packages:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "from my_unity_environment import MyUnityEnvironment\n", + "from model import Actor, Critic\n", + "from ddpg_agents import DDPGAgents\n", + "from ddpg_agent import DDPGAgent\n", + "from replay_buffer import ReplayBuffer\n", + "from utilities import convert_to_tensor\n", + "import numpy as np\n", + "import torch\n", + "import torch.nn.functional as f\n", + "import random\n", + "from collections import deque\n", + "import time\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we will start the environment. Before running the code cell below, change the `ENV_FILE_NAME` parameter to match the location of the Unity executable that you [downloaded](README.md) or [created](../README.md#creating-a-custom-unity-executable) yourself. For example:\n", + "\n", + "```\n", + "ENV_FILE_NAME = \"3DBall_Windows_x86_64/UnityEnvironment.exe\"\n", + "```\n", + "A new window should pop up. Don't worry if the window becomes unresponsive.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "ENV_FILE_NAME = \"3DBall_Windows_x86_64/UnityEnvironment.exe\"\n", + "CHECKPOINT_FILENAME = \"checkpoint-3dball.pth\" # this is used for saving and loading the model\n", + "DISPLAY_SIZE = [1024, 768] # The width and height of the Unity window\n", + "\n", + "test_env = MyUnityEnvironment(file_name=ENV_FILE_NAME, no_graphics=False)\n", + "test_env.set_timescale(1.0)\n", + "test_env.set_display_size(width=DISPLAY_SIZE[0], height=DISPLAY_SIZE[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. 
Examine the State and Action Spaces\n", + "\n", + "In this environment, an agent must balance a ball on its head for as long as possible.\n", + "\n", + "**Agent Reward Function:**\n", + "- +0.1 for every step the ball remains on its head.\n", + "- -1.0 if the ball falls off.\n", + "\n", + "**Behavior Parameters:**\n", + "- Vector Observation space: 8 variables corresponding to rotation of the agent cube, and position and velocity of ball.\n", + "- Actions: 2 continuous actions, with one value corresponding to X-rotation, and the other to Z-rotation.\n", + "\n", + "Run the code cell below to print some information about the environment:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of agents: 1\n", + "Size of each action: 2\n", + "States look like: [-0.04766776 -0.08700117 -0.54295158 4. 0.11863136 0.\n", + " 0. 0. ]\n", + "States have shape: (8,)\n" + ] + } + ], + "source": [ + "def examine_environment(myenv: MyUnityEnvironment):\n", + " # number of agents in the first behavior:\n", + " print('Number of agents:', myenv.num_agents_list[0])\n", + "\n", + " # number of actions\n", + " print('Size of each action:', myenv.behavior_specs[0].action_spec.continuous_size)\n", + "\n", + " # examine the state space\n", + " print('States look like:', myenv.get_observations(0)[0])\n", + " print('States have shape:', myenv.behavior_specs[0].observation_specs[0].shape)\n", + "\n", + "examine_environment(test_env)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Take Random Actions in the Parallel Environment\n", + "\n", + "Run the code cell below, to watch a random agent in action." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Score from episode 0: 2.0000000447034836\n", + "Score from episode 1: 1.500000037252903\n", + "Score from episode 2: 2.400000050663948\n", + "Score from episode 3: 2.1000000461935997\n", + "Score from episode 4: 1.2000000327825546\n", + "Score from episode 5: 2.400000050663948\n", + "Score from episode 6: 1.0000000298023224\n", + "Score from episode 7: 1.8000000417232513\n", + "Score from episode 8: 1.1000000312924385\n", + "Score from episode 9: 1.1000000312924385\n", + "Time elapsed: 27.05\n" + ] + } + ], + "source": [ + "def test_random_agents(myenv: MyUnityEnvironment, n_episodes: int, max_t: int):\n", + " start_time = time.time()\n", + " for i in range(n_episodes):\n", + " myenv.reset()\n", + " scores = np.zeros(myenv.num_agents_list[0])\n", + " for t in range(max_t):\n", + " actions = np.random.randn(myenv.num_agents_list[0],\n", + " myenv.behavior_specs[0].action_spec.continuous_size)\n", + " actions = np.clip(actions, -1, 1)\n", + " myenv.set_actions(behavior_index=0, continuous=actions)\n", + " myenv.step()\n", + " _, rewards, dones = myenv.get_experiences(behavior_index=0)\n", + " scores += rewards.squeeze()\n", + " if np.any(dones):\n", + " break\n", + " print('Score from episode {}: {}'.format(i, np.max(scores)))\n", + " print(f\"Time elapsed: {time.time() - start_time:.2f}\")\n", + "\n", + "test_random_agents(test_env, n_episodes=10, max_t=200)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "outputs": [], + "source": [ + "test_env.close()" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + 
"name": "#%% md\n" + } + }, + "source": [ + "### 4. Train the Agent with DDPG\n", + "\n", + "Run the code cells below to train the agent from scratch.\n", + "\n", + "Alternatively, you can skip to the next step below (**5. Watch a Smart Agent**), to load the saved model weights from a pre-trained agent." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "class DDPGAgentsTester:\n", + " def __init__(self, ddpg_agents: DDPGAgents,\n", + " myenv: MyUnityEnvironment,\n", + " buffer_size=int(1.0e6), # replay buffer size\n", + " noise_start=1.0\n", + " ):\n", + " self.ddpg_agents = ddpg_agents\n", + " self.myenv = myenv\n", + " self.buffer_size = buffer_size\n", + " self.scores = []\n", + " self.scores_deque = deque(maxlen=100)\n", + " self.episode = 0\n", + " self.noise = noise_start\n", + " self.replay_buffer = ReplayBuffer(buffer_size)\n", + "\n", + " def train_agents(self, n_episodes, max_t, goal=float(\"inf\"), print_every=1000, update_every=1,\n", + " num_updates=1, batch_size=64, noise_decay=6.93e-6):\n", + " \"\"\" Multi Agent Deep Deterministic Policy Gradient algorithm.\n", + "\n", + " Params\n", + " ======\n", + " n_episodes (int): maximum number of training episodes\n", + " max_t (int): maximum number of timesteps per episode\n", + " goal (float): the algorithm will stop when the goal is reached\n", + " print_every (int) : print intermediate results every %print_every episodes\n", + " update_every (int): update the neural networks every %update_every time steps\n", + " num_updates: How many updates to do in a row\n", + " batch_size (int): minibatch size\n", + " noise_decay (float): noise decay factor = 1.0 - %noise_decay\n", + " \"\"\"\n", + " noise_decay = 1.0 - noise_decay\n", + " start_episode = self.episode\n", + " stop_episode = self.episode + n_episodes\n", + " steps = 0\n", + " start_time = time.time()\n", + " last_print_time = 0\n", + " for self.episode in range(start_episode, stop_episode):\n", + " score = np.zeros(len(self.ddpg_agents))\n", + " self.myenv.reset()\n", + " states = self.myenv.get_observations(behavior_index=0)\n", + " for t in range(max_t):\n", + " steps += 1\n", + " # get actions from all agents:\n", + " actions = self.ddpg_agents.act(convert_to_tensor(states[:, np.newaxis, :]), self.noise)\n", + " # remove batch_size from actions:\n", + " actions = actions[:, 0, :]\n", + " self.myenv.set_actions(behavior_index=0, continuous=actions)\n", + " self.myenv.step()\n", + " next_states, rewards, dones = self.myenv.get_experiences(behavior_index=0)\n", + "\n", + " # add sample to replay buffer:\n", + " sample = (states, actions, rewards, next_states, dones)\n", + " self.replay_buffer.add(sample)\n", + "\n", + " states = next_states\n", + " self.noise *= noise_decay\n", + " score += rewards.squeeze()\n", + "\n", + " # update networks every %update_every time steps:\n", + " if steps % update_every == 0 and len(self.replay_buffer) > batch_size * 100:\n", + " for _ in range(num_updates):\n", + " samples = [self.replay_buffer.sample(batch_size) for _ in range(len(self.ddpg_agents))]\n", + " self.ddpg_agents.step(samples)\n", + " #soft update the target network towards the actual networks:\n", + " self.ddpg_agents.update_target_networks()\n", + "\n", + " if np.any(dones): # exit loop if episode finished\n", + " break\n", + "\n", + " self.scores_deque.append(score)\n", + " self.scores.append(score)\n", + "\n", + " average_scores = np.mean(self.scores_deque, 0) # average 
score over last 100 episodes for each agent\n", + " if time.time() - last_print_time > 1.0:\n", + " time_per_step = (time.time() - start_time) / steps\n", + " print('\\rEpisode {}\\tSteps: {}\\tTime per step: {:.6f}\\tAverage Scores: {:.3f}'\n", + " .format(self.episode, steps, time_per_step, *average_scores), end=\"\")\n", + " last_print_time = time.time()\n", + " if self.episode % print_every == 0:\n", + " print(\"\\r\" + \" \" * 80, end=\"\")\n", + " print('\\rEpisode {}\\tAverage Scores: {:.3f}'.format(self.episode, *average_scores))\n", + " if len(self.scores) >= print_every and np.max(average_scores) >= goal:\n", + " print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}\\tTime elapsed: {}'.format(\n", + " self.episode, np.max(average_scores), time.time() - start_time))\n", + " break\n", + "\n", + " def test_agent(self, n_episodes, max_t):\n", + " for _ in range(n_episodes):\n", + " self.myenv.reset()\n", + " states = self.myenv.get_observations(behavior_index=0)\n", + " score = np.zeros(len(self.ddpg_agents))\n", + " for _ in range(max_t):\n", + " # get actions from all agents:\n", + " actions = self.ddpg_agents.act(convert_to_tensor(states[:, np.newaxis, :]), noise_scale=0.0)\n", + " # remove batch_size from actions:\n", + " actions = actions[:, 0, :]\n", + "\n", + " self.myenv.set_actions(behavior_index=0, continuous=actions)\n", + " self.myenv.step()\n", + " next_states, rewards, dones = self.myenv.get_experiences(behavior_index=0)\n", + "\n", + " score += rewards.squeeze()\n", + " states = next_states\n", + " if np.any(dones): # exit loop if episode finished\n", + " break\n", + " print(\"Score: {}\".format(score))" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "outputs": [], + "source": [ + "random_seed = 1\n", + "np.random.seed(random_seed)\n", + "torch.manual_seed(random_seed)\n", + "random.seed(random_seed)\n", + "train_env = MyUnityEnvironment(file_name=ENV_FILE_NAME, seed=random_seed, no_graphics=True, worker_id=0)\n", + "train_env.set_timescale(time_scale=100.0)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "code", + "execution_count": 8, + "outputs": [], + "source": [ + "actor1 = Actor(state_size=8, action_size=2, hidden_layer_sizes=[400, 300], activation_func=f.leaky_relu)\n", + "critic1 = Critic(state_size=8, action_size=2, hidden_layer_sizes=[400, 300], activation_func=f.leaky_relu,\n", + " inject_layer=0)\n", + "ddpg_agent1 = DDPGAgent(actor1, critic1, gamma=0.99, tau=1.0e-3, lr_actor=1.0e-4, lr_critic=1.0e-3, weight_decay=1.0e-2)\n", + "ddpg_agent_list = [ddpg_agent1]\n", + "ddpg_agents = DDPGAgents(ddpg_agent_list)\n", + "ddpg_agents_tester = DDPGAgentsTester(ddpg_agents, train_env, buffer_size=int(1.0e6), noise_start=1.0)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "You can skip this cell, if you don’t want to train the agent from scratch. 
It may take 30 to 45 minutes:" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Episode 0\tAverage Scores: 1.500 \n", + "Episode 1000\tAverage Scores: 0.854 \n", + "Episode 2000\tAverage Scores: 0.710 \n", + "Episode 3000\tAverage Scores: 0.829 \n", + "Episode 4000\tAverage Scores: 1.844 \n", + "Episode 5000\tAverage Scores: 2.710 \n", + "Episode 6000\tAverage Scores: 3.985 \n", + "Episode 6301\tSteps: 182838\tTime per step: 0.011439\tAverage Scores: 10.000\n", + "Environment solved in 6301 episodes!\tAverage Score: 10.00\tTime elapsed: 2091.4249007701874\n" + ] + } + ], + "source": [ + "ddpg_agents_tester.myenv = train_env\n", + "ddpg_agents_tester.train_agents(n_episodes=int(1.0e5), max_t=100, goal=10.0, update_every=1,\n", + " num_updates=1, batch_size=64, noise_decay=6.93e-6)\n", + "ddpg_agents.save_checkpoint(filename=CHECKPOINT_FILENAME)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEGCAYAAABiq/5QAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAAAvlklEQVR4nO3dd5gUVdYG8PdMIGcJkocsUcKIBAMLoiIq6rqKCmJEMGBcBTHgGhbdlVU/I8EEiChKUBAFBMlhyDkPwxCGHIdh0v3+6OqZztPd01XV1fX+nodnuqurq0413adu3bpBlFIgIiL7iDM7ACIiMhYTPxGRzTDxExHZDBM/EZHNMPETEdlMgtkBBKNq1aoqKSnJ7DCIiCxl9erVx5RS1TyXWyLxJyUlISUlxewwiIgsRUT2+VrOqh4iIpth4icishkmfiIim2HiJyKyGSZ+IiKb0S3xi8iXInJERDa5LKsiInNEZKf2t7Je+yciIt/0LPF/DeBGj2VDAcxTSjUBME97TkREBtKtHb9SaqGIJHks7gOgm/b4GwALALykVwxERHr7esleTF13EOv3n3Jbvv2tG1EyId7ne+ZvP4IHv1rltbxfp3qoUqaE27Lb29dBg6plIxYvYHwHrhpKqUMAoJQ6JCLV/a0oIgMBDASAevXqGRQeEVHwDp2+gBG/bPH52ktTNuCDvu18vuYr6QPAhOVpEHFf1r5+Zcsn/qAppUYDGA0AycnJnC2GiKJOdm6+39cOnsoKa5t7/9073HCCZnSrngwRqQkA2t8jBu+fiMj2jE78MwAM0B4PADDd4P0TEdmens05JwFYBqCZiKSLyMMARgLoKSI7AfTUnhMRkYH0bNVzj5+Xeui1TyIiKhp77hIR2QwTPxGRzTDxExHpQCF6W6Ez8RMR2QwTPxGRzTDxExHZDBM/EZHNMPETEdkMEz8RkQ4EUvRKJmHiJyKyGSZ+IiIdsB0/EVEMUtGb2wNi4icishkmfiIim2HiJyIKk+f8uFbBxE9EpKP0k5mYtfGQ2WG4YeInItJRn4+X4PGJa8wOww0TPxGRjo6fzzY7BC9M/EREOojmpp5M/EREOojivM/ET0QUrkCl+mhu8MPET0Skg2hu6snET0RkM0z8REQ2w8RPRBSmaK7OCYSJn4jIAOOXpUIpBRUF7TwTzA6AiCgWeeb3V6dvRuPq5ZGZnWtOQC5Y4iciMkhWbh4ys/PMDoOJn4jIbpj4iYjCFE51vfk1/Ez8RES2Y0riF5FnRWSziGwSkUkiUsqMOIiIDKWiYygHwxO/iNQGMARAslKqFYB4AH2NjoOIyAx2rupJAFBaRBIAlAFw0KQ4iIjCFnIHrmgo7sOExK+UOgDgvwDSABwCcFop9YfneiIyUERSRCTl6NGjRodJRBSzzKjqqQygD4AGAGoBKCsi/TzXU0qNVkolK6WSq1WrZnSYRETFkrLvJHYdOWt2GD6ZUdVzHYC9SqmjSqkcAD8D6GJCHEREurpu1EL3BUVU8Bs19o8ZiT8NQCcRKSMiAqAHgK0mxEFEZLiN6af8vmbUMD5m1PGvADAFwBoAG7UYRhsdBxFRcYWTqMcs2hv5QEJkyiBtSqnXAbxuxr6JiExTRFVOLFf1EBGRiZj4iYiMUtTNXWOiYOInIooWYlBdDxM/EVGYOPUiERFZAhM/EZFRouQKgYmfiMhmmPiJiMIUcgcutuohIiIzMPETRakjZ7Lw9RLzu/dbwaHTFzB++T6zwyg2o1oJmTJkAxEVbfDENVi97ySubVYdDaqWNTucqDbgy5XYkXEON7a8FNXKlzQ7nLCJQZU9LPETRalTmdkAgNy8fJMjiX4nM3MAAPlGDW8ZLrbqIaJAjOrFGQucn1S05/2omHAXTPxEUSE3Lx+fzN+FzOxcs0OxJOc5UhmcWUM9N3+7LFWXOELFxE8UBaavO4j//L4do/7Y4fValBQSo5qzbjzaS/zztxcxfziHZSayj6zcPADA+ew8kyOxJrNqxaL9ROMPEz8RxQyL5mHDMfETRSnL3LCMAoWflbU/LPbcJSIKUqy0gOLUi0S2ZO0Sq9ksXuA3DBM/URTw1WPTrCaKFPuY+InI8mKkpscwTPxEFDOMruqJ9AmHY/UQWZhSCqMX7kbGmazg1vdRnWOVTklmm7nhENJPXgDAarFgMfET6WDPsfN4Z9Y2DJqw2uxQYt4T360peGz0STLS+2OrHiILy8t3ZIRzWcGNvWPUJX6ss3p5n+34iWyONyxDZ/UOXEZh4icishkmfqIox0IsRRoTPxHFDJ4jg8PET6SDcEvpvt7HJorBs/rVkVFjDpmS+EWkkohMEZFtIrJVRDqbEQeR3oL9HQdaLxZb/Bw5k4VP5u/S4WasxTO/QRJM2u+HAGYrpe4UkRIAypgUB5GurF4C1ctTk9Zixd4T6NasGlrWqhix7Vq/564xDC/xi0gFANcAGAcASqlspdQpo+Mg0lOwCWH2pkNYuvtYQcKatfGQfkFFkfPa3ML5+aG9b86WDCzcUcT0hQay6ondjKqehgCOAvhKRNaKyFgRKeu5kogMFJEUEUk5ejR6/qOJImnQhDW4d8yKgudnguzwZVePfpuC+79c6fd1i+Zhw5mR+BMAtAfwmVKqHYDzAIZ6rqSUGq2USlZKJVerVs3oGInIZMfPXcQHc3cgP987nf+8Jt3ne5buOoauI/9Edm6IlxLRIoaHbEgHkK6UchZzpsBxIiCyrYA3d2Pv3m5QXvppAz6YuxMrU094vfbcD+t9vmfEL1tw4NQFfL8qTe/wLM3wxK+UOgxgv4g00xb1ALDF6DiI9BTJut9orke+kJ2H//y+DRdz8yK+7cxsxzZz80L/ACxb4jeIWa16ngIwUWvRswfAgybFQaSrWC+tf7FwNz6ZvxuVSpfAo9c0jOi27TgDmVFfF1MSv1JqHYBkM/ZNZDXRfPK4qJWss/MiX8IuTv+FWJl8XS/suUuko2iuprGKcD5Dq47SGdM9d4liXai/X5ZPvRVW9VCkBZ34RaS0yw1ZIooguyU3vQvkrOoJLKjELyK3AFgHYLb2vK2IzNAxLiJLs2hNg+X8vvmw2SFYUrAl/hEAOgI4BRTcnE3SIyCiWMKCp2+hfC6B6usfG885jcMRbOLPVUqd1jUSIhubtDL8DkdztmRg3taMCEYTuvSTmWgwbCaOnbsYsW06q2t48RR5wSb+TSJyL4B4EWkiIv8HYKmOcRHZyob08MtVj36bgoe/SYlgNKGbtHI/lALuHbM8qPWDqQrjxZJ+gk38TwFoCeAigO8AnAbwjE4xERGseZ/gQk74PXjnbz+C33yNTmrBzyHaFdmBS0TiAcxQSl0HYLj+IRHFDismbyP4quN/8KtVAIDUkb39rhPrPuzb1pD9FJn4lVJ5IpIpIhVZz08UnOIkrWlrD6B6+ZIxnfhCOSHaaciGzo0uMWQ/wQ7ZkAVgo4jMgWMYZQCAUmqILlERWVxxSvrPTF4HAGhWo3xkgoligaecLMZ2i/FeM5WIN6ZPbbCJf6b2j4hCEIlSezSX
/D1PcKGe8HytfzozB2vSTmL+9qNhbdPKjOp4FlTiV0p9o42k2VRbtF0plaNfWETkFM2Jb/+JzLDeFyi/jZy9FZNW7i94Hs3Hb1XB9tztBmAngE8AfApgh4hco19YRLQ946zZIfj1+V+7sevIOeR5zI4VbIE1UDLP8Rh/f+uhM3hn1lZcyI78mP92FWxVz/sArldKbQcAEWkKYBKADnoFRkQO0VbVk5WTh5G/bcNnC3ajSzFvRvo6tjiPZe/P2QEAKFvCrOlDYk+wdxISnUkfAJRSOwAk6hOS+SYs34c1aSfNDoMIADBj3UGzQ/Apqxht9p18lfz9jcOfm+8+5v8v6/1/Lt8sSy1OWDEv2MSfIiLjRKSb9m8MgJgdJOOVaZtwx6fsmEzR4dMFu80OwVBxQWalpyat9fvavuPh3Xuwi2AT/2AAmwEMAfA0HHPkDtIrKKJYdCE7D2/+uiWm66r3n7gQ0vq+q7GirG4rBgVbaZYA4EOl1CigoDdvSd2iIopBYxftwbjFe1G5TCKe7N7E7HAiorgtbnxW9dg074+537jZaIMt8c8DUNrleWkAcyMfDlHsytFawOTmW7t9YiSaVwZK7p43d+2iZ4sahu0r2MRfSil1zvlEe1xGn5CIYhQbpBcI9FHE+Tkr2PR8oItgE/95EWnvfCIiyQBCq8wjIsvbffQc3p29DQBwMTe/iLWL5rs5p+8UH42nzWiMKRjB1vE/A+BHETkIx7HWAnC3XkERxaQYqLzuO3o5jp6N3GQrvAgyR8ASv4hcISKXKqVWAbgMwGQAuXDMvbvXgPiizoo9xzFxxT6zw6Ao53NESZ2z3JJdx9Bk+CxkZudGfNujF+7G5oOncTGEtvtvz9yC+8YGNzGLK3/nx2g8SQSaFjKaFVXi/wLAddrjzgBehmNSlrYARgO4U7fIotTdox1f5PuurG9yJGRV/jooFdd9Y1cAAMYs3Iunr4tsq6F3ZjmqdyqUCr737JhFRZcNQ6nqWbf/VND7psCKquOPV0qd0B7fDWC0UuonpdSrABrrGxqRdemV3IORp2MpNMujXn/25sNBv3dj+mmMWbinyPX8fXL5UVi6jr6IglNk4hcR5ym+B4A/XV7jwBlENpNdjBu6t3y8GG/P2uq2jO34zVFU4p8E4C8RmQ5HK55FACAijeGYd9fycvPy8eavW3D8XORuWBF52n8iEx/9ucuQfUVDvfPmg+GnB6PGpI+EKPiowxKw1K6UeltE5gGoCeAPVfiNioOjrt/y5m7NwLjFe3Hk7EX83z3tzA6HYtTgicYNbbUh3fwyWe+PFge1nq8cb520D1i1sieYOXe9bstro3PGhLx859/it0kmcvJs1ZOTa1yCiPZUNHbRHmw+eMbv66nHz/tcvnT3cb1Csh1jJngksikzbvJGQ1VPIG/N3Brw9d83ZxgUiX2ZlvhFJF5E1orIr0bt80xWDl6fvsltHPFp6w4AAGZtPIzfNh4yKhSyMSOqsH9I2Y/524/ov6MgTV2bbnYIETd702FcN2qh2WGExcwS/9MAAp/6I+yjuTvxzbJ9mLQyrWDZnC2FpYvBE9cYGQ7ZlBEF8henbMCDX63Sf0dBenbyerNDiLhBE6w7JYkpiV9E6gDoDWCskft1DoroOU+oq0/mG9PyguzBZw9emwk0/8CJ89kYMWNzsZqJUujMaov/AYAXAZT3t4KIDAQwEADq1asXkZ06h3sNVOL6z+/b/b9IFKRAdfsWaq0YEeOXp/p97e2ZW7E94yza1atkWDxkQolfRG4GcEQpFfA6SSk1WimVrJRKrlatWkT2Hadl/rdnbUXS0JnYc/RcEe8gsrdDpy/gzV+3FGsbeQEK8zlaa7oovx8dc8wo8XcFcKuI3ASgFIAKIjJBKdVP7x17FrQe+jp66kAptnhW8Vi1yufZyeuwfM+JolcMl/ax2O0qyGyGl/iVUsOUUnWUUkkA+gL404ikD3j3CLT6TEgU/SLZnDMvX+Ffv2zBwVPGTYUR6H5YtNuQfgoAMHdLBn5I2W9uMFHGVu347TqlG5kvEieANWkn8eWSvXh28rriB2QDt368BADwyLcpeHHKBpOjiS6mDrSmlFoAYIFR+/M33CuRkcL9FjrrwY0cpTKcXZ2+kOP2fOnuY37X3XPMdy9d0petSvyeeZ83lMgornX8sf61+2Cu+4gui3b6T/xOeg3MlhPozrKN2WpoZSuN+kexwao3dZ3C+cmEU6DS65c5YXnkZ8v7dcNBS9/7AGyW+D3r+HkeIKO41vGHX9Vj7WRjhjd+KV5TVF+e/G5txLdpNFtV9XjW8Rfnd/TZgt3FjIZiUcaZLLw2fRNy8xxfLj0Haftrx1GMX5bqtTyYqpVgGVWwjZZC2OnMHLw8daPbeF5ZOXl4eepGr3sXVmarEn8kv1vvzt6Gwd0aRXCLFAuGT92IuVuP4NKKpdyWR7LKx3kyGfDlyoht058dGWdDfk+0JPFw/G/uDny3Ig3NapTHgC5JAIDvV6bhuxVpKJkQh9dvaRnxfQ7oXB+9WteM+HYDifkS/7FzFzF86kZczM3D+3PcbzodMLA9NNlDMHW/zkECc0O48Tht7QH8uiH00WOH/bwBZ7NycCozG8N+di/J6sXM+YYDyc7Nx/CpG3EswGx7zuo012o153/pzA2H8NPqyI8y+kafVujU8JKIbzeQmC/xv/nrFkxfdxD1qpQxOxQiAMDB01kAQquSeSbMtvuTVu5H5TIlcP5iLiatTEOLmuXRv3NSWNsKVjgl/jMXciMfiIffNh3CxBVpOH8xFx/0DX22vSNnL+L5H2NjlNGYL/E7S2C8LUZm8lUK/mvHUUP2rVBYag31d3A2K/SEHE5534jqIWchfu3+U/ho3k79dxjFYj7xO0XnxSfZha86/q+XphqzbxX9zUqN/H3uO56JUXN8zx4b3Z9S5MR8VY/zP9LKN5zIuraHcXO0SDp9l2dvOoxTmdno27F4w6CPXbw35PcY0ave31y+/mTl5OGyV2fj8joVdYrIPDGf+IkoOM4ZpYqb+MNiQMHsg7mhVe9MW+uYlnV9+mk9wjFVzFf1zNRaQrwza5vJkZCdmdnS5fO/dmOvNiaOaxSHTl/AK9M2erUuene28b8VMz6dUX9sx/r9p9yWHTjpaOmXp4zrw2CGmE/8RNHA7Dr2JbuOey17ccoGTFiehmV73F8zo3OiGcOpfPTnLvT5ZInbsnnbHBPUr007aXg8RmLiL8JnC3Zj2W7vH01x5eUrvDptE/afyIz4tskY5y/m4sUp63326Fyz75TxAQXh8JmsgseBeq7vyDiLpKEzDYjI4T0TrjICifVxvZj4i/Du7G24Z8zyiG939b6TGL98H57/ITbaBdvR+OX78ENKOj5dsMvrtblbM3Tbb3FS0ifzgyvN3zd2RTH2ErojZ/13qqLIY+I3mdlVABS+ghJzGP+FZg+49vlfuwOOk3/U5ok4tsv7TPymMfuHT8UXSm2A583dNJOr+Eb+ti2mb15GQqiFsloe4zNFMyZ+IgN4JhEz53vecvCM2/MHvloV1mBssWzG+oP
4Y3No1XVWGrSRiZ/IBGZO5HHXF8vcnuflK0NG+rQao4bUMAM7cJksWkcypOA5U/i7s7dhrUe7cH+KMzNUcRucZPsYFfTQ6Swfa9rD+v2nMPTnjdh66EzRK8cIJn6iMHnm30Dt3z1P8N8ui/yUgMHiPLTuPNvy2wGreoL03uxt2Oij6/ZnC3Zj0c7QLglPZWbjhSlsxhlLirpZH8nWW1k5xUvcbFdALPEH6dMFuzFm0R6v5c7u7akjewe9rY/m7cL+E5wEhsKzLsjqJCJ/WOIPQU6e8ngeXskrxjsF2o5SKmApmlUrFG2Y+Ith+Z7ID+VA1hHsCXzxzmMRvYl/TdNqEdsW2RMTfzGEW1fKAn/sCfhVkMjW8VcqnRixbZE9sY6fqJiUMrYnNu/NxoY72tVGp0bGTrLuxMRfDCczswseL911DF0aVy3yPRvST7nPUMTiv2UFW33z4FerIrrfX9YfREIcvzhRJ8Sbd7dcXgt/u6y6TsEExqqeYvjQZUafe4MczfDOz5cVvRJZjtGl8Kna7FAURUK96jPx3M3EbzCW02KHc2z7i7lstUOhMzMXMPEbzIhJpckY47QquzlbMtgpikJm5mQvhid+EakrIvNFZKuIbBaRp/XaV2Z2brHev6aI6df2Hj9f5DYOnLqAZ75fi4u5eQCALO0vEcWYEBO53Ur8uQCeV0o1B9AJwBMi0kKPHf28pnj1oHd8ujTg68GU8l6btgnT1h3Eoh3Hgn4PWYtEuLkmWVSIP+4uJrXoAUxI/EqpQ0qpNdrjswC2Aqitx75yo6jHpL+vxMq9J5A0dCZOns/2s0boRszYjJTUE0Gtu3TXMfx71taI7TsUuXn5eO6Hddh15FyR657JysHjE1fjRAQ/p0jiCZ1ClRBvXk27qXX8IpIEoB0AryYxIjJQRFJEJOXo0fDGxY6GGYaCvfob7WMcoHB9vTQ16NZD945dgS8WRm7fodh88Ax+XnMAz/2wrsh1Jy5Pw6yNh/HFwuDmjDUS79oQAEAEb97WConx0f+NMC3xi0g5AD8BeEYp5TUQtlJqtFIqWSmVXK1aeF3Ujb538sfmw/jfnB0AgG2Hz+CFH9cXnHyUUjiVGVpp9UJ2Hp74bg0On87C+GWp+H5lWkTi3HXkHJ6dvM7QK6KVe09gxIzNbssCnZd/WLUf3y5LxbytGXj/j+2GVaWczszB4AmrQ/6/+mlNuk4RkZX071QfO9++yewwimRKBy4RSYQj6U9USv2s23702rAfA8evBgA827MpBo1fjdTjmWhcvVzB675G93TyVVXw26ZDmLnhEErExxW02+7bsV7AGILpQfrcD+uwIf00BnRJKnLdSHHO+jTi1pZer/n6f3rxpw3uz29spkdYXr5auhe/bTqMJtXL4bnrg9/n8KmbdIyKLMFC9X1mtOoRAOMAbFVKjdJ5X3pu3q8jZ7OQetwxmbaz/loB+GR+4GqKj+btxG8bDwFw1n/rM2a/81MxY8L3f/2yBUt3H9Meby5ibd+em7wOnd6ZhwytHf2KPce9riYA4M1ft2DprmNey5fuOoZ//bLF57advXF9fTKnL+Rg0PjVEb0fQ2QGM6p6ugLoD6C7iKzT/kX/tVEIPnDp0RuKUXN2YPDENQCALS7TwEU8QYv/5Ka3L5fsxb1jHLd01qSdCvn9ObkKP689gMNnsvCf37cDAO4evRxfL031Wnfc4r0+e1TfO3YFvlyy12s5ELh6cPyyVMzefNjrys065TzSlYX66JjRqmexUkqUUm2UUm21f7P02NfBU+ZMdhJOnv78r8KrgaShM90m417t0p8gJfUEhk/d6PdkcD7bfz+BtWknMdSlCsVzE4t2HsVbv3qXhJVSeGnKhrAnAJm+ruhmtbuOnMOQSWv9jl2fr30e2zMKT4hTVqej2Su/FTwfMWNzwdVEuJw/3bNZuXhsfAqOnr1Y+Jr2w/50wW4kDZ1ZsNzO89WSNcV0z12zWquknfDu2BXqyWDvscJtuM7WdefnyzBxRZrf7c3ccNDvNvuNXYHvV+3HhYKObe4b6T9upfsAcpozF3IxOWU/+o8LbjwiT09/vy7wCiJ44cf1mLH+IDYe8J7eEgB2alVmS3a5z4HgOlzC10tTC64mwuUstE1etR+/b87AJ/N3eb1GxqlbpbTZIRTp8rqVzA4hZBydUweeySkcr0wLfLPwi4V7ULVcCfwjua7bctcTQtLQmVj3Wk8Mn7oJ8XFScDWwI+Oc17rfeFSV5OcrvPjTBjzYNQl1Kpdxe23Mwj2oWDoRd13hvu9QjPxtW8Hj9S5XEkV1mgtG0tCZmPRop4Lnmw6cxpeL92LO1gx0qF+5YHnPUX9BBPjj2WsLljlL9RdyvK+cvvjLnIKEnTWpXj7qpyktnRh8+blCqQScySreiAKRENMl/ugSWpE/M0CVDeCY6/efUzYEXAcAXp+xGTM3HsKM9d5XAspjPVcHTl3AlNXpGPjtaq83vD1rq1erm1C5Vm3p4Z4xywsePzZ+NX5eewBns3KxYHthn5CdR84VnASDcfpCTkRjpKL9+47WQa03a8jVOkfiXyhX8z8O6qJfICFg4jfIoAlrdN1+dm4+Hp+4GnuOuVczTV/nv+pnSor/tufOEu+h0xcKqjjOXszFfWNdE2oKsrT1Xpu+CYt3HsOJ89l45JuUkNvBFyXQcRQlvoix6/uPW4E7Pl2C5XuOF9wwdlq2+zhembYRC3eE14mQiqdGhVLo3bpmwHXqVimNFrUqoESCOenM+f1y/Zbd1raWz3WbXVregIiKxsQfI9akncSsjYcxOoT7GpNT9vt9zbmdfOVeonGtxvp9c0ZBQvx22T70G7cC4xbvwdytGZiwfF+IR6CftBOZAV9ftPMY1qSdQt/Ry71e255xFhOWp+H+L1fqFR4V4Y0+3n0/XH31wBWOByY1r/r0vvbo16ke7uxQp2CZs8rwno71ULtS9N2nYOKPEYdOR64e9Ni5i5iyOt3tuT8K8DnkwmcLdmPyqsj0NCZ7q1quJO5o7z2cl3NohAZVHZ0kE0waKqFSmRJ467bWKJUYX7DMeRXQsUFlLBna3ZS4AmHijxH//X1HxLb1hkfnpk8DdDw7l5XrcxTU89l5eOmnjRGLiext+E3NMaBzfbzh0vN7xpNX4dnrmhYk2Z8f74Ih3RsXvF6jQknD43R6+abmuL9zffRu7bvKx2xs1WNxru3JI+UXjxvBgcahOX7e/9UAUaRcUq4k3ujTCrl5+QUNEZrXrIDmNSsUrHPZpRVw2aUV8MeWDGw7fBZNqpdHxhlzvp9VypbAv/q0MmXfwWDip2IZvdC93X+wE5AT6S0vGobn9eGF65uiWzNzJll3YuKnYvGs///YpcMToM8VCdlXMONvJWrj3BfVmsssT3ZvYnYIrOMnotjy6X3t8di1DfHJfe1128fKl3uEtP63D3UMuk+CEVjiJyLLcJbhA7XZr1ulDIb1aq5bDD881hnVK5QK6T3XNA1vThG9MPETkWXExQleuL4pejSvYVoMydqwH8Nvao4rGlQxLY7iYOInIksxs448MV4Qp9
07ePSahqbFUVwxXcdftVwJs0MgClvDamXNDiFi/tYsuqo6QvXDY50BWGqSrYBiOvG3rl3R7BCIwvbn893MDiFiugdRNZM6srcBkYSnXb1KAALfW7CS2DgKIrKkth5j2b95WytMHtjJ98o66H5ZdZTyM6zyVw9eUfA4MT4OL1zfFFMf72pUaLqK6cRv1py7RHpIHdkb7/49epoEFuXqJlUDvp46sjemPeGeSPt3qo8rG16iZ1huGlUri16tfI/++TePTlZPdm8SNaNrFldMJ34iq3rrNkd3/6G9LnNbflu72nioa4OC5+/c3hp3J9fFlT5al1yRVBn9O9V3m3ymOHztw9UX/TugarnIjI/zzUMd8d9/XF7s7fzwWGe8eVsr/DS4C0bc0sLnOq/e7L184iNXFnvf0SymE3+iSaP1ERWXcwz6Qdc2clteMiEer7kksJa1KuDdO9tgsnbz0dWPg7oUJL1Vw68LK46OSY5kf3WTql77eKBLEi51ac9+Q8tLcXmdyNxXu7ZpNbdhjsPVsUGVgpPfA10beNXRx8fFoUpZ70YgXRsHvlqxuphuzvn89c3w++YMs8Mgm7q9XW1MXVv0RPOenuvZFJV9JCNX93euj/Xpp9GyVuEgZR/f2w4lE+JxMjMbtSq6jwHvr4XbjS0vxezNhwEAZUrEe8/85qfs9FDXBnimZxPc37k+ur//l9frdyfXxUGPocL/7552KJ0Yj1KJ8W7DiM94sitWpZ70e6yR9MuTV+GGDxYWPH9SG83z+Z5N8f4c7xFu37ytFZrHSPWOq5hO/BVLJwa97qNXN8CYRd4TjROFI3Vkb6Qdzwwr8Q/pUXQ7dV8jP97cxv8QwP7ud9WvWjif8oNdk/CJxxDcJbUScol495Ky86qjQin335hznJwbWtXA10sLJ+OJF8Etl/uOr02dSmhTp5Lf2COp2aXl8eKNzfDe7O14ukcTlCvpSIFVy/uuourfqb4hcRktpqt6qvv5zwS8B3AK5scWKXWrRN+MPBR5wfw///rUVQZE4jBuQDI+uqcdGlcvV/jbUEALbWhjpYBXejfH5/06oG3dSiiVGIcv+nfAY9c2xL8D3FR+49aWmPq4Yy7Zt25vhceuaYhrmxbeGG1cvRz+3sF7IhWzPNilAR6+qgEeu7awA1ZulI7kqZeYTvyBWvW8/4/LUc3lxFC+VNFXB/deWQ9A4Y23cD3doyku92jGRrFHRApKzP60MrCvSY/mNXDr5bUw97lrMVDrdZoQL+ijzQ+bECd45OqGuLHVpZj2RFdse7MXypRIwLBezVG9vKMu39eAlwO6JKFdPccN5KrlSmLYTc0RH1d47K/e3AIlE+K932iS0iXi8erNLVCmRExXeAQU80f+Zp+WeHX6Zq/lvdvUROs6FfHh3J1o4VJPGozilg1uvbwWOje6BI98k4Kth84Uc2sUzZ7r2RT//m2b1/IHuiQh/aRjLuD+nepjfIA5it/s0zLiJ4h+nerj0OksPN6tMeLjBEfPXsRjHjeSfZk55Gos2hncxPP/vqM1GlYri6sscKP0Hx3qIPXYeYxbbI/q3pgu8QNA/85JPpcnxsehUbVy+Oiedl4tJwBAxLsnobOeMyHEcb5fvqmwSV6tiqVQIiEOtSuVxm9PXx3Sdsh6Kpdx3FS9v7N7XfGIW1ti7ABHB6Hbfcwn66p/58ISdaSUSnSUesuWTECpxHi8oj0uSvOaFTDwmqJPEIBW+u/VPGrHxXfl/DzsIuYTP+BoNfD6LS3w5N8aY0iPJpgyyLvpG+Bo9+ycs9P5VR3oMhDTc9c3xaNXN8Df2xc2Mxt+k/vwrwOvaehWhXRjy0vRpHphq4DxHu2DP9NxzPCitKod2pUOhe62drXx6NUN8MINzfBAlyQAwEs3urfNb1unEh7vFlwyJX299/c2+N7AnsNmsUXib1OnEh7s6vjxPdezKZKTfHdE+XFQl4KbvM76P+dl6vUtaqBCqUQM793CrS3wo9c0RL9Ojrr//9zZBi/f1NytzfTn/TsUlHh6tqiBRtXKue2zV+uaSB3ZG81qhNZkrEKpBL9XHq43tQMNVBdMHeeou7w70ejVIfqDu9vqs2Ed7HnnpqDWK5EQh+G9W6BCqUSMuLUlUkf2xmCPJB8XJ3jR42RA5rjrirroZGDPYbPYIvGHIiE+DsN6XYZpTzhaKXRtXBWDuzXCOx6z57zZp2VBd/N/3nAZBl7TEH3aFl6yT3j4yoJEVrCN2/23jPiifwe0ql0BtSqWQvlS3gn5krIl8N2jV6Jzw0vQomYF/DS4C+Y8d23B673b1MRrN7dAmzoVMevpq/FQ1wbo07YW5j3XDY9e3QDP92yKmUMKW5A81b0xPurbDqPuutyttPnxve3c9tulkXv97ANdkrBq+HV45KoGqFwmUTs+3z+Ujg2qoGFV7xEmuzS6BJ/3c1zpXObSRjpfKXzno8fkK72bo3al0pj2RNeCk2ywfN1EH9C5fsGVnSvnOOv3dKyHwd0a4ZXe7ldzP7pcKcbFCYb2ugx92tbCkB5NMOHhKzHjya5oXL0cpj8R3nguH/Zti28e6hjWe4lCIcoC44wmJyerlJQUs8MwzJq0k7jj06VuyyI1cqFzDtxA2+s3dgUW7zqGKYM6I6lqWSS/NRd1KpfG4pe6F7ldV6kje+PjP3fiv3/swD9vaIb//L4dADDyjtbo27EwgQ/7eQMmrdyPD+5ui9va1fbalmesztffuq0VXpm2qWB5YrwgJ8/9+zzv+WvRw6WDUevaFfHLU1dh0c6j6D9uZcD9eO4vdWTvoD4/omghIquVUsmey2O+VY8Vta1TCU91b4yKpRNx/Hw2GntUDxXHT4M7Y2fGuYDrjLrrcny7bB/a16uMuDjBP29oVjCEgD9v394KLWpWwOp9J1G+VEJBAn74qoY4fSEHD3VtgOnrDmBHxjmvVlFDezVHhVKJ6N3GsY9vH+qIwRNW4507WqNSGe+qqs/7dcDrMzbhH8l1kJuXjxG/bAEA3NmhLpbtPoYezWtg55FzuLdjvYIrjrZ1K2Hd/lOYoF1RdG1UFfUvKYMh3Zvg9IUcXNnQ/zg0vzx5Fdbud/Qsvb5FDdStUsbvukRWYEqJX0RuBPAhgHgAY5VSIwOtb7cSf6x6bfomfLtsH967sw3uSq4bse1+szQVr8/YjEHXNvIa1IzIzqKmxC8i8QA+AdATQDqAVSIyQym1xehYyFgv3NAMpUvE47a2ke3FefcVdXHw1AU85aPenoi8mXFztyOAXUqpPUqpbADfA+hjQhxksAqlEjGsV/OIz2JUKjEew25qHlQ7dCIyJ/HXBrDf5Xm6tsyNiAwUkRQRSTl6NLiegkREVDQzEr+vVuBeNxqUUqOVUslKqeRq1aw9UTMRUTQxI/GnA3C9s1cHwEET4iAisiUzEv8qAE1EpIGIlADQF8AME+IgIrIlw++GKaVyReRJAL/D0ZzzS6WU9/CZRESkC1OaQSilZgGYZca+iYjsjmP1EBHZDBM/EZHNWGKQNhE5CsD/FEWBVQVwLILhGM3q8QPWPwbGbz6rH4NZ8
ddXSnm1h7dE4i8OEUnxNVaFVVg9fsD6x8D4zWf1Y4i2+FnVQ0RkM0z8REQ2Y4fEP9rsAIrJ6vED1j8Gxm8+qx9DVMUf83X8RETkzg4lfiIicsHET0RkMzGd+EXkRhHZLiK7RGSo2fE4iciXInJERDa5LKsiInNEZKf2t7LLa8O0Y9guIje4LO8gIhu11z4SEV9DXusRf10RmS8iW0Vks4g8baVjEJFSIrJSRNZr8b9hpfhd9h0vImtF5FeLxp+q7XudiKRY7RhEpJKITBGRbdpvobNl4ldKxeQ/OAaA2w2gIYASANYDaGF2XFps1wBoD2CTy7L3AAzVHg8F8K72uIUWe0kADbRjitdeWwmgMxxzHPwGoJdB8dcE0F57XB7ADi1OSxyDtq9y2uNEACsAdLJK/C7H8RyA7wD8arXvkLbvVABVPZZZ5hgAfAPgEe1xCQCVrBK/If/BZvzTPsjfXZ4PAzDM7Lhc4kmCe+LfDqCm9rgmgO2+4oZjVNPO2jrbXJbfA+ALk45lOhxzKFvuGACUAbAGwJVWih+OeSzmAeiOwsRvmfi1/aXCO/Fb4hgAVACwF1oDGavFH8tVPUFN8RhFaiilDgGA9re6ttzfcdTWHnsuN5SIJAFoB0ep2TLHoFWTrANwBMAcpZSl4gfwAYAXAeS7LLNS/IBj5r0/RGS1iAzUllnlGBoCOArgK626bayIlIVF4o/lxB/UFI8W4O84TD8+ESkH4CcAzyilzgRa1ccyU49BKZWnlGoLR8m5o4i0CrB6VMUvIjcDOKKUWh3sW3wsi4bvUFelVHsAvQA8ISLXBFg32o4hAY7q2s+UUu0AnIejasefqIo/lhO/1aZ4zBCRmgCg/T2iLfd3HOnaY8/lhhCRRDiS/kSl1M/aYksdAwAopU4BWADgRlgn/q4AbhWRVADfA+guIhNgnfgBAEqpg9rfIwCmAugI6xxDOoB07UoRAKbAcSKwRPyxnPitNsXjDAADtMcD4Kg3dy7vKyIlRaQBgCYAVmqXkWdFpJPWCuB+l/foStvfOABblVKjrHYMIlJNRCppj0sDuA7ANqvEr5QappSqo5RKguN7/adSqp9V4gcAESkrIuWdjwFcD2CTVY5BKXUYwH4RaaYt6gFgi1XiN+Qmjln/ANwER4uT3QCGmx2PS1yTABwCkAPHGf9hAJfAcbNup/a3isv6w7Vj2A6XO/4AkuH4sewG8DE8bjTpGP9VcFyObgCwTvt3k1WOAUAbAGu1+DcBeE1bbon4PY6lGwpv7lomfjjqyNdr/zY7f58WO4a2AFK079E0AJWtEj+HbCAisplYruohIiIfmPiJiGyGiZ+IyGaY+ImIbIaJn4jIZpj4KaaJSJ42+qPzX8BRWkVkkIjcH4H9popI1TDed4OIjBCRyiIyq7hxEPmSYHYARDq7oBxDMwRFKfW5jrEE42oA8+EYwXWJybFQjGLiJ1vShjuYDOBv2qJ7lVK7RGQEgHNKqf+KyBAAgwDkAtiilOorIlUAfAlHB6RMAAOVUhtE5BI4OuZVg2OYXXHZVz8AQ+AYuncFgMeVUnke8dwNxwiODQH0AVADwBkRuVIpdasenwHZF6t6KNaV9qjqudvltTNKqY5w9Jb8wMd7hwJop5RqA8cJAADeALBWW/YygG+15a8DWKwcA3bNAFAPAESkOYC74RiQrC2APAD3ee5IKTUZhXM0tIajJ2c7Jn3SA0v8FOsCVfVMcvn7Px+vbwAwUUSmwdElH3AMV/F3AFBK/Skil4hIRTiqZu7Qls8UkZPa+j0AdACwSptYqTQKB+7y1ASObvsAUEYpdbaogyMKBxM/2Zny89ipNxwJ/VYAr4pISwQeRtfXNgTAN0qpYYECEcfUg1UBJIjIFgA1tfkCnlJKLQp4FEQhYlUP2dndLn+Xub4gInEA6iql5sMx4UklAOUALIRWVSMi3QAcU465CFyX94JjwC7AMVDXnSJSXXutiojU9wxEKZUMYCYc9fvvwTFoWVsmfdIDS/wU60prJWen2UopZ5POkiKyAo4C0D0e74sHMEGrxhEA/1NKndJu/n4lIhvguLnrHIL3DQCTRGQNgL8ApAGAUmqLiLwCx0xTcXCMyPoEgH0+Ym0Px03gxwGM8vE6UURwdE6yJa1VT7JS6pjZsRAZjVU9REQ2wxI/EZHNsMRPRGQzTPxERDbDxE9EZDNM/ERENsPET0RkM/8PPtdUhSGRZ8kAAAAASUVORK5CYII=\n" + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "def plot_scores():\n", + " scores = np.vstack(ddpg_agents_tester.scores)\n", + " scores = np.max(scores, 1)\n", + " fig = plt.figure()\n", + " ax = fig.add_subplot(111)\n", + " plt.plot(np.arange(1, len(scores) + 1), scores)\n", + " plt.ylabel('Score')\n", + " plt.xlabel('Episode #')\n", + " plt.show()\n", + "\n", + "plot_scores()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5. 
Watch a Smart Agent" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "test_env = MyUnityEnvironment(file_name=ENV_FILE_NAME, worker_id=1)\n", + "test_env.set_timescale(1.0)\n", + "test_env.set_display_size(width=DISPLAY_SIZE[0], height=DISPLAY_SIZE[1])\n", + "ddpg_agents_tester.myenv = test_env\n", + "ddpg_agents.load_checkpoint(filename=CHECKPOINT_FILENAME)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n", + "Score: [20.0000003]\n" + ] + } + ], + "source": [ + "ddpg_agents_tester.test_agent(n_episodes=10, max_t=200)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "When finished, you can close the environment." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "train_env.close()\n", + "test_env.close()" + ] + } + ], + "metadata": { + "kernelspec": { + "name": "rl39", + "language": "python", + "display_name": "rl39" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/notebooks/3DBall_parallel_environment.ipynb b/notebooks/3DBall_parallel_environment.ipynb new file mode 100644 index 0000000..a22b4f5 --- /dev/null +++ b/notebooks/3DBall_parallel_environment.ipynb @@ -0,0 +1,618 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3D Balance Ball in parallel environments\n", + "\n", + "In this notebook, we will run the [3D Balance Ball example](https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Learning-Environment-Examples.md#3dball-3d-balance-ball) from [Unity ML Agents](https://unity.com/products/machine-learning-agents) in parallel environments. Please check the README file to setup this project.\n", + "\n", + "### 1. Start the Environment\n", + "\n", + "We begin by importing the necessary packages:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "from parallel_unity_environment import ParallelUnityEnvironment\n", + "from model import Actor, Critic\n", + "from ddpg_agents import DDPGAgents\n", + "from ddpg_agent import DDPGAgent\n", + "from replay_buffer import ReplayBuffer\n", + "from utilities import convert_to_tensor\n", + "import numpy as np\n", + "import torch\n", + "import torch.nn.functional as f\n", + "import random\n", + "from collections import deque\n", + "import time\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we will start the environment. 
Before running the code cell below, change the `ENV_FILE_NAME` parameter to match the location of the Unity environment that you [downloaded](README.md) or [created](../README.md#creating-a-custom-unity-executable) yourself. For example:\n", + "```\n", + "ENV_FILE_NAME = \"3DBall_Windows_x86_64/UnityEnvironment.exe\"\n", + "```\n", + "Four new windows should pop up, one for each environment. Don't worry if the windows become unresponsive." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "ENV_FILE_NAME = \"3DBall_Windows_x86_64/UnityEnvironment.exe\"\n", + "NUM_ENVS = 4 # number of environments to run in parallel\n", + "CHECKPOINT_FILENAME = \"checkpoint-3dball-parallel.pth\" # this is used for saving and loading the model\n", + "DISPLAY_SIZE = [1024, 768] # The width and height of the Unity windows\n", + "\n", + "test_env = ParallelUnityEnvironment(num_envs=NUM_ENVS, seeds=list(range(NUM_ENVS)),\n", + " file_name=ENV_FILE_NAME, no_graphics=False)\n", + "test_env.set_timescale(1.0)\n", + "test_env.set_display_size(width=DISPLAY_SIZE[0], height=DISPLAY_SIZE[1])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Examine the State and Action Spaces\n", + "\n", + "In this environment, an agent must balance a ball on its head for as long as possible.\n", + "\n", + "**Agent Reward Function:**\n", + "- +0.1 for every step the ball remains on its head.\n", + "- -1.0 if the ball falls off.\n", + "\n", + "**Behavior Parameters:**\n", + "- Vector Observation space: 8 variables corresponding to rotation of the agent cube, and position and velocity of ball.\n", + "- Actions: 2 continuous actions, with one value corresponding to X-rotation, and the other to Z-rotation.\n", + "\n", + "Run the code cell below to print some information about the environment:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of agents: 1\n", + "Size of each action: 2\n", + "States look like: [-0.01467304 -0.01468306 -0.52082086 4. -0.79952097 0.\n", + " 0. 0. ]\n", + "States have shape: (8,)\n" + ] + } + ], + "source": [ + "def examine_environment(env: ParallelUnityEnvironment):\n", + " # number of agents in the first behavior:\n", + " print('Number of agents:', env.num_agents_list[0])\n", + "\n", + " # number of actions\n", + " print('Size of each action:', env.behavior_specs[0].action_spec.continuous_size)\n", + "\n", + " # examine the state space\n", + " print('States look like:', env.get_observations(0, 0)[0])\n", + " print('States have shape:', env.behavior_specs[0].observation_specs[0].shape)\n", + "\n", + "examine_environment(test_env)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Take Random Actions in the Parallel Environment\n", + "\n", + "Run the code cell below, to watch a random agent in action." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Score from environment 3, episode 0: 1.40\n", + "Score from environment 0, episode 1: 1.90\n", + "Score from environment 2, episode 2: 2.10\n", + "Score from environment 1, episode 3: 2.70\n", + "Score from environment 3, episode 4: 1.40\n", + "Score from environment 2, episode 5: 2.10\n", + "Score from environment 1, episode 6: 1.70\n", + "Score from environment 3, episode 7: 1.30\n", + "Score from environment 0, episode 8: 3.40\n", + "Score from environment 2, episode 9: 0.70\n", + "Time elapsed: 8.16\n" + ] + } + ], + "source": [ + "def test_random_agents(env: ParallelUnityEnvironment, n_episodes: int, max_t: int):\n", + " start_time = time.time()\n", + " current_episode = 0\n", + " current_timestep_list = [0] * env.num_envs\n", + " scores = np.zeros(env.num_envs)\n", + " reset_list = [True] * env.num_envs\n", + " reset_env = True\n", + " while current_episode < n_episodes:\n", + " # reset environments if needed:\n", + " if reset_env:\n", + " env.reset(reset_list)\n", + " for env_index, reset in enumerate(reset_list):\n", + " reset_list[env_index] = False\n", + "\n", + " # set actions for each environment:\n", + " for env_index in range(env.num_envs):\n", + " actions = np.random.randn(env.num_agents_list[0], env.behavior_specs[0].action_spec.continuous_size)\n", + " actions = np.clip(actions, -1, 1)\n", + " env.set_actions(behavior_index=0, env_index=env_index, continuous=actions)\n", + "\n", + " # step forward in all environments:\n", + " env.step()\n", + "\n", + " for env_index in range(env.num_envs):\n", + " # collect experiences from environment:\n", + " _, rewards, dones = env.get_experiences(behavior_index=0, env_index=env_index)\n", + " scores[env_index] += rewards.squeeze()\n", + " current_timestep_list[env_index] += 1\n", + "\n", + " # check if episode has ended:\n", + " if current_timestep_list[env_index] >= max_t or np.any(dones):\n", + " print(f\"Score from environment {env_index}, episode {current_episode}: \"\n", + " f\"{scores[env_index]:.2f}\")\n", + " current_timestep_list[env_index] = 0\n", + " reset_list[env_index] = True\n", + " reset_env = True\n", + " scores[env_index] = 0.0\n", + " current_episode += 1\n", + "\n", + " print(f\"Time elapsed: {time.time() - start_time:.2f}\")\n", + "\n", + "test_random_agents(test_env, n_episodes=10, max_t=200)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "outputs": [], + "source": [ + "test_env.close()" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "### 4. Train the Agent with DDPG\n", + "\n", + "Run the code cells below to train the agent from scratch.\n", + "\n", + "Alternatively, you can skip to the next step below (**5. Watch a Smart Agent**), to load the saved model weights from a pre-trained agent." 
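For context, the training loop in the next cells delegates the actual learning to `ddpg_agents.step(samples)` and `ddpg_agents.update_target_networks()`. Their implementations live in `ddpg_agent.py`/`ddpg_agents.py`, which are not part of this excerpt, so the snippet below is only a minimal sketch of what a standard DDPG update looks like. The attribute names (`actor`, `critic`, `target_actor`, `target_critic`, the two optimizers) are illustrative assumptions, not the project's actual API; `gamma` and `tau` correspond to the constructor arguments used further down in the notebook.

```python
# Minimal sketch of one DDPG update, assuming a batch of float tensors
# (states, actions, rewards, next_states, dones) and an agent object with
# hypothetical attributes `actor`, `critic`, `target_actor`, `target_critic`,
# `actor_optimizer`, `critic_optimizer`, `gamma`, `tau`. The real logic is
# in ddpg_agent.py, which is not shown in this part of the diff.
import torch
import torch.nn.functional as F


def ddpg_update(agent, batch):
    states, actions, rewards, next_states, dones = batch  # dones: 0/1 floats

    # Critic update: regress Q(s, a) onto the one-step TD target
    # computed with the (slow-moving) target networks.
    with torch.no_grad():
        next_actions = agent.target_actor(next_states)
        q_next = agent.target_critic(next_states, next_actions)
        q_target = rewards + agent.gamma * q_next * (1.0 - dones)
    q_expected = agent.critic(states, actions)
    critic_loss = F.mse_loss(q_expected, q_target)
    agent.critic_optimizer.zero_grad()
    critic_loss.backward()
    agent.critic_optimizer.step()

    # Actor update: ascend the critic's estimate of Q(s, actor(s)).
    actor_loss = -agent.critic(states, agent.actor(states)).mean()
    agent.actor_optimizer.zero_grad()
    actor_loss.backward()
    agent.actor_optimizer.step()


def soft_update(target_net, local_net, tau):
    # Polyak averaging: target <- tau * local + (1 - tau) * target
    for t_param, l_param in zip(target_net.parameters(), local_net.parameters()):
        t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)
```

A small side note on exploration: `train_agents` multiplies the noise scale by `1.0 - noise_decay` once per collected experience, so the `noise_decay=6.93e-6` used below halves the exploration noise roughly every ln(2)/6.93e-6 ≈ 100,000 steps.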
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "class DDPGAgentsTester:\n", + " def __init__(self, ddpg_agents: DDPGAgents,\n", + " env: ParallelUnityEnvironment,\n", + " buffer_size=int(1.0e6), # replay buffer size\n", + " noise_start=1.0\n", + " ):\n", + " self.ddpg_agents = ddpg_agents\n", + " self.env = env\n", + " self.buffer_size = buffer_size\n", + " self.scores = []\n", + " self.scores_deque = deque(maxlen=100)\n", + " self.episode = 0\n", + " self.noise = noise_start\n", + " self.replay_buffer = ReplayBuffer(buffer_size)\n", + "\n", + " def train_agents(self, n_episodes, max_t, goal=float(\"inf\"), print_every=1000, update_every=1,\n", + " num_updates=1, batch_size=64, noise_decay=6.93e-6):\n", + " \"\"\" Multi Agent Deep Deterministic Policy Gradient algorithm.\n", + "\n", + " Params\n", + " ======\n", + " n_episodes (int): maximum number of training episodes\n", + " max_t (int): maximum number of timesteps per episode\n", + " goal (float): the algorithm will stop when the goal is reached\n", + " print_every (int) : print intermediate results every %print_every episodes\n", + " update_every (int): update the neural networks every %update_every time steps\n", + " num_updates: How many updates to do in a row\n", + " batch_size (int): minibatch size\n", + " noise_decay (float): noise decay factor = 1.0 - %noise_decay\n", + " \"\"\"\n", + " noise_decay = 1.0 - noise_decay\n", + " stop_episode = self.episode + n_episodes\n", + " timesteps = 0\n", + " start_time = time.time()\n", + " last_print_time = 0\n", + " current_timestep_list = [0] * self.env.num_envs\n", + " scores = np.zeros((self.env.num_envs, len(self.ddpg_agents)))\n", + " states_list = [np.ndarray((0,))] * self.env.num_envs\n", + " actions_list = [np.ndarray((0,))] * self.env.num_envs\n", + " reset_list = [True] * self.env.num_envs\n", + " reset_env = True\n", + " while self.episode < stop_episode:\n", + " # reset environments if needed:\n", + " if reset_env:\n", + " self.env.reset(reset_list)\n", + " for env_index, reset in enumerate(reset_list):\n", + " if reset:\n", + " states_list[env_index] = self.env.get_observations(behavior_index=0, env_index=env_index)\n", + " reset_list[env_index] = False\n", + "\n", + " # get a batch of states from all environments:\n", + " env_states = np.stack([states for env_index, states in enumerate(states_list)], axis=1)\n", + " # get actions from all agents:\n", + " env_actions = self.ddpg_agents.act(convert_to_tensor(env_states), self.noise)\n", + "\n", + " # set actions for each environment:\n", + " for env_index in range(self.env.num_envs):\n", + " actions_list[env_index] = env_actions[:, env_index, :]\n", + " self.env.set_actions(behavior_index=0, env_index=env_index, continuous=actions_list[env_index])\n", + "\n", + " # step forward in all environments:\n", + " self.env.step()\n", + "\n", + " for env_index in range(self.env.num_envs):\n", + " # collect experiences from environment:\n", + " next_states, rewards, dones = self.env.get_experiences(behavior_index=0, env_index=env_index)\n", + "\n", + " # add sample to replay buffer:\n", + " sample = (states_list[env_index].copy(), actions_list[env_index].copy(), rewards, next_states, dones)\n", + " self.replay_buffer.add(sample)\n", + "\n", + " # update networks every %update_every time steps:\n", + " if timesteps % update_every == 0 and len(self.replay_buffer) > batch_size * 100:\n", + " for _ in range(num_updates):\n", + " samples = 
[self.replay_buffer.sample(batch_size) for _ in range(len(self.ddpg_agents))]\n", + " self.ddpg_agents.step(samples)\n", + " #soft update the target network towards the actual networks:\n", + " self.ddpg_agents.update_target_networks()\n", + "\n", + " states_list[env_index] = next_states\n", + " self.noise *= noise_decay\n", + " scores[env_index] += rewards.squeeze()\n", + " current_timestep_list[env_index] += 1\n", + " timesteps += 1\n", + "\n", + " # check if episode has ended:\n", + " if current_timestep_list[env_index] >= max_t or np.any(dones):\n", + " self.scores_deque.append(scores[env_index, :].copy())\n", + " self.scores.append(scores[env_index, :].copy())\n", + " current_timestep_list[env_index] = 0\n", + " reset_list[env_index] = True\n", + " reset_env = True\n", + " scores[env_index, :] = 0.0\n", + " self.episode += 1\n", + "\n", + " average_scores = np.mean(self.scores_deque, 0) # average score over last 100 episodes for each agent\n", + " if time.time() - last_print_time > 1.0:\n", + " time_per_step = (time.time() - start_time) / timesteps\n", + " print('\\rEpisode {}\\tSteps: {}\\tTime per step: {:.6f}\\tAverage Scores: {:.3f}'\n", + " .format(self.episode, timesteps, time_per_step, *average_scores), end=\"\")\n", + " last_print_time = time.time()\n", + " if self.episode % print_every == 0:\n", + " print(\"\\r\" + \" \" * 80, end=\"\")\n", + " print('\\rEpisode {}\\tAverage Scores: {:.3f}'.format(self.episode, *average_scores))\n", + " if np.max(average_scores) >= goal:\n", + " print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}\\tTime elapsed: {}'.format(\n", + " self.episode, np.max(average_scores), time.time() - start_time))\n", + " return\n", + "\n", + " def test_agents(self, n_episodes, max_t):\n", + " current_episode = 0\n", + " current_timestep_list = [0] * self.env.num_envs\n", + " scores = np.zeros((self.env.num_envs, len(self.ddpg_agents)))\n", + " states_list = [np.ndarray((0,))] * self.env.num_envs\n", + " actions_list = [np.ndarray((0,))] * self.env.num_envs\n", + " reset_list = [True] * self.env.num_envs\n", + " reset_env = True\n", + " while current_episode < n_episodes:\n", + " # reset environments if needed:\n", + " if reset_env:\n", + " self.env.reset(reset_list)\n", + " for env_index, reset in enumerate(reset_list):\n", + " if reset:\n", + " states_list[env_index] = self.env.get_observations(behavior_index=0, env_index=env_index)\n", + " reset_list[env_index] = False\n", + "\n", + " # get a batch of states from all environments:\n", + " env_states = np.stack([states for env_index, states in enumerate(states_list)], axis=1)\n", + " # get actions from all agents:\n", + " env_actions = self.ddpg_agents.act(convert_to_tensor(env_states), self.noise)\n", + "\n", + " # set actions for each environment:\n", + " for env_index in range(self.env.num_envs):\n", + " actions_list[env_index] = env_actions[:, env_index, :]\n", + " self.env.set_actions(behavior_index=0, env_index=env_index, continuous=actions_list[env_index])\n", + "\n", + " # step forward in all environments:\n", + " self.env.step()\n", + "\n", + " for env_index in range(self.env.num_envs):\n", + " # collect experiences from environment:\n", + " next_states, rewards, dones = self.env.get_experiences(behavior_index=0, env_index=env_index)\n", + " states_list[env_index] = next_states\n", + " scores[env_index] += rewards.squeeze()\n", + " current_timestep_list[env_index] += 1\n", + "\n", + " # check if episode has ended:\n", + " if current_timestep_list[env_index] >= max_t or 
np.any(dones):\n", + " print(f\"Score from environment {env_index}, episode {current_episode}: \"\n", + " f\"{scores[env_index, 0]:.2f}\")\n", + " current_timestep_list[env_index] = 0\n", + " reset_list[env_index] = True\n", + " reset_env = True\n", + " scores[env_index, :] = 0.0\n", + " current_episode += 1" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "outputs": [], + "source": [ + "random_seed = 1\n", + "np.random.seed(random_seed)\n", + "torch.manual_seed(random_seed)\n", + "random.seed(random_seed)\n", + "train_env = ParallelUnityEnvironment(num_envs=NUM_ENVS, seeds=list(range(random_seed, random_seed + NUM_ENVS)),\n", + " file_name=ENV_FILE_NAME, no_graphics=True, worked_id_start=10)\n", + "train_env.set_timescale(100.0)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "code", + "execution_count": 8, + "outputs": [], + "source": [ + "actor1 = Actor(state_size=8, action_size=2, hidden_layer_sizes=[400, 300], activation_func=f.leaky_relu)\n", + "critic1 = Critic(state_size=8, action_size=2, hidden_layer_sizes=[400, 300], activation_func=f.leaky_relu,\n", + " inject_layer=0)\n", + "ddpg_agent1 = DDPGAgent(actor1, critic1, gamma=0.99, tau=1.0e-3, lr_actor=1.0e-4, lr_critic=1.0e-3, weight_decay=1.0e-2)\n", + "ddpg_agent_list = [ddpg_agent1]\n", + "ddpg_agents = DDPGAgents(ddpg_agent_list)\n", + "ddpg_agents_tester = DDPGAgentsTester(ddpg_agents, train_env, buffer_size=int(1.0e6), noise_start=1.0)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "You can skip this cell, if you don't want to train the agent from scratch. It may take 30 to 45 minutes:" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Episode 1000\tAverage Scores: 1.270 \n", + "Episode 2000\tAverage Scores: 1.220 \n", + "Episode 3000\tAverage Scores: 1.101 \n", + "Episode 4000\tAverage Scores: 2.310 \n", + "Episode 5000\tAverage Scores: 5.038 \n", + "Episode 6000\tAverage Scores: 4.930 \n", + "Episode 6708\tSteps: 234123\tTime per step: 0.009527\tAverage Scores: 9.930\n", + "Environment solved in 6710 episodes!\tAverage Score: 10.00\tTime elapsed: 2231.088708639145\n" + ] + } + ], + "source": [ + "ddpg_agents_tester.env = train_env\n", + "ddpg_agents_tester.train_agents(n_episodes=int(1.0e5), max_t=100, goal=10.0, update_every=1,\n", + " num_updates=1, batch_size=64, noise_decay=6.93e-6)\n", + "ddpg_agents.save_checkpoint(filename=CHECKPOINT_FILENAME)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "data": { + "text/plain": "
", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEGCAYAAACQO2mwAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAAAy7ElEQVR4nO3deXgUVdYH4N/Jxr4Fwr6EHUQQMICAIAiMSBRGHAdxXBgdcUGR0W8UlBll3BjHcRx3GQQdFUQQAdlkB0G2AGELWyCBhASSAAlryHa/P6q6093p7lR319p13ufhoVPprjpJd+rUvXXvuSSEAGOMMeZNhNEBMMYYMy9OEowxxnziJMEYY8wnThKMMcZ84iTBGGPMpyijA1CiQYMGIj4+3ugwGGPMUnbt2pUnhIgLZR+WSBLx8fFISkoyOgzGGLMUIjoZ6j64u4kxxphPnCQYY4z5xEmCMcaYT5wkGGOM+cRJgjHGmE+aJQkimkVEOUR0wGVbLBGtJqJj8v/1tDo+Y4yx0GnZkvgSwHCPbZMBrBVCtAewVv6aMcaYSWk2T0IIsYmI4j02jwIwSH78FYANAF7SKgbGtHD5egnWpJzFb3s0AwCsO3wWnRrXRtO61YLeZ2FxKZbuy8a9PZuBiILax+qUszhwugB/HtYBP+3NQnz9Gvhi8wksSs7CuH7x+E2XRsi/Wox1h3Nw4HQBDp+5hCcGtsFNLeqiRpUo5F8twpmCQjxxW9uAj73+cA46NK6FSCKMm70Dh89cAgB0bVYHjWpXRXJGPvIuX6/wurG9W2LT0Vw8Pbgtrl4vxZvLD7l9v0pUBBY/0x+dGtcO6ncSjNIygbEztqFp3apYlJwFADj6xp2IidLmmjq74BoOZV/EzF/SQATc3NK9g+Wens3RukENTY6tBGm5noScJJYKIW6Uv84XQtR1+f4FIYTXLiciGg9gPAC0bNny5pMnQ54TwpgqnvtuDxYnZ2HJM/3RrXldxE9ehgY1Y5A0dVjQ+/z7TymYtSUNs8Yl4PZOjYLaR/zkZQCANc/fhqHvbQw6luUTB+CGpoGdlOMnL0OdatEouFYc9HH9SZ+eqMl+vflicxpeX5riti2hVT0seKqfJsdLeGONWwL1vEaYPa4XBnVsGNS+iWiXECIhlPhMO+NaCDEDwAwASEhI4JWRmGlkFxQCAK5cL3Vuy7tcFNI+c+WTxKXCkpD2A0itklBcC/L1WiUIvZ3z0uJJP3dVs+N5trDS3tYvISqh9+ims0TUBADk/3N0Pj5jIQuuM4hZm32vU/VOEksAPCI/fgTAYp2Pz5hqhI1PHMw+tBwCOxfAVgAdiSiTiB4DMB3AMCI6BmCY/DVjluLsM+YcwWxAy9FNY318a4hWx2RMDyR3OHGOsBP7djLyjGvGAhTkCFVmEZz83XGSYCxIGo4eZ8w0OEkwFiBHS4JvXNuJfd9rThKMBYhs3D/N7IeTBGNB4u4mZgecJBgLUHl3E2Phj5MEY0HSsu4ZY2bBSYKxAAVbpZUxK+IkwViQuB3B7ICTBGMB4nYEsxNOEowxVin7XhpwkmAsWNzfZCP2fbM5STAWIJ5xHd540Jo7ThKMBci+HQ/2ZefEwUmCsSDZ+cQRzry1EO086pmTBGMBcsyT4CTB7ICTBGMB4oXpwhsXcHTHSYKxANm568HhwOkC/JqaZ3QYTAeaLV/KWLizc+2muz7cDABIn55ocCRMa9ySYCwA20+cw5pDOQC4u8lObHw9wEmCsUCMmbHN6BBsKS3vitEh2BYnCcaCpObVJd/m8K+4tMzoEGyLkwRjQVMvS5ipN4NvzDNXnCQYC5JZ+6nNGhezJk4SjDE3nGQqulZcanQIhuEkwRhjLryV5bBz4uQkwViQBOw9V8JO7HyfhpMEY4xVwsY5gpMEY8HiRgSzA04SjMmKSsrwxeY0lPCYfNMx+kreztcDnCQYk83cfAKvL03Bt9tPKX4Ntyb0wb9m4xiSJIjoz0R0kIgOENFcIqpqRByMubpUWAIAuHy9RNHzy4TArC1pWobETMLoloyRdE8SRNQMwEQACUKIGwFEArhf7zgYC9VPe7PwxrJDRofBmKaM6m6KAlCNiKIAVAeQZVAcjAXNrBOs7Dxck6lP9yQhhDgN4F0ApwBkAygQQqzyfB4RjSeiJCJKys3N1TtMxipl1vsRZo2LWZMR3U31AIwC0BpAUwA1iOhBz+cJIWYIIRKEEAlxcXF6h8mYrvji30Q4yboxortpKIA0IUSuEKIYwEIA/QyIgzHTMNN5yYzdVUaHRGb8pejEiCRxCsAtRFSdpN/8EAB8948xlXirPcRC41p+JfPCVcxPyjAwGn3pvsa1EGI7ES0AsBtACYA9AGboHQdjnrgvnykx5vNtOJ1/DSO7N0WVqEijw9Gc7kkCAIQQrwJ41YhjM6aWcO2B4GRZ0ZUiaSTb+StFOJ1/zeBo9MUzrhmTBXrS55Op/Uyal2x0CLrjJMGYLFxO+ltSzxkdgurM8tbkXy0yOgTdcZJgzIPVu5H+sfKw0SGwMGL7JFFcWobPNh7H9RJzzp5l5sWjiMKUxS8S1Gb7JPHttpOYvuIw/rvphNGhMJMIl26ncKLreZvffze2TxKOUQuO/xlTisL0kvPkuStGh2AofzkiPN9x/2yfJBjzpPSehJm6m07kXlZtX89/v1e1fTHr4yTBWBgoKTNPwrI6X9cIC3Zl6hqHWXCSYMwE7NiNYTX/N9+eLSxOEox52HSUS9Mz71KyLxodgu44STDmYduJ87ofkzuLzMPfe1Fcar93ipMEY4wxnzhJMBYknk/B7ICTBGNhgBMW0wonCRmPLmGBsnqNJxaacJ1M6YmTBGMAysoEPtt4PKDXmOnq/UpRidEh2I6ZJlNqiZOEzB5vN/Pl0BlrD2189+cjRocQNoSZsr8JcJJgDEBhse/aXUIIfLrhuKnXErh8nVsSauEc4c6Q5UsZM5v5Sb5LLmw9fg7/WHkY+0/n6xdQgPjEph6+1+SOWxKMwX9Loqi0DABwqdD9at3zxCyEdF8j7/L1CvsoLi3DB2uP4ZpG1YbTDarcaueumQ/XpqKopMzoMDTHSYKxSpDCS8u9mQWYvuIwXvBSRXXezgy8t/ooPlp/TO3wAFRMYHpJzVGv+qw/Zry6/2h9Kr7edtLoMDTHSYIxKFtDoLKL5hK5xeHt/oCjpXKtyPuVpwnPgYqU2rglAfhvgYYLThKMVcJxFWuXIY8AkHnhqtEhuNEzFwVyLDO2cNTGSYKxSkTIZwI7XTQLAeRfLcJH646hjNeq8MkOE+o4ScjC/61m/vhLAEq7m5Sw0pXn1EUH8O6qo9icmmd0KKZlpfczWJwkGIOyyZSe3U0Vv3a34UiO4rUpzHitfkW+t1JSFv4jeIJlgxzB8yQYq5TjnoTCM7njxDFu9k4AQPr0RPVjYpoJJGFzS4Ixm/D3t+7od/Y8efhKGmZsFWjl840nkHOx0OgwmIa4JSGz0x82C4zzarGSD4kNLior+HHPaZy+cE3z4+h5xW7H99EfbkkwVglfJ41A
TlxqjYxadfAMftXhRnIgP1u41Y0KqLvJBimFWxKMKVTZPAklJ5dQTynjv94FQPv7HEJw61oJviehESKqS0QLiOgwER0ior5GxMFYIHamX1D0PDXPG2VlAu+vOWrqCrRldppAYkNGdTf9B8BKIUQnADcBOGRQHIwBCO6qWY9z48ajuXh/zTH8dfFB7Q8WpMNnLhkdgmGU1vWyMt27m4ioNoCBAMYBgBCiCIDhl0nh/1Yzf4KpZro97bwGkbgrlutBaVU91heznfv0bKwE0jIK9dd09Kz5E6wRLYk2AHIBzCaiPUQ0k4hqeD6JiMYTURIRJeXmKpuQxJgW7HC1yMptVDgBEgg9mQ5/f1NoO9CBEUkiCkBPAJ8KIXoAuAJgsueThBAzhBAJQoiEuLg4vWNkNldwrVjV/VV205vTkH965mlH602JUMOyQlksI5JEJoBMIcR2+esFkJIGY6bx1rLy22RqnqCs0iixc+uJ78O70z1JCCHOAMggoo7ypiEAUvSOgzF/CkvK7wEoPWn8e/VR1eM4ec74kt12mAvgipOEO6PmSTwL4FsiigFwAsAfDYqDMdX8evyc6vt8c7nUormi84Q1Oy9LGgg7tLgMGQIrhEiW7zd0E0L8VgihbAA6YwbQ4zxg5lPy+2uPeV23O1ydzldeZmTO9lMaRmIOXJZDZuY/UqY9s7//el+wul4h783Ix1/mV1y3mwFHLDCENVScJBhTmbcTuq/em1mb05Cac1nbgFRwzQZrOQfL0RV4vaQU76w8jKtF4VXLSnGSIKJqLjebGQtr5OOxEn5XuXPJIEII/H1pCkZ9tDnAIzAz+XBdKgDg222n8MmG4/hk/XGDI1KXoiRBRHcDSAawUv66OxEt0TAu3YX/7SdmVld0nk2thPn+HswXkcN1eSScY35FkcJ5FheuGF5oQhGlLYnXAPQGkA9IN54BxGsREGN2EcgAIiMG0ZhrgJOpgnET7O/p1SXmrcflSmmSKBFCFGgaCWMmsig5CyUBzLwNRiA1gsxyws4uuIb/rDlmdBim98OuTOxM91/bq9Ai93mUJokDRPQAgEgiak9EHwL4VcO4GNOXl5Pw0n3ZQe1K6VX/1hPqz6vQiiNJTfh2N/69Rv1Jg+Hmhfl7cd9nW40OQxVKk8SzALoAuA5gDoACAJM0ikkTZWUC7/58BLmX7DPem4WmpEwg7/J1/GtVYCfFnekX8M22k86vF+7OxDYvCaGkNIBqoybpkr9WrG3ryk4ydVj2VQ2VzrgmokgAS4QQQwG8on1I2tiWdg4frU/FoeyL+GJcL6PDYSbjqwDf5B/2BXXFP3XRAefj578vn2PgcynUgI+gL0eSMnucahvXLx5f/pru9zmO2emB9gimZF8MLiidVdqSEEKUArhKRHV0iEczpXK5xeslfCXElCvU8Mq5ssqwZnTojDVObGrpFR9rdAiGU1q7qRDAfiJaDam0NwBACDFRk6gY05m3G8Na1y8KaHSTztfwvrq3zHIDnelHaZJYJv8LO1zIjOkpOSMfc7afwgN9WrptX7I3y6CIrMKYji4lrT3PZ4Rbl5yiJCGE+Equ2NpB3nRECKHuqiw6sWITn2nP25WzFhU+t6edx/a083igT0vTz5Ngyni+j+F2hlE643oQgGMAPgbwCYCjRDRQu7DUUVJahrdXHEL+1SKfzXU7lPpl5hTIySQ5I9/n99Yfzgk5Fk/vrDziVg3Vrg1uJd18X287ics6l3LXk9Lupn8B+I0Q4ggAEFEHAHMB3KxVYGr4+eBZfL7xBHIvXcfoHs2NDodZjJm6Ii8V+j4J/fHLnaof78c9p1XfZ2jM81548+HaY6hXIwZA+HU3KZ0nEe1IEAAghDgKIFqbkNRTUiaNTCkuFdzNxEwnlCT0xtIUXDNhzSe7Kg5gzovVKG1JJBHRFwC+lr/+A4Bd2oTEmP5M1GhQZObmNNSrEYMJg9sZHUpYG9ChgaLnhXOvtdKWxFMADgKYCOA5SGtSP6lVUFow6zq910tK8cbSFFwqtOQ4gLDhK0lo+cfvLy99siEVxypZ0Ka0TGDGJn3KUm9P81+HKFzViFF2HW3Os4s6lLYkogD8RwjxHuCchV1Fs6hsZMGuTMzcnIZSIfDq3V2MDod50LKF4Wvf0uI1R/DphsoTwFvLD6scFWPulLYk1gKo5vJ1NQBr1A/HfhwzwQOp48PChf/3/DrXSXJh7mt11xbn55tOGBeIBpQmiapCCOcai/Lj6tqEpC2r9T0zZkcvj+hkdAhMpjRJXCGino4viCgBgDVKGMp8lxmQssYnG46jrIwzCHMXzjckzax9w1pGhwBAeftlUXIWvqqkEKBVKb0nMQnAfCLKgtRGbgpgjFZBGWVn+nn0aVPf6DCYSWh9yRBqqzac81esPOfAKsJ5CQK/LQki6kVEjYUQOwF0AjAPQAmkta7TdIhPNUr+ILkdYU+bjuZi5cEzFba/vjRF99XDtqTmYc72UwAqXytZ78/rtJ/0W24zMqLyFHgwqwCfbdRndJedVdaS+BzAUPlxXwAvQ1qAqDuAGQB+p1lkNsH3SIz38KwdXrdfKizBzvQLmh3X21v/fVKm8tfr/NmZvSVd3wNWIvGDzQCAJ29ra3Ak4a2yJBEphHAMkB4DYIYQ4gcAPxBRsqaRqcxX3zLXbmJG4QsE3yr+bso3pOZcwvL9FVt+WuDTg4IkQURRQogSAEMAjA/gtUwB/hCyYNmp1Ixr0hjz+Tacu1JkXDA2U9mJfi6AjUSUB2k00y8AQETtIK1zzRgLkp1O8oHyvHhy/U0VWWh1ycLiUlSNjjQ6jJD4vXEthHgTwAsAvgRwqyivSBYB6d6E5XATn4ULO32WrfqzOgYhWFmlXUZCiG1eth3VJhztcK8OM5tAT3zL9mW7fX3q/FUVozG3U+evYnHyafzljo66HjfUe5ZlPt7kkkpGrpmJLe4r+HubjV4zwKpXSCx0gb71E+bsdvvafGs+aOfx/yUBAEb3bGZwJIHxlWTWabBQlFaUzrhWHRFFEtEeIlqq9bEEzD8Hgm9gM2YfvloYZmRYkoBUcvyQgcdnzFBGt2LN6uG+rYwOQTXhcO1nSJIgouYAEgHM1OV4KH+ztp44h5Ssi3ocNiB8vmBMMrhTQ6NDYC6Makm8D+BFAD7v3hDReCJKIqKk3NzckA7mef4d9fHmkPanJu5mYsx+rHRRqHuSIKK7AOQIIfwufyqEmCGESBBCJMTFxakaQ3GpCLo18eWWNGw9fk7VeBhjyvxr1dEKF31m7rZzvQgsKxN4Y2kKMi9Ya1SaES2J/gBGElE6gO8A3E5E32h5QG8X68G2Jl77KQVj/1thVHDQTPz5Zsx0Vhw4g8vXS9y2mflvyPXcs/90AWZuTsPEuXsMiycYuicJIcQUIURzIUQ8gPsBrBNCPKjpMYEKmaJYXglulkmKlnG3k72UlJaZ+uTG1PHN9lPYfCwPQHm39+5T+fjSQmtPGDm6yRTOu9SA4fM008svqXlGhxA2zJxrU3Mu48EvtgNwP79sTzvv/QUmZOhkOiHEBgAbtD4OAYo+SWb
+sLEwI7h2ky9GXqxtOKLNJLenvtmF47mXK3+iCdlixrUVcNcDY8GRblyrk1rGzd6pyn48rTigrLT5Y7e21uT4obBNkkjJNt/cCIDvRdjV8v3ZOBvGS16y4Iy8qanRIVRgmyTxxjJzTu7mFoQ9zd+lfAU65h//CWnL1jeuv9thnjK+3KJgTBLoSf/dVUcCmiux8Wguvt6aHuBR9GHGhGfrJDF54X6jQ2CMhejzjScqzJ3w55FZO/DXxQc1jCi82DpJhJsle7OwONk+5aNZeCIE3rL+2+KDKA5yjYZdJ8/jkw2pQb3WDjhJhJGJc/fgue+SjQ6DsZAFeq/uxz2nsfZQcMNX7/10K95ZeSSo19oBJwnGWJgIrUd/vLywEXPHSYIxFhZCHSm4KuWsOoGEGU4SLniAEWPmYNfRfmasaBvWSSLQ37f53h7GmFL896uNsE4SDqUqZefSMnU/hkUlZXh1CQ/FY8wVBdmMeH1pimXrI5lZWCcJx2dtm0qLBCWlq1u5cd1h7gNlTC3ZBYX401fWvvnctVkdo0OoIKyThNrde2o3Z7edKE86qTmX8frSFFP2STJmFY6/HyEE3l5+CIdMWrPNl6hI852SzReRBpS2Xit7mtr30lwXHvn1+Dl8sTkNmReuqXwUxuzn4rUSfL7pBO6fod4qknYV1knCiiMkrBgzY2aRfu4q4icvw6nz5etIf7j2GHZ66Sren1mgZ2iWFdZJQu3RTcHeUGOMKdevbf2Q9/HATKkFQQT8a/VR3PfZ1grPGRnkOvd2E9ZJolxgJ/eci4WY/MM+FJUEVwuGMRa8aBX65aMipL/5/KvFbttzXdbwEALIuVQY8rHCnU2SRGBeXXIQ3+3MwJpD7qOPuCHBmD4oxDuAkRHeT21vLXdfV+aNpeZcZ8ZMbJEk8i4rWwHM8bF0dFM9/e1unDp31efzWbmyMoFXFx/gcerMFHz9zf+4h6skB8oWSSIUk+btcT7mhoRvx3Mv46utJ7lIGlOF4PnTpsFJwoW3j+XuU/kQQuDY2UuY9lOKc/uvqXn4aN0xxfteuDsT85MyVIjSnBy/uwgL9cllF1zDSwv2GR0G08l7q49W2LZkb5YBkViLbda4DsXVolI8MmsHsgrKb3I9MHM7AOCZ29sr2sfz3+8FANyX0EL9AE3A0UVnoRyBqT8ewNrDwa1BwLQV6j0Jbz5Yq/yijpXjloSLA6elcdOpHv3qROYuHiaEQPzkZYYce8rCfej39lqUyVni6Fm+J8FYOAnrJBFov+Yby6SRDqk51jrRnc43bpb23B0ZyCootORMcTMnfsbMIqyTRDB8jWZSq6TSrpMX8PYK38Pu1JiwZ0T9J645xVh4CuskEcx56/nvkytsI5Bqoy3u/fRXfL7xhCr7cvD8Oa8Ulaq6f0Ux6H5EFs58THNgBgjrtyKYJOHtJdkF/rtSDmVfxN8WH6hwNb31+DmM/GhzQKOgJv+wD99sO6n4+Wbh+qPziBEWKiuNkgt3PLrJg7duk2k/pfhNOA/P2oHcS9cxYXA7NKpd1bl97H+l+jH7Aigk9suxPPxyLA8P3tJKedCmUP4Lmjh3D0be1NTAWJjVRRicIx66pRUa1a5ibBAmEdYtCTV51oBxFcznedA/11f6nD2nLmDaTwedieudlYexVaUFlEKRXXANf56XjOsl5d1ary1J8fMKY328PhVreJF7SzG6JdG7dazi4e3hLqyTRDD95N5eQwQUlfou9uf4PAfSvZWuoNzHvZ/+itlb0uFYNfWTDcedrRMjvbbkIH7ccxrrXeYYnLlo3kJp//z5CP7EM8EtJdLgpgT3dpXTPUkQUQsiWk9Eh4joIBE9p9WxghlxE8wy1o6JP2qWEsg4f9UZy9RF+51zONTa96Tv9gRd5dYR1/HcKwG97sDpArzy435NRkItTj6NLzanOb++VlSKZ+fu8Vrl858/H8bmY3luLSEWuJeGd9Js30a3JLSYzGdVRtyTKAHwghBiNxHVArCLiFYLIUzbX6H046Lmue+xr3Y6H8/dkYHVKnaXTF10ABuP5mJUj2YY3LFhwK/fm5EPQLpCD8TDs3bg/JUiPD+sA+rXVLe/97nvkgEAj93aGgCwZO9p/LQ3C9WiK14Hfbz+OD5ef1zV4zN11K4qnZKMbkkMu6GR83GTOlWRXaBtS3nikPa4tV0DTY8RLN1bEkKIbCHEbvnxJQCHADTT5FhBvajiq9YfyfX59Oe/T3bONtZyGOi1AIa17kiT7lvkXrqOCXN248r1Euf3dp+6gI1Hff88SgR7lef4PU1fcTig183ekoYf92QGdUwm6dioltEhKDLn8VsAGJ8kYqLKT41jemlfSuf5YR3Qu3Ws5scJhqH3JIgoHkAPANu9fG88ESURUVJubmgntUAEeqJfuPs0cuSFTNTsRvFs7gYy9+HRL6X+9/dWH8Wyfdlu5ZF/72WFrkAF+/fruPk/f1dgJ/xpP6Xgz/P2BndQBkDqY7/v5uZGh6FYw1pV0KmxORLbnwa0MToEQxmWJIioJoAfAEwSQlz0/L4QYoYQIkEIkRAXFxfUMTLPB74WRCDDVT255oi5O04FvZ/KfL01XdHzHDFMXXQAS/dl4fukDJQEc9PFQzCzwvVc5e/NZSleR4Et3ZeFTzak6haH2cQ3qKHq/rS8bUBEWDlpoHYHCEDNKvaeKWDIT09E0ZASxLdCiIVaHeeDdcadEKYs3B/S6/39Af518UE81Dc+oP09M2dPxY06TpPedkK/obv//SXN63avvwOb8EzsLWKrIeN8aPW2Hu7bKuCuQ2Y9RoxuIgBfADgkhHhP7+NbxeEzlxQ9L/fSdTzx9S7Vjvv60hRsSc1z21ZWJvB/8/eGNMIqSqc+Zs9quN8n8b0MAPBcNvqJgW1D3mf1GPWuMW9oUlu1fTF1GdHd1B/AQwBuJ6Jk+d8IA+IICx+tO4aU7Aq9dcp4OW9/sTkNf5jpfovodP41LNiVGVIyijB6Cq3NvTyis9vXvw/TdU3U8ECflkaHYCpGjG7aLIQgIUQ3IUR3+d9yvePQgqM4YJkK/f6VmTBnN15csNfnvI5g1peYstB9lbbC4lI8/e0uZMj3dk7nX8OFK0WK9uW4if/B2mNYui+rwoz1TzccxyIv6w0XFpdiwre7kXnhKkrLBJ6fl+z83qI9p/HpBmno6sXCYjzxdRLyLl/n+Q4+pE9PdD6OrRHjNirNdfQOc9c2rqbRIZiKve/IqGxn+gUAcI520tKyfdkAgLtVrJE0d4f78qqbj+Vh+f4zbgsJzdrivb/fU2mZQFQkOZeM7Nmyrtv3/7FS6sv+bQ/30c8bjuRg2f5sFJeWYfKdnbDQJZFMkhPGU4PaYt6ODPx88Cxa1KuOW9ubc3y5mXRoWAstY6s7f++MKcVJQmUv/7gfP+lYBTWkThwBrE45i8f/l4Tm9ap5+7b0fxBDez1vlO4+le/1eUv2ZiE97woiIwjzdmY4k8mqlLNYpWAC4aqUs5i5WVnisrOICFL1HoLaoiLN0x1pnkjMwbyfGouas127oa9aeFyuaeRvZbmgamAJAS
V/bhPnuo84OhXgsOVAn293Xz3a29l9aCYfP9ATX/2azjewTYiThMWFsnZDWp7v2kvJGfl4Qb7HcsKlRpPSq6zOf1uJDX8ZHHRs/kz76SAOZgV5s97mbusQ3JwjrbWIrY6pd91gdBjMC757ZWN/X+q7XNa42TtwsbDE5/crU1wq8I5G/d+zt6RjR9p5TfbNlHn9tzfqcpybW9VD49pV8c7vuulyPEC/CrCv3W2NpMhJgnnla/2MMiGNclJicTKvUBeuHtJpUawfnuqHbS8P0XXIrl4VaMf1b63LcULFSYIFJPOC+fqzzaJJnaqVP0lnn/yhJ/5zf3ev35s+uitmj+vl/DquVhUseLJvpft0PYf+79HezsqtC57si6XP3hpSvGbgraDfpKH2XYCI70mwgCzi1oFPPVvVcw5NNosRXZv4/N79vd0njd15Y2MkxMeianQECot919pyrSg7sEMc9r12R+iBmkjV6MgK2/q1bYD31yhfqz6ccJJgzOb+ckdHrDiQjReGdQRQ+boo88b7b21ERhBKdZhQamVTE6UZ8HMe74PUnMuVPNtY3N3EmEqMXk0tWBMGt8PSZwegTvVot+2eXUfTR3dF+vTECs/z9IhcfDKulroLSxnJUVVGrZ+paV1pXlK/tg3wcIDFOvXGSYIxFTw+oHWFWeVGULOQYpu4GnhqUFv0kRfDUZoDJw1rj0f6tsKqSQPxcF/1b3D/sX+81+0fPdBD8T6eGuRe4NDXPh16tqyHxwe0xuIJ/Z3bJt8Z/PKtGqzgqxlOEox5CKZB8EriDYpaEh+OVX4iC8afh3UIeR8xcslYAuGl4Z3QJk5ah0JpS6l21WhMG3Uj6tWIwd9HqT9U9uZW9bxuv6ube4kafwMJOjQqr890e6eGGFBJaZeICMIriTc4WwAA8ORtwVfStVKjk5MEYx4a11Y2Sslz5TQlf/g3NK04o7h3fKyq8wAWT+iPl0cEf5X7w9P98PywDqgWI93AfWl4J4zrF49R3TVZZVgxR4vmji6N8f6Y7vjvwwl4fIA0jHS0XAMsxrMmug+JXcsTyi1tYjGwfRzqVIvGu/fdpFq8b4/u6nX7o/1bu62hbXZ845oxD77O9SueG4Bn5uzGcXkGeodGtRSv++HP90/2RWmZwIsL9lX+5EpERRBualEXN7WoG/Q+OjSqhQ4uI5jqVo/BayO7hBxbqOY9UX7D3FEYctgNjfBKYvmktIEdGmDNoRwAQOM6VVFwrRhXvSz9GxMVgfED22DGphOIiohAVGQE9r76G1XjHdu7pdfFx/5mkUl0DmHdkrjzxsZGh8AsyNfyrJ0a18JfXUpHjB8orX3cqLbym5m++qIjIwgJcjdKfP3qivfnqmVsddPfBNWa4/fbvF41vHVPVyx8up/ze/+UW2uO+lDP3t4O4/rFq7J+RN1KbuYD0n2rf49Rr6Wil7BOEo1NOLmJWUtTl88QESG2RgwA4MZmtVGnmnRicKyB7EgtD97i+6Tjr6LuhMHtAChfi7p3fKzb16+NvMHZRWRXjmqyfx/VBZ2b1EanxuXde/cltED69EQsf24AAKBW1Wi8NrKL13kRSjnGCcRWj6n0ua8k3oB7ejQP+lhGCevuJiuNIGDmMn10V0xeuB+lHh8icumMal6vGibe3g6je7r/4Qsh1Tbq0LAm0vKuYLJLl4Pr3qYmdnareioCrLcrIPCPe7vipR9CW089nLx5T1e0qFcdA9uXFzJ87/c3oZ6Ck7hSMx9OwJUiqa6ZYzrI5w/djIUui2I57kfMf7Iv7vtsq2rHNkJYJwme0MOCdXvnhgCAqAj3xrajJyo6MgJEhOd/09H5vSj5pmlUBDlrG/VpU98tSTjc2Kw2/jSgjddjKx34EhURgTG9WmLtoRysSjnrs5vM7DxbRKFoULNKhWqynkk8VEO93HRuEVsdLw3v5EwSY+XZ7L1U/NmMEtZJoqPH6JNQje7RzG2lNBa+4mpWwaSh7XH3TU1x5XqJs+psl6a18dSgtnjQS4G70T2b4ejZS5g01Pcw1PYNa2LC4La4v1dg/eCf/qEnYmvE4MN1qTiUfRHDb2yMZ2+X6gm9eU9XtKrvfvVsJZUNP7UCf/NTXhnRGbe0qa9jNOoK6yTx4C2tMHXRAdX293JiZ9WTxE3N62BvZoGq+2ShIyK3k3235nWd218a7n14aZWoSLx6t/9RQESEv9zh/fWOeQhRXoZx3inXYOrj5WQTV6uK2wgfK/jdzc2xYFcmACA6DNbbjvSTJB4f6L3FaBVhnSTUVqZB91WtqpWPimD2MKB9HMYPbIPHB7TBhMHtsPvkBVSNjnROZrOqeeNvwc708/h620mcvSit/z60cyPEREWgsKgU4/rFGxugCqza1acEJ4kAaHGLI9pEa/syY0VGEF4eIRV+i6tVBd1DmOtgJn3a1EefNvXxzO3tET95GQDpc//WPd4nmzFzsX47TyO1qlbMn6VCOIdAquUtH7MygzG6pzEzYt8f092Q46rl6UFtnWPoASBC5b+K5+VSGa+PMn5CmlkM6tjQ6BBUsfTZW50VXcOV7ZLEkE7KPpzexk4LIZDopz5/MKpEqTeuXe3YlHLMfrWqF4d3wtDO5SNW1K7mOnFIe6RPT8RDNp/o5spfH76V3Nisjs9RauHCVkmiU+NaePverhUqPnrOghzauSHmPt7Hbdukoe3RrG41/N8d5UMeB3UMbTSJr9ouSs146Ga3r6uFMCkoWPfKwwu/fqy37sfWSnicvsxp9rhemhc5NNL/Hu1t+Za1J1sliRub1UHDWlXx6t1dnF0z//xdtwqzIJ+4rS3aNXQfPjtpaAcQkXOWLQB8+cfQTox339S08if58ZsujTHFtVwxSTcJ9dQiVqqKOcCiwy8dXBsPVl0XwgoGd2oY8ufezAZ2iLN8y9qTbZLEEwPbuPUdOkYqOZq9Hz/QE2+P7oqnB7XFzS2lGjrfPNan4o48fP1Yb5+li4Hy+j6e/nJHR9SsEoWyIKaFTxvZxXnl/ojLyBACIcJPM95bLINDbA25+tGlTo6egjmnTxvZBff3aoH58prOdavH4B75j5tzBGPlbJMkpozojLouU/Oj5bHojv8TuzXB2N4t8eLwTs4T7a0KJvkMaB+H6X66jVzXA3Zo17Cms07P9RLfawkDQHWXWjxVoiKQPj0Rj/SLd165V42OxMAO0uPICPI5qWdU96Z4eURnpE9PxDSXip7eruqm+FhMpU61aBx+fbjbtmiXMf09WtZDqyCL04Ui7e1EHH9rRIXtVaN9f7wf6ReP6fd2c5sR63hPQqnlw1i4CfshsJ/+oafXyUmvJHZGbM2YSivFLp7QH/sy8/0+p13Dmpg4pD2O51xGxoWrKBMCB05fBCBNFHppeCc0ql0FlwpLkH+12HnFCkgF5CYNbY/RPZpj6uID2HQ0Fwuf7oeYyAjsSDuPAe0bYNi/NwEAfvJYTtLh3d91w1db051VRJ+9vR1+n9ACX287idgaMViTchavuUzyGtOrBV5dchCAVGeoXcOaSM25jAiS7jH8aUAbLNmbhYNZ0s+w7oXbs
CrlLIZ2boiq0ZF4aXgnDGjfAIuTT+PR/q3dYvnqj73x+abjmLsjw+/vzFWf1rH4bY9mXssq+/Jo/9aYtSXN+XVkBGHKnZ3w9orDmHJnJ+Rdvo5rxaX4ZtspVI2OwBeP9EJyRj7++fMRn/tsG1cDzw1pj/sSrFeEjTGtkL+qlGaRkJAgkpKSjA7DyTHWO316os/nPDNnN5buy8YHY3tgZIh9sEqOF6gpC/dh7o4MvD+mO778NR3JGflY8dwAdHYpOBfKceMnL0NMVASWPNMfw9//BTe1qIu9GfmoHhOJEV2bOGfbAsBzQ9rjz8M6OI9XGUc8lcX31vJDmLHpBF4f1cU5skiL3yVjZkVEu4QQCaHsI+xbElpYNvFWbD9xXrfjLX32ViSlq3u8yXd2Ru2q0Ujs1gS9W8fiu50ZFVZa+9tdN6BPm+AKlL0yojMGdohDh0Y18dyQ9vh9rxZYti8Lgzs2RFytKqhfMwa1qkRh24nzeOI26V7Jgif7YsORXHy0PhUA8OLwjii4WowOjWrhhfl7AQCfPVg+ouute7qiUxPf9bkmDpFqG/2+VwvntqmJndG/nfVrBTGmF0NaEkQ0HMB/AEQCmCmEmO7v+WZrSSjxwvd78cPuTHz8QE8kdjNm/oJV8dU+Y+qwZEuCiCIBfAxgGIBMADuJaIkQIkXvWLT017s6I65WFdzRxTpr2ZrFiucG4Nfj54wOgzEGY7qbegNIFUKcAAAi+g7AKABhlSTqVo/BZB+jhJh/nZvUdrs3whgzjhFDYJsBcB36kilvc0NE44koiYiScnNzdQuOMcZYOSOShLeB/BVujAghZgghEoQQCXFx1p7NyxhjVmVEksgE0MLl6+YAsgyIgzHGWCWMSBI7AbQnotZEFAPgfgBLDIiDMcZYJXS/cS2EKCGiZwD8DGkI7CwhxEG942CMMVY5QybTCSGWA1huxLEZY4wpZ5sCf4wxxgLHSYIxxphPlijwR0S5AE4G+fIGAPJUDEcPVowZsGbcVowZsGbcHLN+HHG3EkKENIfAEkkiFESUFGrtEr1ZMWbAmnFbMWbAmnFzzPpRM27ubmKMMeYTJwnGGGM+2SFJzDA6gCBYMWbAmnFbMWbAmnFzzPpRLe6wvyfBGGMseHZoSTDGGAsSJwnGGGM+hXWSIKLhRHSEiFKJaLLBscwiohwiOuCyLZaIVhPRMfn/ei7fmyLHfYSI7nDZfjMR7Ze/9wEReSu9rlbMLYhoPREdIqKDRPSc2eMmoqpEtIOI9soxTzN7zC7HiySiPUS01EIxp8vHSyaiJCvETUR1iWgBER2WP9t9LRBzR/l37Ph3kYgm6RK3ECIs/0EqHngcQBsAMQD2ArjBwHgGAugJ4IDLtncATJYfTwbwD/nxDXK8VQC0ln+OSPl7OwD0hbQuxwoAd2oYcxMAPeXHtQAclWMzbdzy/mvKj6MBbAdwi5ljdon9eQBzACy1wudDPl46gAYe20wdN4CvAPxJfhwDoK7ZY/aIPxLAGQCt9Ihb8x/IqH/yL+Fnl6+nAJhicEzxcE8SRwA0kR83AXDEW6yQKub2lZ9z2GX7WACf6xj/Ykhrk1sibgDVAewG0MfsMUNaV2UtgNtRniRMHbN8jHRUTBKmjRtAbQBpkAftWCFmLz/DbwBs0SvucO5uUrRMqsEaCSGyAUD+v6G83VfszeTHnts1R0TxAHpAujI3ddxyt00ygBwAq4UQpo8ZwPsAXgRQ5rLN7DED0qqSq4hoFxGNl7eZOe42AHIBzJa79mYSUQ2Tx+zpfgBz5ceaxx3OSULRMqkm5St2Q34mIqoJ4AcAk4QQF/091cs23eMWQpQKIbpDujrvTUQ3+nm64TET0V0AcoQQu5S+xMs2oz4f/YUQPQHcCWACEQ3081wzxB0Fqdv3UyFEDwBXIHXT+GKGmJ1IWqhtJID5lT3Vy7ag4g7nJGGFZVLPElETAJD/z5G3+4o9U37suV0zRBQNKUF8K4RYaJW4AUAIkQ9gA4DhMHfM/QGMJKJ0AN8BuJ2IvjF5zAAAIUSW/H8OgB8B9DZ53JkAMuXWJQAsgJQ0zByzqzsB7BZCnJW/1jzucE4SVlgmdQmAR+THj0Dq83dsv5+IqhBRawDtAeyQm5OXiOgWeUTCwy6vUZ18jC8AHBJCvGeFuIkojojqyo+rARgK4LCZYxZCTBFCNBdCxEP6nK4TQjxo5pgBgIhqEFEtx2NIfeUHzBy3EOIMgAwi6ihvGgIgxcwxexiL8q4mR3zaxq3HjRaj/gEYAWlEznEArxgcy1wA2QCKIWXzxwDUh3Sz8pj8f6zL81+R4z4Cl9EHABIg/SEeB/ARPG7AqRzzrZCaovsAJMv/Rpg5bgDdAOyRYz4A4G/ydtPG7BH/IJTfuDZ1zJD69/fK/w46/sYsEHd3AEnyZ2QRgHpmj1k+XnUA5wDUcdmmedxcloMxxphP4dzdxBhjLEScJBhjjPnESYIxxphPnCQYY4z5xEmCMcaYT5wkWFgjolKP6pl+qwET0ZNE9LAKx00nogZBvO4OInqNiOoR0fJQ42AsVFFGB8CYxq4JqUSHIkKIzzSMRYkBANZDqhq8xeBYGOMkwexJLoExD8BgedMDQohUInoNwGUhxLtENBHAkwBKAKQIIe4nolgAsyBNJLsKYLwQYh8R1Yc0YTIOUilmcjnWgwAmQipLvR3A00KIUo94xkCq3NkGwCgAjQBcJKI+QoiRWvwOGFOCu5tYuKvm0d00xuV7F4UQvSHNOn3fy2snA+ghhOgGKVkAwDQAe+RtLwP4n7z9VQCbhVQ0bgmAlgBARJ0BjIFUCK87gFIAf/A8kBBiHsrXG+kKaUZsD04QzGjckmDhzl9301yX///t5fv7AHxLRIsglW8ApFIl9wKAEGIdEdUnojqQuodGy9uXEdEF+flDANwMYKe8AFg1lBdh89QeUqkEAKguhLhU2Q/HmNY4STA7Ez4eOyRCOvmPBPBXIuoC/6WWve2DAHwlhJjiLxCSlv5sACCKiFIANJHXxHhWCPGL35+CMQ1xdxOzszEu/291/QYRRQBoIYRYD2kxoLoAagLYBLm7iIgGAcgT0hobrtvvhFQ0DpCKrv2OiBrK34slolaegQghEgAsg3Q/4h1IxfK6c4JgRuOWBAt31eQrcoeVQgjHMNgqRLQd0sXSWI/XRQL4Ru5KIgD/FkLkyze2ZxPRPkg3rh1lmqcBmEtEuwFsBHAKAIQQKUQ0FdLqbRGQqgBPAHDSS6w9Id3gfhrAe16+z5juuAossyV5dFOCECLP6FgYMzPubmKMMeYTtyQYY4z5xC0JxhhjPnGSYIwx5hMnCcYYYz5xkmCMMeYTJwnGGGM+/T/L2Gn3hHrqlwAAAABJRU5ErkJggg==\n" + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "def plot_scores():\n", + " scores = 
np.vstack(ddpg_agents_tester.scores)\n", + " scores = np.max(scores, 1)\n", + " fig = plt.figure()\n", + " ax = fig.add_subplot(111)\n", + " plt.plot(np.arange(1, len(scores) + 1), scores)\n", + " plt.ylabel('Score')\n", + " plt.xlabel('Episode #')\n", + " plt.show()\n", + "\n", + "plot_scores()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5. Watch a Smart Agent" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "test_env = ParallelUnityEnvironment(num_envs=NUM_ENVS, seeds=list(range(NUM_ENVS)),\n", + " file_name=ENV_FILE_NAME, no_graphics=False)\n", + "test_env.set_timescale(1.0)\n", + "test_env.set_display_size(width=DISPLAY_SIZE[0], height=DISPLAY_SIZE[1])\n", + "ddpg_agents_tester.env = test_env\n", + "ddpg_agents.load_checkpoint(filename=CHECKPOINT_FILENAME)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Score from environment 0, episode 0: 20.00\n", + "Score from environment 1, episode 1: 20.00\n", + "Score from environment 2, episode 2: 20.00\n", + "Score from environment 3, episode 3: 20.00\n", + "Score from environment 0, episode 4: 20.00\n", + "Score from environment 1, episode 5: 20.00\n", + "Score from environment 2, episode 6: 20.00\n", + "Score from environment 3, episode 7: 20.00\n", + "Score from environment 0, episode 8: 20.00\n", + "Score from environment 1, episode 9: 20.00\n", + "Score from environment 2, episode 10: 20.00\n", + "Score from environment 3, episode 11: 20.00\n" + ] + } + ], + "source": [ + "ddpg_agents_tester.test_agents(n_episodes=10, max_t=200)" + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + } + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "When finished, you can close the environments:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "train_env.close()\n", + "test_env.close()" + ] + } + ], + "metadata": { + "kernelspec": { + "name": "rl39", + "language": "python", + "display_name": "rl39" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/notebooks/README.md b/notebooks/README.md new file mode 100644 index 0000000..55094ff --- /dev/null +++ b/notebooks/README.md @@ -0,0 +1,7 @@ +# Unity Executables: + +Here are some download links for the Unity Executables: + +- [3DBall Windows 64](https://www.dropbox.com/s/hydogwlr9ok0nbb/3DBall_Windows_x86_64.zip?dl=1) + +If you're not on Windows, you have to [create the executable](../README.md#creating-a-custom-unity-executable) yourself. Btw. you can help this project by sharing it here. 
\ No newline at end of file diff --git a/notebooks/checkpoint-3dball-parallel.pth b/notebooks/checkpoint-3dball-parallel.pth new file mode 100644 index 0000000..a56f398 Binary files /dev/null and b/notebooks/checkpoint-3dball-parallel.pth differ diff --git a/notebooks/checkpoint-3dball.pth b/notebooks/checkpoint-3dball.pth new file mode 100644 index 0000000..81aeabe Binary files /dev/null and b/notebooks/checkpoint-3dball.pth differ diff --git a/parallel_unity_environment.py b/parallel_unity_environment.py new file mode 100644 index 0000000..45e3bf7 --- /dev/null +++ b/parallel_unity_environment.py @@ -0,0 +1,95 @@ +from my_unity_environment import MyUnityEnvironment +import numpy as np +import concurrent +from concurrent.futures import ThreadPoolExecutor, Future +from typing import Tuple, List, Optional, Any + + +class ParallelUnityEnvironment: + def __init__(self, num_envs: int, seeds: List[int], file_name=None, no_graphics=False, worked_id_start=0): + """ + :param num_envs: number of environments to run in parallel + :param seeds: a list of random seeds for each environment + :param file_name: The filename of the Unity executable, or None when using the Unity editor + (press Play to connect). + :param no_graphics: Whether to use graphics windows or not. + :param worked_id_start: The id of the first Unity thread to create. + For example, a value of 4 would create threads with ids: 4, 5, 6 etc. + """ + if len(seeds) != num_envs: + raise ValueError() + + def _init_env(_file_name, _no_graphics, _seed, _worker_id): + return MyUnityEnvironment(file_name=_file_name, no_graphics=_no_graphics, seed=_seed, worker_id=_worker_id) + + self.num_envs = num_envs + self.executor = ThreadPoolExecutor(max_workers=num_envs + 2, thread_name_prefix="Unity_") + self.futures: List[Future[Any]] = [ + self.executor.submit(_init_env, file_name, no_graphics, seed, worker_id) + for seed, worker_id in zip(seeds, range(worked_id_start, worked_id_start + num_envs))] + self.envs: List[MyUnityEnvironment] = [future.result() for future in self.futures] + self.behavior_names = self.envs[0].behavior_names + self.behavior_specs = self.envs[0].behavior_specs + self.num_agents_list = self.envs[0].num_agents_list + + def set_timescale(self, time_scale: float): + """ Set the timescale at which the physics simulation runs. + + :param time_scale: a value of 1.0 means the simulation runs in realtime. + """ + for env in self.envs: + env.set_timescale(time_scale=time_scale) + + def set_display_size(self, width: int, height: int): + for env in self.envs: + env.set_display_size(width=width, height=height) + + def reset(self, reset_list: List[bool]): + """Resets all environments where reset_list[env_index] == True """ + + def _reset(env: MyUnityEnvironment): + env.reset() + + for env_index, reset in enumerate(reset_list): + if reset: + self.futures[env_index] = self.executor.submit(_reset, self.envs[env_index]) + concurrent.futures.wait(self.futures) + + def get_observations(self, behavior_index: int, env_index: int): + """ Get observations for each environment. 
+ + :return: np.ndarray[num_agents, observation_size]""" + + return self.envs[env_index].get_observations(behavior_index) + + def set_actions(self, behavior_index: int, env_index: int, continuous: Optional[np.ndarray] = None, + discrete: Optional[np.ndarray] = None): + self.envs[env_index].set_actions(behavior_index, continuous, discrete) + + def get_experiences(self, behavior_index: int, env_index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ Get experiences for environment %env_index with behavior %behavior_index. + + :param behavior_index: + :param env_index: + :return: Tuple of (observations, rewards, dones). Each element is ndarray[num_agents, *] + """ + return self.envs[env_index].get_experiences(behavior_index) + + def step(self): + """ Step forward in all environments.""" + + def _step(myenv: MyUnityEnvironment): + myenv.step() + + for env_index, env in enumerate(self.envs): + self.futures[env_index] = self.executor.submit(_step, env) + concurrent.futures.wait(self.futures) + + def close(self): + def _close(_env: MyUnityEnvironment): + _env.close() + + for env_index, env in enumerate(self.envs): + self.futures[env_index] = self.executor.submit(_close, env) + concurrent.futures.wait(self.futures) + self.executor.shutdown() diff --git a/plot.png b/plot.png new file mode 100644 index 0000000..62f2a36 Binary files /dev/null and b/plot.png differ diff --git a/pytorch_device.py b/pytorch_device.py new file mode 100644 index 0000000..ecd895c --- /dev/null +++ b/pytorch_device.py @@ -0,0 +1,3 @@ +import torch + +pytorch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu") diff --git a/replay_buffer.py b/replay_buffer.py new file mode 100644 index 0000000..6d31f6e --- /dev/null +++ b/replay_buffer.py @@ -0,0 +1,31 @@ +from utilities import convert_to_tensor +import numpy as np +import torch +from collections import deque +import random +from typing import Tuple, Deque, Union + + +class ReplayBuffer: + """Fixed-size buffer to store experience tuples.""" + + def __init__(self, size: int): + """Initialize a ReplayBuffer object. + + :param size: maximum size of buffer + """ + self.deque: Deque[Tuple[np.ndarray, ...]] = deque(maxlen=size) + + def add(self, sample: Tuple[np.ndarray, ...]): + """Add a new sample to the buffer.""" + self.deque.append(sample) + + def sample(self, batch_size: int) -> Tuple[torch.Tensor, ...]: + """Randomly sample a batch of samples from the buffer.""" + samples = random.sample(self.deque, k=batch_size) + samples_transposed = tuple(zip(*samples)) + return tuple(convert_to_tensor(np.stack(np_array_list)) for np_array_list in samples_transposed) + + def __len__(self): + """Return the current size of the buffer.""" + return len(self.deque) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..a3eb1d9 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +numpy +torch +matplotlib diff --git a/utilities.py b/utilities.py new file mode 100644 index 0000000..75a244c --- /dev/null +++ b/utilities.py @@ -0,0 +1,21 @@ +from pytorch_device import pytorch_device +import torch +import numpy as np + + +def convert_to_tensor(x) -> torch.Tensor: + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float().to(pytorch_device) + elif isinstance(x, torch.Tensor): + return x + else: + return torch.tensor(x).float().to(pytorch_device) + + +def convert_to_numpy(x) -> np.ndarray: + if isinstance(x, torch.Tensor): + return x.detach().cpu().numpy() + elif isinstance(x, np.ndarray): + return x + else: + return np.array(x)
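As a closing illustration of how `replay_buffer.py` and `utilities.py` fit together, here is a small self-contained usage sketch. The shapes (one agent, 8 observations, 2 continuous actions) mirror the 3DBall behavior spec printed in the notebooks, but the data itself consists of random placeholder values, not output from a real environment.

```python
# Illustrative use of ReplayBuffer + convert_to_tensor with dummy data.
# Shapes are placeholders chosen to match the 3DBall setup (1 agent,
# 8 observations, 2 actions); no Unity environment is involved here.
import numpy as np
from replay_buffer import ReplayBuffer

buffer = ReplayBuffer(size=int(1e5))

for _ in range(256):
    states = np.random.randn(1, 8).astype(np.float32)
    actions = np.random.uniform(-1, 1, size=(1, 2)).astype(np.float32)
    rewards = np.array([[0.1]], dtype=np.float32)
    next_states = np.random.randn(1, 8).astype(np.float32)
    dones = np.array([[0.0]], dtype=np.float32)
    buffer.add((states, actions, rewards, next_states, dones))

# sample() returns one tensor per element of the stored tuple, each with a
# leading batch dimension of `batch_size` (the per-sample arrays are stacked).
states_b, actions_b, rewards_b, next_states_b, dones_b = buffer.sample(batch_size=64)
print(states_b.shape)  # torch.Size([64, 1, 8])
```

Each element returned by `sample()` has already been converted to a PyTorch tensor on `pytorch_device` by `convert_to_tensor`, which is why the training loops in the notebooks can feed the sampled batch straight into the networks.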