1
+ import logging
1
2
import gym
2
3
import numpy as np
3
4
from mlagents .envs import UnityEnvironment
4
- from gym import error , spaces , logger
5
+ from gym import error , spaces
5
6
6
7
7
8
class UnityGymException(error.Error):
    """Any error raised by the gym wrapper for Unity environments."""
    pass
12
13
13
14
15
# Module-level logger for the gym wrapper.
# NOTE(review): calling basicConfig at import time configures the root
# logger as a side effect of importing this module — kept as-is because
# downstream code may rely on it, but worth confirming this is intended
# for a library module.
logger = logging.getLogger("gym_unity")
logging.basicConfig(level=logging.INFO)
14
19
class UnityEnv (gym .Env ):
15
20
"""
16
21
Provides Gym wrapper for Unity Learning Environments.
@@ -44,7 +49,11 @@ def __init__(self, environment_filename: str, worker_id=0, use_visual=False, mul
44
49
if use_visual and brain .number_visual_observations == 0 :
45
50
raise UnityGymException ("`use_visual` was set to True, however there are no"
46
51
" visual observations as part of this environment." )
47
- self .use_visual = brain .number_visual_observations == 1 and use_visual
52
+ self .use_visual = brain .number_visual_observations >= 1 and use_visual
53
+
54
+ if brain .number_visual_observations > 1 :
55
+ logger .warning ("The environment contains more than one visual observation. "
56
+ "Please note that only the first will be provided in the observation." )
48
57
49
58
if brain .num_stacked_vector_observations != 1 :
50
59
raise UnityGymException (
@@ -114,7 +123,8 @@ def step(self, action):
114
123
if not isinstance (action , list ):
115
124
raise UnityGymException ("The environment was expecting `action` to be a list." )
116
125
if len (action ) != self ._n_agents :
117
- raise UnityGymException ("The environment was expecting a list of {} actions." .format (self ._n_agents ))
126
+ raise UnityGymException (
127
+ "The environment was expecting a list of {} actions." .format (self ._n_agents ))
118
128
else :
119
129
action = np .array (action )
120
130
@@ -136,17 +146,19 @@ def _single_step(self, info):
136
146
else :
137
147
default_observation = info .vector_observations [0 , :]
138
148
139
- return default_observation , info .rewards [0 ], info .local_done [0 ], {"text_observation" : info .text_observations [0 ],
140
- "brain_info" : info }
149
+ return default_observation , info .rewards [0 ], info .local_done [0 ], {
150
+ "text_observation" : info .text_observations [0 ],
151
+ "brain_info" : info }
141
152
142
153
def _multi_step(self, info):
    """Translate a multi-agent BrainInfo into gym-style step output.

    Returns a 4-tuple of (observations, rewards, dones, info-dict), where
    each of the first three is a per-agent list taken from ``info``.
    """
    if self.use_visual:
        # Cache the raw visual observations so render() can serve them later.
        self.visual_obs = info.visual_observations
        observations = self.visual_obs
    else:
        observations = info.vector_observations
    # Expose the text observations and the raw BrainInfo alongside the
    # standard gym return values.
    extra_info = {
        "text_observation": info.text_observations,
        "brain_info": info}
    return list(observations), info.rewards, info.local_done, extra_info
150
162
151
163
def render(self, mode='rgb_array'):
    """Return the most recently cached visual observation.

    ``mode`` is accepted for gym API compatibility but not used; the
    stored observation is returned regardless of its value.
    """
    frame = self.visual_obs
    return frame
@@ -170,11 +182,13 @@ def seed(self, seed=None):
170
182
171
183
def _check_agents(self, n_agents):
    """Validate the agent count against the single-/multi-agent mode.

    Raises UnityGymException if the number of agents in the scene is
    inconsistent with how the environment was launched. On the first
    successful check, records the agent count in ``self._n_agents``.
    """
    if not self._multiagent and n_agents > 1:
        # Fixed: implicit string concatenation was missing a space,
        # which produced "howeverthere is ..." in the error message.
        raise UnityGymException(
            "The environment was launched as a single-agent environment, however "
            "there is more than one agent in the scene.")
    elif self._multiagent and n_agents <= 1:
        # Fixed: same missing space, plus the "mutli-agent" typo.
        raise UnityGymException(
            "The environment was launched as a multi-agent environment, however "
            "there is only one agent in the scene.")
    if self._n_agents is None:
        self._n_agents = n_agents
        logger.info("{} agents within environment.".format(n_agents))
0 commit comments