Skip to content

[BugFix] Temporarily fix gym to 0.25.1 to fix CI (#411) #1

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Sep 9, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .circleci/unittest/linux/scripts/environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ dependencies:
- hypothesis
- future
- cloudpickle
- gym
- gym==0.25.1
- pygame
- gym[accept-rom-license]
- gym[atari]
Expand Down
2 changes: 1 addition & 1 deletion .circleci/unittest/linux_stable/scripts/environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ dependencies:
- hypothesis
- future
- cloudpickle
- gym
- gym==0.25.1
- pygame
- gym[accept-rom-license]
- gym[atari]
Expand Down
111 changes: 111 additions & 0 deletions test/test_transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
PinMemoryTransform,
CenterCrop,
UnsqueezeTransform,
SqueezeTransform,
)

TIMEOUT = 10.0
Expand Down Expand Up @@ -443,6 +444,116 @@ def test_unsqueeze(self, keys, size, nchannels, batch, device, unsqueeze_dim):
for key in keys:
assert observation_spec[key].shape == expected_size

@pytest.mark.parametrize("unsqueeze_dim", [1, -2])
@pytest.mark.parametrize("nchannels", [1, 3])
@pytest.mark.parametrize("batch", [[], [2], [2, 4]])
@pytest.mark.parametrize("size", [[], [4]])
@pytest.mark.parametrize(
"keys", [["next_observation", "some_other_key"], ["next_observation_pixels"]]
)
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize(
"keys_inv", [[], ["action", "some_other_key"], ["next_observation_pixels"]]
)
def test_unsqueeze_inv(
self, keys, keys_inv, size, nchannels, batch, device, unsqueeze_dim
):
torch.manual_seed(0)
keys_total = set(keys + keys_inv)
unsqueeze = UnsqueezeTransform(
unsqueeze_dim, keys_in=keys, keys_inv_in=keys_inv
)
td = TensorDict(
{
key: torch.randn(*batch, *size, nchannels, 16, 16, device=device)
for key in keys_total
},
batch,
)

unsqueeze.inv(td)

expected_size = [*size, nchannels, 16, 16]
for key in keys_total.difference(keys_inv):
assert td.get(key).shape[len(batch) :] == torch.Size(expected_size)

if expected_size[unsqueeze_dim] == 1:
del expected_size[unsqueeze_dim]
for key in keys_inv:
assert td.get(key).shape[len(batch) :] == torch.Size(expected_size)

@pytest.mark.parametrize("squeeze_dim", [1, -2])
@pytest.mark.parametrize("nchannels", [1, 3])
@pytest.mark.parametrize("batch", [[], [2], [2, 4]])
@pytest.mark.parametrize("size", [[], [4]])
@pytest.mark.parametrize(
"keys", [["next_observation", "some_other_key"], ["next_observation_pixels"]]
)
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize(
"keys_inv", [[], ["action", "some_other_key"], ["next_observation_pixels"]]
)
def test_squeeze(self, keys, keys_inv, size, nchannels, batch, device, squeeze_dim):
torch.manual_seed(0)
keys_total = set(keys + keys_inv)
squeeze = SqueezeTransform(squeeze_dim, keys_in=keys, keys_inv_in=keys_inv)
td = TensorDict(
{
key: torch.randn(*batch, *size, nchannels, 16, 16, device=device)
for key in keys_total
},
batch,
)
squeeze(td)

expected_size = [*size, nchannels, 16, 16]
for key in keys_total.difference(keys):
assert td.get(key).shape[len(batch) :] == torch.Size(expected_size)

if expected_size[squeeze_dim] == 1:
del expected_size[squeeze_dim]
for key in keys:
assert td.get(key).shape[len(batch) :] == torch.Size(expected_size)

@pytest.mark.parametrize("squeeze_dim", [1, -2])
@pytest.mark.parametrize("nchannels", [1, 3])
@pytest.mark.parametrize("batch", [[], [2], [2, 4]])
@pytest.mark.parametrize("size", [[], [4]])
@pytest.mark.parametrize(
"keys", [["next_observation", "some_other_key"], ["next_observation_pixels"]]
)
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize(
"keys_inv", [[], ["action", "some_other_key"], ["next_observation_pixels"]]
)
def test_squeeze_inv(
self, keys, keys_inv, size, nchannels, batch, device, squeeze_dim
):
torch.manual_seed(0)
keys_total = set(keys + keys_inv)
squeeze = SqueezeTransform(squeeze_dim, keys_in=keys, keys_inv_in=keys_inv)
td = TensorDict(
{
key: torch.randn(*batch, *size, nchannels, 16, 16, device=device)
for key in keys_total
},
batch,
)
squeeze.inv(td)

expected_size = [*size, nchannels, 16, 16]
for key in keys_total.difference(keys_inv):
assert td.get(key).shape[len(batch) :] == torch.Size(expected_size)

if squeeze_dim < 0:
expected_size.insert(len(expected_size) + squeeze_dim + 1, 1)
else:
expected_size.insert(squeeze_dim, 1)
expected_size = torch.Size(expected_size)

for key in keys_inv:
assert td.get(key).shape[len(batch) :] == torch.Size(expected_size)

@pytest.mark.skipif(not _has_tv, reason="no torchvision")
@pytest.mark.parametrize(
"keys", [["next_observation", "some_other_key"], ["next_observation_pixels"]]
Expand Down
104 changes: 91 additions & 13 deletions torchrl/envs/transforms/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -1032,12 +1032,13 @@ def __repr__(self) -> str:


class UnsqueezeTransform(Transform):
"""Flatten adjacent dimensions of a tensor.
"""Inserts a dimension of size one at the specified position.

Args:
unsqueeze_dim (int): dimension to unsqueeze.
"""

invertible = True
inplace = False

@classmethod
Expand All @@ -1050,6 +1051,8 @@ def __init__(
unsqueeze_dim: int,
keys_in: Optional[Sequence[str]] = None,
keys_out: Optional[Sequence[str]] = None,
keys_inv_in: Optional[Sequence[str]] = None,
keys_inv_out: Optional[Sequence[str]] = None,
):
if not _has_tv:
raise ImportError(
Expand All @@ -1059,7 +1062,12 @@ def __init__(
)
if keys_in is None:
keys_in = IMAGE_KEYS # default
super().__init__(keys_in=keys_in, keys_out=keys_out)
super().__init__(
keys_in=keys_in,
keys_out=keys_out,
keys_inv_in=keys_inv_in,
keys_inv_out=keys_inv_out,
)
self._unsqueeze_dim_orig = unsqueeze_dim

def set_parent(self, parent: Union[Transform, EnvBase]) -> None:
Expand All @@ -1086,22 +1094,92 @@ def _apply_transform(self, observation: torch.Tensor) -> torch.Tensor:
observation = observation.unsqueeze(self.unsqueeze_dim)
return observation

def inv(self, tensordict: TensorDictBase) -> TensorDictBase:
    """Apply the inverse transform (a squeeze) to ``tensordict``.

    A non-negative ``unsqueeze_dim`` was given relative to a single
    (batch-free) entry, so it is shifted by the tensordict's batch
    dimensionality before the parent ``inv`` machinery runs.
    """
    # NOTE(review): this mutates transform state per call, so concurrent
    # inv() calls on tensordicts of different ndim would race — presumably
    # single-threaded use is assumed; confirm.
    if self._unsqueeze_dim_orig >= 0:
        self._unsqueeze_dim = self._unsqueeze_dim_orig + tensordict.ndimension()
    return super().inv(tensordict)

def _inv_apply_transform(self, observation: torch.Tensor) -> torch.Tensor:
    """Undo the forward unsqueeze by squeezing the tracked dimension."""
    return observation.squeeze(self.unsqueeze_dim)

def _transform_spec(self, spec: TensorSpec) -> None:
    """Unsqueeze ``spec``'s shape in place, recursing into composite specs.

    For a bounded (``ContinuousBox``) space the minimum/maximum tensors are
    unsqueezed and the spec shape follows them; otherwise the new shape is
    derived by unsqueezing a zero tensor of the spec's current shape.
    """
    if isinstance(spec, CompositeSpec):
        # Composite specs are transformed entry by entry.
        for key in spec:
            self._transform_spec(spec[key])
    else:
        # Reset to the user-provided dim before editing shapes — presumably
        # to discard the batch-rebased value that inv() may have written to
        # self._unsqueeze_dim; confirm against the unsqueeze_dim property.
        self._unsqueeze_dim = self._unsqueeze_dim_orig
        space = spec.space
        if isinstance(space, ContinuousBox):
            space.minimum = self._apply_transform(space.minimum)
            space.maximum = self._apply_transform(space.maximum)
            spec.shape = space.minimum.shape
        else:
            spec.shape = self._apply_transform(torch.zeros(spec.shape)).shape

def transform_action_spec(self, action_spec: TensorSpec) -> TensorSpec:
    """Unsqueeze the action spec in place iff "action" is an inverse-input key."""
    if "action" not in self.keys_inv_in:
        return action_spec
    self._transform_spec(action_spec)
    return action_spec

def transform_input_spec(self, input_spec: TensorSpec) -> TensorSpec:
    """Unsqueeze every inverse-input entry of the input spec, in place."""
    for inv_key in self.keys_inv_in:
        self._transform_spec(input_spec[inv_key])
    return input_spec

def transform_reward_spec(self, reward_spec: TensorSpec) -> TensorSpec:
    """Unsqueeze the reward spec in place iff "reward" is a forward key.

    NOTE(review): this checks keys_in while transform_action_spec checks
    keys_inv_in — presumably intentional (reward flows forward, action flows
    inverse); confirm.
    """
    if "reward" not in self.keys_in:
        return reward_spec
    self._transform_spec(reward_spec)
    return reward_spec

@_apply_to_composite
def transform_observation_spec(self, observation_spec: TensorSpec) -> TensorSpec:
    """Unsqueeze the observation spec's shape (and bounds) in place.

    Fix: the body previously duplicated ``_transform_spec``'s shape/bounds
    editing inline and *then* also called ``_transform_spec``, unsqueezing
    the spec twice. Delegating to the shared helper applies the transform
    exactly once and keeps the spec logic in a single place.
    """
    self._transform_spec(observation_spec)
    return observation_spec

def __repr__(self) -> str:
    """Concise representation showing the effective unsqueeze dimension."""
    # Fix: the statements that previously followed this return (building an
    # alternative keys-based string) were unreachable dead code and have
    # been removed; the observable behavior is unchanged.
    return f"{self.__class__.__name__}(unsqueeze_dim={int(self.unsqueeze_dim)})"


class SqueezeTransform(UnsqueezeTransform):
    """Removes a dimension of size one at the specified position.

    Implemented by inheriting :class:`UnsqueezeTransform` and exchanging the
    forward/inverse roles: ``forward`` runs the parent's ``inv`` (a squeeze)
    and ``inv`` runs the parent's ``forward`` (an unsqueeze).

    Args:
        squeeze_dim (int): dimension to squeeze.
    """

    invertible = True
    inplace = False

    def __init__(
        self,
        squeeze_dim: int,
        keys_in: Optional[Sequence[str]] = None,
        keys_out: Optional[Sequence[str]] = None,
        keys_inv_in: Optional[Sequence[str]] = None,
        keys_inv_out: Optional[Sequence[str]] = None,
    ):
        # The forward/inverse key sets are exchanged because this transform
        # drives the parent's machinery in the opposite direction.
        # Fix: the output key sets are now exchanged together with their
        # input counterparts; the original swapped only keys_in/keys_inv_in,
        # so an explicit keys_out would have been paired with the wrong
        # direction.  Behavior is unchanged for the default (None) outputs.
        super().__init__(
            unsqueeze_dim=squeeze_dim,
            keys_in=keys_inv_in,
            keys_out=keys_inv_out,
            keys_inv_in=keys_in,
            keys_inv_out=keys_out,
        )

    @property
    def squeeze_dim(self):
        # The parent stores the dimension under its own (unsqueeze) name.
        return super().unsqueeze_dim

    def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
        """Squeeze ``squeeze_dim`` out of the forward keys."""
        return super().inv(tensordict)

    def inv(self, tensordict: TensorDictBase) -> TensorDictBase:
        """Unsqueeze ``squeeze_dim`` back into the inverse keys."""
        return super().forward(tensordict)


class GrayScale(ObservationTransform):
Expand Down