Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,8 @@ class TimmWrapperPreTrainedModel(PreTrainedModel):
main_input_name = "pixel_values"
input_modalities = ("image",)
config: TimmWrapperConfig
_no_split_modules = []
# add WA here as `timm` does not support model parallelism
_no_split_modules = ["TimmWrapperModel"]
model_tags = ["timm"]

# used in Trainer to avoid passing `loss_kwargs` to model forward
Expand Down
3 changes: 2 additions & 1 deletion tests/models/pe_audio/test_modeling_pe_audio.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,8 @@ def seq_length(self):

def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.num_channels, self.audio_seq_length])
valid_lengths = ids_tensor([self.batch_size], self.audio_seq_length)
# Generate valid_lengths in range [1, self.audio_seq_length] to ensure at least one valid frame
valid_lengths = ids_tensor([self.batch_size], self.audio_seq_length - 1) + 1
padding_mask = torch.arange(self.audio_seq_length, device=torch_device)[None, :] < valid_lengths[:, None]
padding_mask = padding_mask.int()
config = self.get_config()
Expand Down
10 changes: 8 additions & 2 deletions tests/models/pe_audio_video/test_modeling_pe_audio_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,8 @@ def seq_length(self):

def prepare_config_and_inputs(self):
input_values = floats_tensor([self.batch_size, self.num_audio_channels, self.audio_seq_length])
valid_audio_lengths = ids_tensor([self.batch_size], self.audio_seq_length)
# Generate valid_lengths in range [1, self.audio_seq_length] to ensure at least one valid frame
valid_audio_lengths = ids_tensor([self.batch_size], self.audio_seq_length - 1) + 1
padding_mask = torch.arange(self.audio_seq_length, device=torch_device)[None, :] < valid_audio_lengths[:, None]
padding_mask = padding_mask.int()

Expand All @@ -162,7 +163,8 @@ def prepare_config_and_inputs(self):
self.config_kwargs["video_config"]["vision_config"]["model_args"]["img_size"][1],
]
)
valid_video_lengths = ids_tensor([self.batch_size], self.num_frames)
# Generate valid_lengths in range [1, self.num_frames] to ensure at least one valid frame
valid_video_lengths = ids_tensor([self.batch_size], self.num_frames - 1) + 1
padding_mask_videos = (
torch.arange(self.num_frames, device=torch_device)[None, :] < valid_video_lengths[:, None]
)
Expand Down Expand Up @@ -258,6 +260,10 @@ def test_feed_forward_chunking(self):
def test_save_load(self):
    # Overridden to a no-op so the common `test_save_load` does not run for
    # this model tester.
    # NOTE(review): a skip decorator presumably sits just above this line
    # (outside the visible diff hunk) — confirm against the full file.
    pass

@unittest.skip(reason="TimmWrapperModel does not support model parallelism")
def test_model_parallelism(self):
    # Skipped: the timm-backed vision tower is declared non-splittable
    # (`_no_split_modules = ["TimmWrapperModel"]` on TimmWrapperPreTrainedModel),
    # so the whole backbone must live on one device and this common test
    # cannot exercise cross-device splitting.
    pass

Comment on lines +263 to +266
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, no — I meant to keep the changes and skip the tests. With the proposed diff we can support model parallelism, but the tests fail because of the way the test suite is designed.
Can you revert the previous diff and "move `_no_split_modules` under timm's PreTrainedModel" instead of PE?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is it OK now?

@unittest.skip(reason="@eustlb this is not really expected")
def test_batching_equivalence(self):
    # Skipped pending maintainer review — batched vs. unbatched outputs
    # diverge for this model; the cause is not established here.
    pass
Expand Down
11 changes: 7 additions & 4 deletions tests/models/pe_video/test_modeling_pe_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,10 +103,9 @@ def prepare_config_and_inputs(self):
self.config_kwargs["vision_config"]["model_args"]["img_size"][1],
]
)
valid_lengths = ids_tensor([self.batch_size], self.num_frames)
padding_mask_videos = (
torch.ones([self.batch_size, self.num_frames], device=torch_device) < valid_lengths[:, None]
)
# Generate valid_lengths in range [1, num_frames] to ensure at least one valid frame
valid_lengths = ids_tensor([self.batch_size], self.num_frames - 1) + 1
padding_mask_videos = torch.arange(self.num_frames, device=torch_device).unsqueeze(0) < valid_lengths[:, None]
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here we should use `torch.arange` instead of `torch.ones`, so the comparison against `valid_lengths` actually produces a per-position padding mask.

padding_mask_videos = padding_mask_videos.int()
config = self.get_config()

Expand Down Expand Up @@ -187,6 +186,10 @@ def test_feed_forward_chunking(self):
def test_save_load(self):
    # Overridden to a no-op so the common `test_save_load` does not run for
    # this model tester.
    # NOTE(review): a skip decorator presumably sits just above this line
    # (outside the visible diff hunk) — confirm against the full file.
    pass

@unittest.skip(reason="TimmWrapperModel does not support model parallelism")
def test_model_parallelism(self):
    # Skipped: the timm-backed vision tower is declared non-splittable
    # (`_no_split_modules = ["TimmWrapperModel"]` on TimmWrapperPreTrainedModel),
    # so the whole backbone must live on one device and this common test
    # cannot exercise cross-device splitting.
    pass

@unittest.skip(reason="@eustlb this is not really expected")
def test_batching_equivalence(self):
    # Skipped pending maintainer review — batched vs. unbatched outputs
    # diverge for this model; the cause is not established here.
    pass
Expand Down