
Adding test presets for mms + support for any encoder in reference model #1249

Merged: 1 commit, Aug 20, 2025
src/fairseq2/models/wav2vec2/asr/_config.py (6 additions, 0 deletions)

@@ -163,6 +163,12 @@ def bib1143_300m() -> Wav2Vec2AsrConfig:
     config.vocab_info.size = 3335
     return config
 
+@wav2vec2_asr_arch("300m_bib1143_3292")
+def bib1143_300m_3292() -> Wav2Vec2AsrConfig:
+    config = bib1143_300m()
+    config.vocab_info.size = 3292
+    return config
+
 @wav2vec2_asr_arch("1b_bib61")
 def bib61_1b() -> Wav2Vec2AsrConfig:
     config = base_10h()

Review comment from a Contributor on the line config.vocab_info.size = 3292:

    Why this vocab size? Can you leave a comment?
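For readers unfamiliar with the preset mechanism: each function decorated with @wav2vec2_asr_arch derives its config from an existing preset and overrides only the fields that differ. A minimal sketch of adding another preset alongside these in _config.py; the preset name and vocabulary size below are hypothetical:

@wav2vec2_asr_arch("300m_bib1143_example")  # hypothetical preset name
def bib1143_300m_example() -> Wav2Vec2AsrConfig:
    # Start from the 300M bib1143 preset and override only the vocabulary
    # size, mirroring the pattern used throughout this file.
    config = bib1143_300m()
    config.vocab_info.size = 4096  # hypothetical vocabulary size
    return config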
src/fairseq2/recipes/wav2vec2/asr/_train.py (6 additions, 1 deletion)

@@ -173,6 +173,9 @@ class Wav2Vec2AsrTrainDatasetSection(DatasetSection):
     extras: dict[str, object] = field(default_factory=dict)
     """The dataset-specific extra options."""
 
+    max_num_batches: int | None = None
+    """The maximum number of batches for the dataloader to return."""
+
     # Upsampling
     beta_corpus: float | None = None
     beta_language: float | None = None
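The new option caps how many batches an epoch yields. A minimal sketch of the intended semantics, assuming the dataloader yields batches lazily; the actual capping lives inside the fairseq2 dataset reader, not in recipe code:

from itertools import islice
from typing import Iterable, Iterator, TypeVar

BatchT = TypeVar("BatchT")

def cap_batches(batches: Iterable[BatchT], max_num_batches: int | None) -> Iterator[BatchT]:
    # None preserves the old behavior (no cap); otherwise stop after
    # max_num_batches batches, which keeps the new test presets fast.
    if max_num_batches is None:
        return iter(batches)
    return islice(batches, max_num_batches)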
@@ -288,7 +291,7 @@ def load_wav2vec2_asr_trainer(
 
     # If we start the training with an empty ASR model, use the weights of a
     # pretrained wav2vec 2.0 model.
-    if model.is_empty_initialized:
+    if model.is_empty_initialized and config.pretrained_encoder.name:
         tp = AsrModel if config.pretrained_encoder_is_ctc else Wav2Vec2Model
         pt_model = load_reference_model(
             tp,
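The added guard makes the pretrained encoder optional, while the existing tp selection already accepts either kind of reference checkpoint. A hedged sketch of the two config knobs involved; the field names come from this diff, but the values and the assumption that an empty name is the way to opt out are illustrative:

# Train from randomly initialized weights: with no encoder name set, the
# new guard skips loading a reference model entirely (assumption: an
# empty or unset name is falsy here).
config.pretrained_encoder.name = ""

# Initialize from a CTC ASR checkpoint instead of a plain wav2vec 2.0
# encoder; this flag selects AsrModel as the reference model type.
config.pretrained_encoder_is_ctc = True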
@@ -427,6 +430,7 @@ def load_wav2vec2_asr_trainer(
         spec_aug_p=config.dataset.spec_aug_p,
         spec_aug_freq_mask_param=config.dataset.spec_aug_freq_mask_param,
         spec_aug_time_mask_param=config.dataset.spec_aug_time_mask_param,
+        max_num_batches=config.dataset.max_num_batches,
         n_context_examples=config.dataset.n_context_examples,
         bucket_size=config.dataset.bucket_size_train,
         deterministic_context=False,
@@ -472,6 +476,7 @@ def load_wav2vec2_asr_trainer(
         sync_mode=SyncMode.UNTIL_LAST,
         num_prefetch=config.dataset.num_prefetch,
         seed=seed,
+        max_num_batches=config.dataset.max_num_batches,
         extras=config.dataset.extras,
         n_context_examples=config.dataset.n_context_examples,
         bucket_size=config.dataset.bucket_size_eval,
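With the option threaded through both the train and eval read options above, a quick smoke test of one of the new mms presets can be capped from the recipe config. Illustrative override; the field path comes from this diff, the value is arbitrary:

# Stop each train and eval epoch after 10 batches.
config.dataset.max_num_batches = 10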