train_text2pose.sh
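#!/usr/bin/env bash
# Stage-2 training: text-to-pose generation on top of a pretrained pose VQ-VAE.

# Alternative configuration (kept for reference): How2Sign data with text2gloss
# files and a separately trained pose VQ-VAE checkpoint.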
# python -m train_text2pose \
# --gpus 1 \
# --batchSize 5 \
# --data_path "Data/how2sign" \
# --text_path "data/text2gloss/" \
# --vocab_file "data/text2gloss/how2sign_vocab.txt" \
# --pose_vqvae "logs/phoneix_spl_seperate_SeqLen_1/lightning_logs/version_3/checkpoints/epoch=123-step=50095.ckpt" \
# --hparams_file "logs/phoneix_spl_seperate_SeqLen_1/lightning_logs/version_3/hparams.yaml" \
# --resume_ckpt "" \
# --default_root_dir "text2pose_logs/test" \
# --max_steps 300000 \
# --max_frames_num 200 \
# --gpu_ids "0" \
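
# Active configuration: ProgressiveTransformersSLP data.
# --pose_vqvae / --vqvae_hparams_file: stage-1 pose VQ-VAE checkpoint and its hyperparameters.
# --backmodel / --backmodel2 (with their hparams files): pretrained pose2text checkpoints (heatmap and joint models).
# --resume_ckpt: resume stage-2 training from an earlier text2pose checkpoint.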
python -m train_text2pose \
--gpus 1 \
--batchSize 8 \
--data_path "Data/ProgressiveTransformersSLP" \
--vocab_file "Data/ProgressiveTransformersSLP/src_vocab.txt" \
--pose_vqvae "/Dataset/everybody_sign_now_experiments/pose2text_logs/stage1/lightning_logs/seperate_vit/checkpoints/epoch=56-step=33743-val_wer=0.0000-val_rec_loss=0.0138-val_ce_loss=0.0000.ckpt" \
--vqvae_hparams_file "/Dataset/everybody_sign_now_experiments/pose2text_logs/stage1/lightning_logs/seperate_vit/hparams.yaml" \
--resume_ckpt "/Dataset/everybody_sign_now_experiments/text2pose_logs/stage2/lightning_logs/version_1/checkpoints/epoch=174-step=155224-val_ce_loss=0.0000-val_wer=0.000000.ckpt" \
--default_root_dir "/Dataset/everybody_sign_now_experiments/text2pose_logs/stage2" \
--max_steps 300000 \
--max_frames_num 200 \
--gpu_ids "0" \
--backmodel2 "/Dataset/everybody_sign_now_experiments/pose2text_logs/backmodel/lightning_logs/joint_model/checkpoints/epoch=13-step=8287-val_wer=0.5971.ckpt" \
--backmodel_hparams_file2 "/Dataset/everybody_sign_now_experiments/pose2text_logs/backmodel/lightning_logs/joint_model/hparams.yaml" \
--backmodel "/Dataset/everybody_sign_now_experiments/pose2text_logs/backmodel/lightning_logs/heatmap_model/checkpoints/epoch=7-step=28383-val_wer=0.6861.ckpt" \
--backmodel_hparams_file "/Dataset/everybody_sign_now_experiments/pose2text_logs/backmodel/lightning_logs/heatmap_model/hparams.yaml"
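
# Alternative configuration (kept for reference): NAT distillation run using a
# CTC pose2text model (--ctc_model / --ctc_hparams_file).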
# python -m train_text2pose \
# --gpus 1 \
# --batchSize 8 \
# --data_path "Data/ProgressiveTransformersSLP" \
# --vocab_file "Data/ProgressiveTransformersSLP/src_vocab.txt" \
# --pose_vqvae "/Dataset/everybody_sign_now_experiments/text2pose_logs/ctc_nat/lightning_logs/freeze_emb/checkpoints/epoch=98-step=87812.ckpt" \
# --vqvae_hparams_file "/Dataset/everybody_sign_now_experiments/text2pose_logs/ctc_nat/lightning_logs/freeze_emb/hparams.yaml" \
# --resume_ckpt "" \
# --default_root_dir "/Dataset/everybody_sign_now_experiments/text2pose_logs/nat_distill" \
# --max_steps 300000 \
# --max_frames_num 200 \
# --gpu_ids "0" \
# --ctc_model "pose2text_logs/lightning_logs/version_1/checkpoints/epoch=28-step=1623.ckpt" \
# --ctc_hparams_file "pose2text_logs/lightning_logs/version_1/hparams.yaml" \