#### Compute Default Results for ViT-B-32 and ViT-L-14.
## For larger models, simply set e.g. --clip ViT-bigG-14

# Compute Results on CIRR.
# Note: This will create a submission JSON file to upload to the evaluation server: https://cirr.cecs.anu.edu.au/test_process/
# Note: To compute results on the validation split, simply set --split val
datapath=/mnt/datasets_r/CIRR
python src/main.py --dataset cirr --split test --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.contextual_modifier_prompt --clip ViT-B-32
python src/main.py --dataset cirr --split test --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.contextual_modifier_prompt --clip ViT-L-14
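# Optional sketches (commented out so the default runs above stay unchanged):
# the first follows the note at the top of this script and swaps in a larger
# backbone (assumes ViT-bigG-14 weights are available to your CLIP loader);
# the second follows the note above and evaluates on the validation split instead.
# python src/main.py --dataset cirr --split test --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.contextual_modifier_prompt --clip ViT-bigG-14
# python src/main.py --dataset cirr --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.contextual_modifier_prompt --clip ViT-B-32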

# Compute Results on the CIRCO test set.
# Note: This will create a submission JSON file to upload to the evaluation server: https://circo.micc.unifi.it/evaluation
# Note: To compute results on the validation split, simply set --split val
datapath=/mnt/datasets_r/CIRCO
python src/main.py --dataset circo --split test --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt --clip ViT-B-32
python src/main.py --dataset circo --split test --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt --clip ViT-L-14
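# Optional sketch (commented out): per the note above, the same CIRCO evaluation
# on the validation split, obtained by simply setting --split val.
# python src/main.py --dataset circo --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt --clip ViT-B-32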

# Compute Results on each FashionIQ subset (dress, shirt, toptee).
# Note: FashionIQ results are reported on the validation set.
datapath=/mnt/datasets_r/FASHIONIQ
python src/main.py --dataset fashioniq_dress --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt_fashion --clip ViT-B-32
python src/main.py --dataset fashioniq_dress --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt_fashion --clip ViT-L-14

python src/main.py --dataset fashioniq_shirt --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt_fashion --clip ViT-B-32
python src/main.py --dataset fashioniq_shirt --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt_fashion --clip ViT-L-14

python src/main.py --dataset fashioniq_toptee --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt_fashion --clip ViT-B-32
python src/main.py --dataset fashioniq_toptee --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt_fashion --clip ViT-L-14
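
# Optional convenience sketch (commented out): an equivalent loop over the three
# FashionIQ subsets and the two default backbones, producing the same six runs as above.
# for subset in dress shirt toptee; do
#   for clip in ViT-B-32 ViT-L-14; do
#     python src/main.py --dataset fashioniq_${subset} --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.structural_modifier_prompt_fashion --clip $clip
#   done
# done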

# Compute Results on each GeneCIS benchmark.
# Note: GeneCIS results are reported on the validation set.
datapath=/mnt/datasets_r/GENECIS
# Change Attribute
python src/main.py --dataset genecis_change_attribute --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.short_modifier_prompt --clip ViT-B-32
# Focus Attribute
python src/main.py --dataset genecis_focus_attribute --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.short_modifier_prompt --clip ViT-B-32
# Change Object
python src/main.py --dataset genecis_change_object --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.short_modifier_prompt --clip ViT-B-32
# Focus Object
python src/main.py --dataset genecis_focus_object --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt prompts.short_focus_object_modifier_prompt --clip ViT-B-32
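
# Optional convenience sketch (commented out): an equivalent loop over the four
# GeneCIS tasks, switching to the focus-object prompt where needed.
# for task in change_attribute focus_attribute change_object focus_object; do
#   prompt=prompts.short_modifier_prompt
#   if [ "$task" = "focus_object" ]; then prompt=prompts.short_focus_object_modifier_prompt; fi
#   python src/main.py --dataset genecis_${task} --split val --dataset-path $datapath --preload img_features captions mods --llm_prompt $prompt --clip ViT-B-32
# done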