Commit b326f0f

datumbox authored and facebook-github-bot committed
[fbsync] Remove the unused/buggy --train-center-crop flag from Classification preset (#6642)
Summary:
* Fixing inverted center_crop check on Classification preset
* Remove the `--train-center-crop` flag.

Reviewed By: YosuaMichael
Differential Revision: D39885428
fbshipit-source-id: 0ac6b72d55b05f014d690ac51645cdbd3f29d2b9
1 parent 1f7dd24 commit b326f0f

3 files changed: +3, -15 lines

references/classification/README.md

Lines changed: 1 addition & 1 deletion
@@ -248,7 +248,7 @@ Note that `--val-resize-size` was optimized in a post-training step, see their `
 ### MaxViT
 ```
 torchrun --nproc_per_node=8 --n_nodes=4 train.py\
---model $MODEL --epochs 400 --batch-size 128 --opt adamw --lr 3e-3 --weight-decay 0.05 --lr-scheduler cosineannealinglr --lr-min 1e-5 --lr-warmup-method linear --lr-warmup-epochs 32 --label-smoothing 0.1 --mixup-alpha 0.8 --clip-grad-norm 1.0 --interpolation bicubic --auto-augment ta_wide --policy-magnitude 15 --train-center-crop --model-ema --val-resize-size 224
+--model $MODEL --epochs 400 --batch-size 128 --opt adamw --lr 3e-3 --weight-decay 0.05 --lr-scheduler cosineannealinglr --lr-min 1e-5 --lr-warmup-method linear --lr-warmup-epochs 32 --label-smoothing 0.1 --mixup-alpha 0.8 --clip-grad-norm 1.0 --interpolation bicubic --auto-augment ta_wide --policy-magnitude 15 --model-ema --val-resize-size 224\
 --val-crop-size 224 --train-crop-size 224 --amp --model-ema-steps 32 --transformer-embedding-decay 0 --sync-bn
 ```
 Here `$MODEL` is `maxvit_t`.

references/classification/presets.py

Lines changed: 1 addition & 6 deletions
@@ -16,13 +16,8 @@ def __init__(
         ra_magnitude=9,
         augmix_severity=3,
         random_erase_prob=0.0,
-        center_crop=False,
     ):
-        trans = (
-            [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
-            if center_crop
-            else [transforms.CenterCrop(crop_size)]
-        )
+        trans = [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
         if hflip_prob > 0:
             trans.append(transforms.RandomHorizontalFlip(hflip_prob))
         if auto_augment_policy is not None:

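The removed branch above is the inverted check called out in the summary: with the default `center_crop=False` the ternary fell back to `CenterCrop` for training, and only used `RandomResizedCrop` when the flag was set, the opposite of what the flag's help text promised. A minimal standalone sketch of the before/after behaviour (the `crop_size`/`interpolation` values are illustrative placeholders, not taken from the repository):

```
# Standalone illustration of the inverted check removed in this commit.
# crop_size/interpolation below are placeholder values, not repository defaults.
from torchvision import transforms
from torchvision.transforms import InterpolationMode

crop_size = 224
interpolation = InterpolationMode.BILINEAR
center_crop = False  # the removed flag's default

# Before: inverted condition -- the default (False) silently trained with CenterCrop,
# while passing --train-center-crop produced a RandomResizedCrop.
buggy_trans = (
    [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
    if center_crop
    else [transforms.CenterCrop(crop_size)]
)

# After: the flag is gone and training always starts from RandomResizedCrop.
fixed_trans = [transforms.RandomResizedCrop(crop_size, interpolation=interpolation)]
```
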
references/classification/train.py

Lines changed: 1 addition & 8 deletions
@@ -113,11 +113,10 @@ def _get_cache_path(filepath):
 def load_data(traindir, valdir, args):
     # Data loading code
     print("Loading data")
-    val_resize_size, val_crop_size, train_crop_size, center_crop = (
+    val_resize_size, val_crop_size, train_crop_size = (
         args.val_resize_size,
         args.val_crop_size,
         args.train_crop_size,
-        args.train_center_crop,
     )
     interpolation = InterpolationMode(args.interpolation)

@@ -136,7 +135,6 @@ def load_data(traindir, valdir, args):
         dataset = torchvision.datasets.ImageFolder(
             traindir,
             presets.ClassificationPresetTrain(
-                center_crop=center_crop,
                 crop_size=train_crop_size,
                 interpolation=interpolation,
                 auto_augment_policy=auto_augment_policy,
@@ -501,11 +499,6 @@ def get_args_parser(add_help=True):
     parser.add_argument(
         "--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)"
     )
-    parser.add_argument(
-        "--train-center-crop",
-        action="store_true",
-        help="use center crop instead of random crop for training (default: False)",
-    )
     parser.add_argument("--clip-grad-norm", default=None, type=float, help="the maximum gradient norm (default None)")
     parser.add_argument("--ra-sampler", action="store_true", help="whether to use Repeated Augmentation in training")
     parser.add_argument(

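For completeness, a hedged sketch (not from the repository) of how the training dataset is built once the keyword is gone; it assumes it is executed from `references/classification` so that `presets.py` is importable, and the parameter values are placeholders mirroring flags from the MaxViT recipe above:

```
# Hedged sketch: building the training dataset after this commit.
# Assumes the working directory is references/classification; the dataset path
# and parameter values are placeholders (crop size / interpolation / policy
# mirror the MaxViT flags shown in the README diff).
import torchvision
from torchvision.transforms import InterpolationMode

import presets  # references/classification/presets.py

train_transform = presets.ClassificationPresetTrain(
    crop_size=224,                            # --train-crop-size 224
    interpolation=InterpolationMode.BICUBIC,  # --interpolation bicubic
    auto_augment_policy="ta_wide",            # --auto-augment ta_wide
    # center_crop=True,  # no longer accepted; the keyword and CLI flag were removed
)

dataset = torchvision.datasets.ImageFolder("path/to/imagenet/train", train_transform)
```

After this change, passing `--train-center-crop` to `train.py` is rejected by argparse as an unrecognized argument.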