
Commit da43a98
fix checkpoints clean issue
david8862 committed Jul 7, 2021
1 parent 65a2a79 commit da43a98
Showing 2 changed files with 11 additions and 8 deletions.
13 changes: 8 additions & 5 deletions common/backbones/ghostnet.py
@@ -113,7 +113,7 @@ def SqueezeExcite(input_x, se_ratio=0.25, reduced_base_chs=None, divisor=4, name
     return x
 
 
-def ConvBnAct(input_x, out_chs, kernel_size, stride = (1,1), name=None):
+def ConvBnAct(input_x, out_chs, kernel_size, stride=(1,1), name=None):
     x = YoloConv2D(filters=out_chs,
                    kernel_size=kernel_size,
                    strides=stride,
@@ -125,7 +125,7 @@ def ConvBnAct(input_x, out_chs, kernel_size, stride = (1,1), name=None):
     return x
 
 
-def GhostModule(input_x,output_chs,kernel_size=1,ratio = 2,dw_size = 3,stride = (1,1),act = True,name = None):
+def GhostModule(input_x, output_chs, kernel_size=1, ratio=2, dw_size=3, stride=(1,1), act=True, name=None):
     init_channels = int(math.ceil(output_chs / ratio))
     new_channels = int(init_channels * (ratio - 1))
     x1 = primary_conv(input_x,
@@ -226,7 +226,7 @@ def GhostNet(input_shape=None,
              input_tensor=None,
              cfgs=DEFAULT_CFGS,
              width=1.0,
-             dropout=0.2,
+             dropout_rate=0.2,
              pooling=None,
              classes=1000,
              **kwargs):
@@ -251,6 +251,9 @@ def GhostNet(input_shape=None,
         input_tensor: optional Keras tensor (i.e. output of
             `layers.Input()`)
             to use as image input for the model.
+        cfgs: model structure config list
+        width: controls the width of the network
+        dropout_rate: fraction of the input units to drop on the last layer
         pooling: Optional pooling mode for feature extraction
             when `include_top` is `False`.
             - `None` means that the output of the model
@@ -351,8 +354,8 @@ def GhostNet(input_shape=None,
                    name='conv_head')(x)
     x = ReLU(name='relu_head')(x)
 
-    if dropout > 0.:
-        x = Dropout(dropout, name='dropout_1')(x)
+    if dropout_rate > 0.:
+        x = Dropout(dropout_rate, name='dropout_1')(x)
     x = Flatten()(x)
     x = Dense(units=classes, activation='softmax',
               use_bias=True, name='classifier')(x)
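
For callers, the visible effect of the ghostnet.py change is the renamed keyword argument: dropout_rate replaces dropout. A minimal usage sketch under that signature (the module path comes from this diff; the input size is an illustrative assumption, not from the commit):

    from common.backbones.ghostnet import GhostNet

    # Build the classifier with the renamed argument; before this commit
    # the same parameter was called `dropout`.
    model = GhostNet(input_shape=(224, 224, 3),  # assumed example input size
                     width=1.0,
                     dropout_rate=0.2,
                     classes=1000)
    model.summary()
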
6 changes: 3 additions & 3 deletions train.py
@@ -62,7 +62,7 @@ def main(args):
     checkpoint_clean = CheckpointCleanCallBack(log_dir, max_val_keep=5, max_eval_keep=2)
     terminate_on_nan = TerminateOnNaN()
 
-    callbacks=[logging, checkpoint, checkpoint_clean, reduce_lr, early_stopping, terminate_on_nan]
+    callbacks = [logging, checkpoint, reduce_lr, early_stopping, terminate_on_nan, checkpoint_clean]
 
     # get train&val dataset
     dataset = get_dataset(annotation_file)
@@ -137,7 +137,7 @@ def main(args):
     # prepare online evaluation callback
     if args.eval_online:
         eval_callback = EvalCallBack(args.model_type, dataset[num_train:], anchors, class_names, args.model_image_size, args.model_pruning, log_dir, eval_epoch_interval=args.eval_epoch_interval, save_eval_checkpoint=args.save_eval_checkpoint, elim_grid_sense=args.elim_grid_sense)
-        callbacks.append(eval_callback)
+        callbacks.insert(-1, eval_callback) # add before checkpoint clean
 
     # prepare train/val data shuffle callback
     if args.data_shuffle:
@@ -210,7 +210,7 @@ def main(args):
                                          save_weights_only=False,
                                          save_best_only=True,
                                          period=1)
-        callbacks.insert(-1, avg_checkpoint) # add before checkpoint clean
+        callbacks.insert(-1, avg_checkpoint) # add before checkpoint clean
 
     steps_per_epoch = max(1, num_train//args.batch_size)
     decay_steps = steps_per_epoch * (args.total_epoch - args.init_epoch - args.transfer_epoch)
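
The ordering is the whole fix: tf.keras invokes callbacks in list order, so CheckpointCleanCallBack can only safely prune old checkpoints if it runs after every callback that writes them. With the clean callback now placed last, insert(-1, ...) slots each additional checkpoint writer just before it. A self-contained sketch of that list manipulation (strings stand in for the repo's actual callback objects):

    callbacks = ["logging", "checkpoint", "reduce_lr", "early_stopping",
                 "terminate_on_nan", "checkpoint_clean"]

    # insert(-1, x) puts x just before the final element, so the cleanup
    # callback stays last and runs after every checkpoint writer.
    callbacks.insert(-1, "eval_callback")
    callbacks.insert(-1, "avg_checkpoint")

    print(callbacks[-3:])  # ['eval_callback', 'avg_checkpoint', 'checkpoint_clean']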
