# SSD with Mobilenet v2 configuration for MSCOCO Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.

model {
  ssd {
    num_classes: 10
    box_coder {
      faster_rcnn_box_coder {
        y_scale: 10.0
        x_scale: 10.0
        height_scale: 5.0
        width_scale: 5.0
      }
    }
    matcher {
      argmax_matcher {
        # Matched/unmatched thresholds are equal (0.40), so no anchors fall in
        # an "ignore" band between them.
        matched_threshold: 0.40
        unmatched_threshold: 0.40
        ignore_thresholds: false
        negatives_lower_than_unmatched: true
        force_match_for_each_row: true
      }
    }
    similarity_calculator {
      iou_similarity {
      }
    }
    anchor_generator {
      ssd_anchor_generator {
        num_layers: 6
        min_scale: 0.1
        max_scale: 0.95
        # Single aspect ratio (wide boxes); add more ratios here if targets
        # vary in shape.
        aspect_ratios: 1.6
      }
    }
    image_resizer {
      fixed_shape_resizer {
        height: 604
        width: 960
      }
    }
    box_predictor {
      convolutional_box_predictor {
        min_depth: 0
        max_depth: 0
        num_layers_before_predictor: 0
        # dropout_keep_probability is inert while use_dropout is false.
        use_dropout: false
        dropout_keep_probability: 0.8
        kernel_size: 1
        box_code_size: 4
        apply_sigmoid_to_scores: false
        conv_hyperparams {
          activation: RELU_6,
          regularizer {
            l2_regularizer {
              weight: 0.00004
            }
          }
          initializer {
            truncated_normal_initializer {
              stddev: 0.03
              mean: 0.0
            }
          }
          batch_norm {
            train: true,
            scale: true,
            center: true,
            decay: 0.9997,
            epsilon: 0.001,
          }
        }
      }
    }
    feature_extractor {
      type: 'ssd_mobilenet_v2'
      min_depth: 16
      depth_multiplier: 1.0
      use_depthwise: true
      conv_hyperparams {
        activation: RELU_6,
        regularizer {
          l2_regularizer {
            weight: 0.00004
          }
        }
        initializer {
          truncated_normal_initializer {
            stddev: 0.03
            mean: 0.0
          }
        }
        batch_norm {
          train: true,
          scale: true,
          center: true,
          decay: 0.9997,
          epsilon: 0.001,
        }
      }
    }
    loss {
      classification_loss {
        weighted_sigmoid {
        }
      }
      localization_loss {
        weighted_smooth_l1 {
        }
      }
      hard_example_miner {
        num_hard_examples: 3000
        iou_threshold: 0.99
        loss_type: CLASSIFICATION
        max_negatives_per_positive: 3
        min_negatives_per_image: 3
      }
      classification_weight: 1.0
      localization_weight: 1.0
    }
    normalize_loss_by_num_matches: true
    post_processing {
      batch_non_max_suppression {
        score_threshold: 1e-8
        # NOTE(review): 0.85 NMS IoU is permissive (default configs commonly
        # use 0.6) — confirm overlapping duplicate detections are intended to
        # survive suppression.
        iou_threshold: 0.85
        max_detections_per_class: 100
        max_total_detections: 100
      }
      score_converter: SIGMOID
    }
  }
}

train_config: {
  batch_size: 12
  optimizer {
    rms_prop_optimizer: {
      learning_rate: {
        exponential_decay_learning_rate {
          initial_learning_rate: 0.004
          decay_steps: 800720
          decay_factor: 0.95
        }
      }
      momentum_optimizer_value: 0.9
      decay: 0.9
      epsilon: 1.0
    }
  }
  fine_tune_checkpoint: "ssd_mobilenet_v2_coco_2018_03_29/model.ckpt"
  fine_tune_checkpoint_type: "detection"
  # Note: The below line limits the training process to 2M steps. Since the
  # first learning-rate decay happens at decay_steps (800720), training will
  # see at most two decay steps before stopping. Remove the below line to
  # train indefinitely.
  num_steps: 2000000
  #data_augmentation_options {
  #  random_horizontal_flip {
  #  }
  #}
  #data_augmentation_options {
  #  ssd_random_crop {
  #  }
  #}
}

train_input_reader: {
  tf_record_input_reader {
    input_path: "data/train.record"
  }
  label_map_path: "data/object-detection.pbtxt"
}

eval_config: {
  num_examples: 8000
  # Note: The below line limits the evaluation process to 10 evaluations.
  # Remove the below line to evaluate indefinitely.
  max_evals: 10
}

eval_input_reader: {
  tf_record_input_reader {
    input_path: "data/test.record"
  }
  # NOTE(review): this path differs from the train_input_reader's
  # "data/object-detection.pbtxt" — both readers normally point at the same
  # label map; confirm which path is correct.
  label_map_path: "training/object-detection.pbtxt"
  shuffle: false
  num_readers: 1
}