Skip to content

Commit

Permalink
Fix yapf for readers.py and losses.py
Browse files Browse the repository at this point in the history
  • Loading branch information
XericZephyr committed Aug 21, 2019
1 parent 98d6799 commit c863a13
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 17 deletions.
8 changes: 4 additions & 4 deletions losses.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,11 +92,11 @@ def calculate_loss(self, predictions, labels, **unused_params):
epsilon = 10e-8
float_labels = tf.cast(labels, tf.float32)
# l1 normalization (labels are no less than 0)
label_rowsum = tf.maximum(
tf.reduce_sum(float_labels, 1, keep_dims=True), epsilon)
label_rowsum = tf.maximum(tf.reduce_sum(float_labels, 1, keep_dims=True),
epsilon)
norm_float_labels = tf.div(float_labels, label_rowsum)
softmax_outputs = tf.nn.softmax(predictions)
softmax_loss = tf.negative(
tf.reduce_sum(
tf.multiply(norm_float_labels, tf.log(softmax_outputs)), 1))
tf.reduce_sum(tf.multiply(norm_float_labels, tf.log(softmax_outputs)),
1))
return tf.reduce_mean(softmax_loss)
26 changes: 13 additions & 13 deletions readers.py
Original file line number Diff line number Diff line change
Expand Up @@ -286,22 +286,21 @@ def prepare_serialized_examples(self,
start_times = contexts["segment_start_times"].values
# Here we assume all the segments that started at the same start time have
# the same segment_size.
uniq_start_times, seg_idxs = tf.unique(
start_times, out_idx=tf.dtypes.int64)
uniq_start_times, seg_idxs = tf.unique(start_times,
out_idx=tf.dtypes.int64)
# TODO(zhengxu): Ensure the segment_sizes are all same.
segment_size = self.segment_size
# Range gather matrix, e.g., [[0,1,2],[1,2,3]] for segment_size == 3.
range_mtx = tf.expand_dims(
uniq_start_times, axis=-1) + tf.expand_dims(
tf.range(0, segment_size, dtype=tf.int64), axis=0)
range_mtx = tf.expand_dims(uniq_start_times, axis=-1) + tf.expand_dims(
tf.range(0, segment_size, dtype=tf.int64), axis=0)
# Shape: [num_segment, segment_size, feature_dim].
batch_video_matrix = tf.gather_nd(video_matrix,
tf.expand_dims(range_mtx, axis=-1))
num_segment = tf.shape(batch_video_matrix)[0]
batch_video_ids = tf.reshape(
tf.tile([contexts["id"]], [num_segment]), (num_segment,))
batch_frames = tf.reshape(
tf.tile([segment_size], [num_segment]), (num_segment,))
batch_video_ids = tf.reshape(tf.tile([contexts["id"]], [num_segment]),
(num_segment,))
batch_frames = tf.reshape(tf.tile([segment_size], [num_segment]),
(num_segment,))

# For segment labels, not all labels are exhaustively rated. So we only
# evaluate the rated labels.
Expand All @@ -317,17 +316,18 @@ def prepare_serialized_examples(self,
sparse_label_weights = tf.sparse.SparseTensor(
label_indices, tf.ones_like(label_values, dtype=tf.float32),
(num_segment, self.num_classes))
batch_label_weights = tf.sparse.to_dense(
sparse_label_weights, validate_indices=False)
batch_label_weights = tf.sparse.to_dense(sparse_label_weights,
validate_indices=False)
else:
# Process video-level labels.
label_indices = contexts["labels"].values
sparse_labels = tf.sparse.SparseTensor(
tf.expand_dims(label_indices, axis=-1),
tf.ones_like(contexts["labels"].values, dtype=tf.bool),
(self.num_classes,))
labels = tf.sparse.to_dense(
sparse_labels, default_value=False, validate_indices=False)
labels = tf.sparse.to_dense(sparse_labels,
default_value=False,
validate_indices=False)
# convert to batch format.
batch_video_ids = tf.expand_dims(contexts["id"], 0)
batch_video_matrix = tf.expand_dims(video_matrix, 0)
Expand Down

0 comments on commit c863a13

Please sign in to comment.