Add functionality to predict single sample
fschaeffler93 committed Apr 16, 2019
1 parent 526a31a commit 5993133
Showing 2 changed files with 182 additions and 40 deletions.
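
In short: test_single.py restores a trained RPN3D checkpoint and runs prediction on one KITTI test frame selected by tag, and utils/kitti_loader.py gains a sample_single_data() loader that builds the one-frame batch. A hedged usage sketch — the dataset path is an assumption based on the loader's glob patterns, and 'pre_trained_car' / '000000' are just the script's defaults:

# Hypothetical CLI invocation with the script's defaults:
#   python test_single.py -n pre_trained_car -t 000000 -o ./predictions
# Programmatic sketch -- the dataset path below is an assumption, not part of this commit:
from utils.kitti_loader import sample_single_data

batch = sample_single_data('./data/KITTI/testing', '000000')
tag, labels, vox_feature, vox_number, vox_coordinate, rgb, raw_lidar = batch
print(tag[0])   # -> '000000'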
94 changes: 94 additions & 0 deletions test_single.py
@@ -0,0 +1,94 @@
#!/usr/bin/env python
# -*- coding:UTF-8 -*-

import glob
import argparse
import os
import time

import cv2
import tensorflow as tf

from config import cfg
from model import RPN3D

from utils import *
from utils.kitti_loader import sample_single_data



parser = argparse.ArgumentParser(description='testing')
parser.add_argument('-n', '--tag', type=str, nargs='?', default='pre_trained_car',
                    help='set log tag')
parser.add_argument('-t', '--data-tag', type=str, nargs='?', default='000000',
                    help='set data tag')
parser.add_argument('-o', '--output-path', type=str, nargs='?',
                    default='./predictions', help='results output dir')
# NOTE: argparse's type=bool treats any non-empty string as True, so
# '--vis False' still enables visualization; omit the flag to change the default.
parser.add_argument('-v', '--vis', type=bool, nargs='?', default=True,
                    help='set the flag to True if dumping visualizations')
args = parser.parse_args()


dataset_dir = cfg.DATA_DIR
test_dir = os.path.join(dataset_dir, 'testing')
save_model_dir = os.path.join('.', 'save_model', args.tag)

os.makedirs(args.output_path, exist_ok=True)
os.makedirs(os.path.join(args.output_path, 'data'), exist_ok=True)
if args.vis:
    os.makedirs(os.path.join(args.output_path, 'vis'), exist_ok=True)

def main(_):

    with tf.Graph().as_default():

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=cfg.GPU_MEMORY_FRACTION,
                                    visible_device_list=cfg.GPU_AVAILABLE,
                                    allow_growth=True)

        config = tf.ConfigProto(
            gpu_options=gpu_options,
            device_count={
                "GPU": cfg.GPU_USE_COUNT,
            },
            allow_soft_placement=True,
        )

        with tf.Session(config=config) as sess:
            model = RPN3D(
                cls=cfg.DETECT_OBJ,
                single_batch_size=1,
                avail_gpus=cfg.GPU_AVAILABLE.split(',')
            )
            # param init/restore
            if tf.train.get_checkpoint_state(save_model_dir):
                print("Reading model parameters from %s" % save_model_dir)
                model.saver.restore(
                    sess, tf.train.latest_checkpoint(save_model_dir))

            batch = sample_single_data(test_dir, args.data_tag)

            if args.vis:
                tags, results, front_images, bird_views, heatmaps = model.predict_step(sess, batch, summary=False, vis=True)
            else:
                tags, results = model.predict_step(sess, batch, summary=False, vis=False)

            # dump detection results in KITTI label format
            for tag, result in zip(tags, results):
                of_path = os.path.join(args.output_path, 'data', tag + '.txt')
                with open(of_path, 'w+') as f:
                    labels = box3d_to_label([result[:, 1:8]], [result[:, 0]], [result[:, -1]], coordinate='lidar')[0]
                    for line in labels:
                        f.write(line)
                    print('write out {} objects to {}'.format(len(labels), tag))

            # dump visualizations
            if args.vis:
                for tag, front_image, bird_view, heatmap in zip(tags, front_images, bird_views, heatmaps):
                    front_img_path = os.path.join(args.output_path, 'vis', tag + '_front.jpg')
                    bird_view_path = os.path.join(args.output_path, 'vis', tag + '_bv.jpg')
                    heatmap_path = os.path.join(args.output_path, 'vis', tag + '_heatmap.jpg')
                    cv2.imwrite(front_img_path, front_image)
                    cv2.imwrite(bird_view_path, bird_view)
                    cv2.imwrite(heatmap_path, heatmap)


if __name__ == '__main__':
    tf.app.run(main)
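
The files under predictions/data/ follow the KITTI label convention as emitted by box3d_to_label (one object per line: class, truncation, occlusion, alpha, 2-D bbox, 3-D dimensions, location, rotation, with the detection score appended last for predictions). A quick sanity check of one output file, as a sketch — the path assumes the default output dir and data tag:

# Sketch: inspect a prediction file written by test_single.py.
# Path assumes the default '-o ./predictions' and tag '000000'.
with open('./predictions/data/000000.txt') as f:
    for line in f:
        fields = line.split()
        print(fields[0], float(fields[-1]))   # class name and detection score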
128 changes: 88 additions & 40 deletions utils/kitti_loader.py
@@ -15,7 +15,7 @@
 
 class Processor:
     def __init__(self, data_tag, f_rgb, f_lidar, f_label, data_dir, aug, is_testset):
-        self.data_tag=data_tag
+        self.data_tag = data_tag
         self.f_rgb = f_rgb
         self.f_lidar = f_lidar
         self.f_label = f_label
@@ -38,6 +38,8 @@ def __call__(self,load_index):
         ret = [tag, rgb, raw_lidar, voxel, labels]
         return ret
 
+
+
 def iterate_data(data_dir, shuffle=False, aug=False, is_testset=False, batch_size=1, multi_gpu_sum=1):
     f_rgb = glob.glob(os.path.join(data_dir, 'image_2', '*.png'))
     f_lidar = glob.glob(os.path.join(data_dir, 'velodyne', '*.bin'))
@@ -47,41 +49,42 @@ def iterate_data(data_dir, shuffle=False, aug=False, is_testset=False, batch_size=1, multi_gpu_sum=1):
     f_label.sort()
 
     data_tag = [name.split(os.sep)[-1].split('.')[-2] for name in f_rgb]
 
+    assert len(data_tag) != 0, "dataset folder is not correct"
     assert len(data_tag) == len(f_rgb) == len(f_lidar) , "dataset folder is not correct"
 
     nums = len(f_rgb)
 
     indices = list(range(nums))
     if shuffle:
         np.random.shuffle(indices)
 
     num_batches = int(math.floor( nums / float(batch_size) ))
 
     proc=Processor(data_tag, f_rgb, f_lidar, f_label, data_dir, aug, is_testset)
 
     for batch_idx in range(num_batches):
         start_idx = batch_idx * batch_size
         excerpts = indices[start_idx:start_idx + batch_size]
 
         rets = []
         for excerpt in excerpts:
             rets.append(proc(excerpt))
 
         tag = [ ret[0] for ret in rets ]
         rgb = [ ret[1] for ret in rets ]
         raw_lidar = [ ret[2] for ret in rets ]
         voxel = [ ret[3] for ret in rets ]
         labels = [ ret[4] for ret in rets ]
 
         vox_feature, vox_number, vox_coordinate = [], [], []
         single_batch_size = int(batch_size / multi_gpu_sum)
         for idx in range(multi_gpu_sum):
             _, per_vox_feature, per_vox_number, per_vox_coordinate = build_input(voxel[idx * single_batch_size:(idx + 1) * single_batch_size])
             vox_feature.append(per_vox_feature)
             vox_number.append(per_vox_number)
             vox_coordinate.append(per_vox_coordinate)
 
         ret = (
             np.array(tag),
             np.array(labels),
@@ -91,7 +94,7 @@ def iterate_data(data_dir, shuffle=False, aug=False, is_testset=False, batch_size=1, multi_gpu_sum=1):
             np.array(rgb),
             np.array(raw_lidar)
         )
 
         yield ret


@@ -106,52 +109,96 @@ def sample_test_data(data_dir, batch_size=1, multi_gpu_sum=1):
 
     data_tag = [name.split(os.sep)[-1].split('.')[-2] for name in f_rgb]
 
-    assert(len(data_tag) == len(f_rgb) == len(f_lidar)), "dataset folder is not correct"
+    assert len(data_tag) != 0, "dataset folder is not correct"
+    assert len(data_tag) == len(f_rgb) == len(f_lidar) , "dataset folder is not correct"
 
     nums = len(f_rgb)
 
     indices = list(range(nums))
     np.random.shuffle(indices)
 
     num_batches = int(math.floor( nums / float(batch_size) ))
 
     proc_val=Processor(data_tag, f_rgb, f_lidar, f_label, data_dir, False, False)
 
-    for batch_idx in range(num_batches):
-        start_idx = batch_idx * batch_size
-        excerpts = indices[start_idx:start_idx + batch_size]
+    excerpts = indices[0:1]
 
-        rets = []
-        for excerpt in excerpts:
-            rets.append(proc_val(excerpt))
+    rets = []
+    for excerpt in excerpts:
+        rets.append(proc_val(excerpt))
 
-        tag = [ ret[0] for ret in rets ]
-        rgb = [ ret[1] for ret in rets ]
-        raw_lidar = [ ret[2] for ret in rets ]
-        voxel = [ ret[3] for ret in rets ]
-        labels = [ ret[4] for ret in rets ]
+    tag = [ ret[0] for ret in rets ]
+    rgb = [ ret[1] for ret in rets ]
+    raw_lidar = [ ret[2] for ret in rets ]
+    voxel = [ ret[3] for ret in rets ]
+    labels = [ ret[4] for ret in rets ]
 
-        vox_feature, vox_number, vox_coordinate = [], [], []
-        single_batch_size = int(batch_size / multi_gpu_sum)
-        for idx in range(multi_gpu_sum):
-            _, per_vox_feature, per_vox_number, per_vox_coordinate = build_input(voxel[idx * single_batch_size:(idx + 1) * single_batch_size])
-            vox_feature.append(per_vox_feature)
-            vox_number.append(per_vox_number)
-            vox_coordinate.append(per_vox_coordinate)
+    vox_feature, vox_number, vox_coordinate = [], [], []
+    single_batch_size = int(batch_size / multi_gpu_sum)
+    for idx in range(multi_gpu_sum):
+        _, per_vox_feature, per_vox_number, per_vox_coordinate = build_input(voxel[idx * single_batch_size:(idx + 1) * single_batch_size])
+        vox_feature.append(per_vox_feature)
+        vox_number.append(per_vox_number)
+        vox_coordinate.append(per_vox_coordinate)
 
-        ret = (
-            np.array(tag),
-            np.array(labels),
-            np.array(vox_feature),
-            np.array(vox_number),
-            np.array(vox_coordinate),
-            np.array(rgb),
-            np.array(raw_lidar)
-        )
+    ret = (
+        np.array(tag),
+        np.array(labels),
+        np.array(vox_feature),
+        np.array(vox_number),
+        np.array(vox_coordinate),
+        np.array(rgb),
+        np.array(raw_lidar)
+    )
+
+    return ret
+
+
+def sample_single_data(data_dir, data_tag):
+    f_rgb = glob.glob(os.path.join(data_dir, 'image_2', data_tag + '.png'))
+    f_lidar = glob.glob(os.path.join(data_dir, 'velodyne', data_tag + '.bin'))
+    f_label = glob.glob(os.path.join(data_dir, 'label_2', data_tag + '.txt'))
+
+    data_tag = [data_tag]
+
+    assert len(data_tag) != 0, "dataset folder is not correct"
+    assert len(data_tag) == len(f_rgb) == len(f_lidar) , "dataset folder is not correct"
+
+    indices = list(range(1))
+
+    proc_dat=Processor(data_tag, f_rgb, f_lidar, f_label, data_dir, False, True)
+
+    excerpts = indices[0:1]
+
+    rets = []
+    for excerpt in excerpts:
+        rets.append(proc_dat(excerpt))
+
+    tag = [ ret[0] for ret in rets ]
+    rgb = [ ret[1] for ret in rets ]
+    raw_lidar = [ ret[2] for ret in rets ]
+    voxel = [ ret[3] for ret in rets ]
+    labels = [ ret[4] for ret in rets ]
+
+    vox_feature, vox_number, vox_coordinate = [], [], []
+    _, per_vox_feature, per_vox_number, per_vox_coordinate = build_input(voxel[0:1])
+    vox_feature.append(per_vox_feature)
+    vox_number.append(per_vox_number)
+    vox_coordinate.append(per_vox_coordinate)
+
+    ret = (
+        np.array(tag),
+        np.array(labels),
+        np.array(vox_feature),
+        np.array(vox_number),
+        np.array(vox_coordinate),
+        np.array(rgb),
+        np.array(raw_lidar)
+    )
+
+    return ret
 
 
 
 def build_input(voxel_dict_list):
     batch_size = len(voxel_dict_list)
 
@@ -170,5 +217,6 @@ def build_input(voxel_dict_list):
     return batch_size, feature, number, coordinate
 
 
+
 if __name__ == '__main__':
     pass
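
Note that sample_single_data returns the same 7-field tuple that iterate_data yields per batch, which is what lets model.predict_step consume the single-sample batch unchanged. A small inspection sketch — the dataset path is an assumption, and shapes assume batch_size == 1 with multi_gpu_sum == 1:

# Sketch: the loaders above share one batch layout; print it field by field.
# The dataset path below is hypothetical, not part of this commit.
from utils.kitti_loader import sample_single_data

names = ('tag', 'labels', 'vox_feature', 'vox_number',
         'vox_coordinate', 'rgb', 'raw_lidar')
for name, field in zip(names, sample_single_data('./data/KITTI/testing', '000000')):
    print(name, getattr(field, 'shape', type(field)))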
