forked from EdwardSmith1884/Multi-View-Silhouette-and-Depth-Decomposition-for-High-Resolution-3D-Object-Representation
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathSREval.py
92 lines (65 loc) · 3.52 KB
/
SREval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import tensorflow as tf
import os
import sys
sys.path.insert(0, './scripts/')
import tensorlayer as tl
import numpy as np
import random
from utils import *
from models import *
import argparse
# ---------------- command-line configuration ----------------
# All options are optional; defaults reproduce the paper's chair experiment.
parser = argparse.ArgumentParser(description='3D-GAN implementation for 32*32*32 voxel output')
_ARG_SPECS = [
    (('-o', '--object'),
     dict(default='chair', help='The name of the current experiment, this will be used to create folders and save models.')),
    (('-b', '--batchsize'),
     dict(default=32, help='The batch size.', type=int)),
    (('-depth', '--depth'),
     dict(default='best', help='Epoch from which to load the depth map predictor, if you want the best leave default.')),
    (('-occ', '--occ'),
     dict(default='best', help='Epoch from which to load the occupancy map predictor, if you want the best leave default.')),
    (('-dis', '--distance'),
     dict(default=70, help='The range in which distances will be predicted.', type=int)),
    (('-high',),
     dict(default=256, help='The size of the high dimension objects.', type=int)),
    (('-low',),
     dict(default=32, help='The size of the low dimension object.', type=int)),
]
for _flags, _params in _ARG_SPECS:
    parser.add_argument(*_flags, **_params)
args = parser.parse_args()

# Derived paths / sizes used throughout the script.
checkpoint_dir = "checkpoint/" + args.object + '/'   # where trained models live
data_dir = 'data/voxels/' + args.object + '/test'    # held-out voxel models
batchsize = args.batchsize
high = args.high
low = args.low
distance = args.distance
ratio = high // low   # upscaling factor between low- and high-res grids
####### inputs ##########
# Placeholders: a batch of low-resolution orthographic depth maps (ODMs)
# plus a per-sample encoding of which cube face each map came from.
images_low = tf.placeholder(tf.float32, [batchsize, low, low, 1], name='images_low')  # low res odm input
side = tf.placeholder(tf.float32, [batchsize, low, low, 1], name='side')              # the side being considered
network_input = tf.concat((images_low, side), axis=3)

########## network computations #######################
# Two independent super-resolution networks share the same input:
# one predicts depths, the other occupancy masks.
net_depth, depth_pred = upscale(network_input, ratio, scope='depth', is_train=False, reuse=False)
net_occ, occ_pred = upscale(network_input, ratio, scope='occupancy', is_train=False, reuse=False)
net_depth.print_params(False)
net_occ.print_params(False)
##### computing #######
# GPU session configuration: grow memory on demand instead of grabbing it all.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# BUG FIX: `config` was constructed but never passed to the session, so
# allow_growth silently had no effect; hand it to tf.Session explicitly.
sess = tf.Session(config=config)
tl.ops.set_gpu_fraction(sess=sess, gpu_fraction=0.999)
sess.run(tf.global_variables_initializer())
# load networks for depth and occupancy from the experiment's checkpoints
load_networks(checkpoint_dir, sess, net_depth, args.depth, name ='depth')
load_networks(checkpoint_dir, sess, net_occ, args.occ, name = 'occ')
# list of test voxel-model files to evaluate
files = grab_files(data_dir)
# Evaluate the test set one full batch at a time (files beyond the last
# complete batch are dropped, matching the original behaviour).
# FIX: `xrange` is Python 2 only (NameError on Python 3) and `/` on ints
# is true division on Python 3 (float passed to range); `range` and `//`
# behave identically on Python 2 and fix Python 3.
for idx in range(len(files) // args.batchsize):
    cur_files = files[idx * batchsize:(idx + 1) * batchsize]
    odms = []
    # predict one orthographic depth map (ODM) per face of the cube
    for k in range(6):
        batch, _ = make_batch(cur_files, high, low, side = k)
        depths, occs = sess.run([depth_pred, occ_pred],
                                feed_dict={images_low: batch['low'], side: batch['side']})
        # combining depths and occupancy maps to recover full odms
        odms.append(recover_odms(depths, occs, batch['low_up'], high, low, distance,
                                 threshold = 1.5 * high // low))
    # regroup per object: one tuple of six odms for each model in the batch
    odms = zip(odms[0], odms[1], odms[2], odms[3], odms[4], odms[5])
    objs, small_objs = make_objs(cur_files)  # loading the ground truth object and input object
    # looping over the batch: upsample the input, carve it with the
    # predicted occupancy, then refine surfaces with the predicted depths
    for odm, obj, small_obj in zip(odms, objs, small_objs):
        small_obj = upsample(small_obj, high, low)
        prediction = apply_occupancy(np.array(small_obj), np.array(odm))
        prediction = apply_depth(np.array(prediction), np.array(odm), high)
        evaluate_SR(prediction, obj, small_obj, gt = False)  # render model