import scipy.misc
import numpy as np
import os
from glob import glob
import tensorflow as tf
import tensorflow.contrib.slim as slim
from keras.datasets import cifar10, mnist


class ImageData:
    # Holds the target size / channel count and maps a filename tensor to a
    # decoded, resized image scaled to [-1, 1] (TF 1.x image ops).

    def __init__(self, load_size, channels):
        self.load_size = load_size
        self.channels = channels

    def image_processing(self, filename):
        x = tf.read_file(filename)
        x_decode = tf.image.decode_jpeg(x, channels=self.channels)
        img = tf.image.resize_images(x_decode, [self.load_size, self.load_size])
        img = tf.cast(img, tf.float32) / 127.5 - 1

        return img
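
# A minimal sketch (not part of the original file) of how ImageData.image_processing
# might be plugged into a TF 1.x tf.data input pipeline; `image_paths` and
# `batch_size` are hypothetical placeholders, not defined in this module:
#
#   img_data = ImageData(load_size=64, channels=3)
#   dataset = tf.data.Dataset.from_tensor_slices(image_paths)
#   dataset = dataset.map(img_data.image_processing).shuffle(10000).batch(batch_size)
#   next_batch = dataset.make_one_shot_iterator().get_next()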


def load_mnist(size=64):
    # Merge the MNIST train and test splits, shuffle with a fixed seed,
    # resize every image to size x size, and add a channel axis.
    (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)

    # x = np.expand_dims(x, axis=-1)
    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
    x = np.expand_dims(x, axis=-1)

    return x


def load_cifar10(size=64):
    # Same as load_mnist, but CIFAR-10 images are already 3-channel,
    # so no channel axis needs to be added.
    (train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)

    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])

    return x


def load_data(dataset_name, size=64):
    # Built-in datasets return a numpy array of images; any other name is
    # treated as a folder of image files under ./dataset/<dataset_name>.
    if dataset_name == 'mnist':
        x = load_mnist(size)
    elif dataset_name == 'cifar10':
        x = load_cifar10(size)
    else:
        x = glob(os.path.join("./dataset", dataset_name, '*.*'))

    return x
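
# Usage sketch (illustrative, not from the original file): built-in names return
# a stacked numpy array of resized images, anything else returns file paths.
# 'celebA' below is a hypothetical folder under ./dataset:
#
#   x = load_data('cifar10', size=64)     # numpy array, shape (60000, 64, 64, 3)
#   paths = load_data('celebA', size=64)  # list of image file paths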


def preprocessing(x, size):
    # Read an image file from disk, resize it, and scale pixel values to [-1, 1].
    x = scipy.misc.imread(x, mode='RGB')
    x = scipy.misc.imresize(x, [size, size])
    x = normalize(x)

    return x


def normalize(x):
    # Map uint8 pixel values [0, 255] to floats in [-1, 1].
    return x / 127.5 - 1
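
# Sketch: for folder datasets, each path returned by load_data() would be turned
# into a normalized array via preprocessing (the path below is hypothetical):
#
#   img = preprocessing('./dataset/celebA/000001.jpg', size=64)  # float array in [-1, 1]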


def save_images(images, size, image_path):
    # Rescale images from [-1, 1] back to [0, 1], tile them into a grid, and save.
    return imsave(inverse_transform(images), size, image_path)


def merge(images, size):
    # Tile a batch of images into a single (size[0] x size[1]) grid image.
    h, w = images.shape[1], images.shape[2]
    if images.shape[3] in (3, 4):
        c = images.shape[3]
        img = np.zeros((h * size[0], w * size[1], c))
        for idx, image in enumerate(images):
            i = idx % size[1]
            j = idx // size[1]
            img[j * h:j * h + h, i * w:i * w + w, :] = image
        return img
    elif images.shape[3] == 1:
        img = np.zeros((h * size[0], w * size[1]))
        for idx, image in enumerate(images):
            i = idx % size[1]
            j = idx // size[1]
            img[j * h:j * h + h, i * w:i * w + w] = image[:, :, 0]
        return img
    else:
        raise ValueError('in merge(images, size) images parameter '
                         'must have dimensions: HxW or HxWx3 or HxWx4')


def imsave(images, size, path):
    # image = np.squeeze(merge(images, size))  # squeeze out the channel axis when it is 1?
    return scipy.misc.imsave(path, merge(images, size))


def inverse_transform(images):
    # Map images from [-1, 1] back to [0, 1].
    return (images + 1.) / 2.
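
# Sketch of how the saving helpers fit together (assumes `fake_images` is a
# hypothetical batch of generator outputs in [-1, 1] with shape [N, H, W, C]):
#
#   save_images(fake_images[:64], size=[8, 8], image_path='./samples/sample.png')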


def check_folder(log_dir):
    # Create the directory if it does not exist and return its path.
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    return log_dir


def show_all_variables():
    # Print a per-variable breakdown of all trainable parameters via slim.
    model_vars = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(model_vars, print_info=True)


def str2bool(x):
    # `in ('true')` tested substring membership of a plain string; compare explicitly instead.
    return x.lower() == 'true'
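
# str2bool reads like an argparse `type=` converter; a usage sketch under that
# assumption (the `--sn` flag name is hypothetical, and `argparse` is not imported here):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--sn', type=str2bool, default=True)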