@@ -43,65 +43,80 @@ def lrelu(tensor, leak: float=0.2):
 
 # Network Layers
 
-def conv2d(tensor, output_size: int, name: str='conv2d', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
+def conv2d(tensors, output_size: int, name: str='conv2d', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a convolutional layer"""
     with tf.variable_scope(name):
-        weight, bias = weight_bias([5, 5, int(tensor.get_shape()[-1]), output_size], stddev, term, summary)
-        conv = tf.nn.conv2d(tensor, weight, [1, 2, 2, 1], "SAME")
-        if norm:
-            conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, scale=False,
-                trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
-        return lrelu(tf.nn.bias_add(conv, bias))
+        weight, bias = weight_bias([5, 5, int(tensors[0].get_shape()[-1]), output_size], stddev, term, summary)
+        output = []
+        for tensor in tensors:
+            conv = tf.nn.conv2d(tensor, weight, [1, 2, 2, 1], "SAME")
+            if norm:
+                conv = tf.contrib.layers.batch_norm(conv, decay=0.9, updates_collections=None, scale=False,
+                    trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
+            output.append(lrelu(tf.nn.bias_add(conv, bias)))
+        return output
 
 def relu(tensor, output_size: int, name: str='relu', stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a relu layer"""
     with tf.variable_scope(name):
         weight, bias = weight_bias([int(tensor.get_shape()[-1]), output_size], stddev, term, summary)
         return tf.nn.relu(tf.matmul(tensor, weight) + bias)
 
-def relu_dropout(tensor, output_size: int, dropout: float=0.4, name: str='relu_dropout', stddev: float=0.02, term: float=0.01, summary: bool=True):
+def relu_dropout(tensors, output_size: int, dropout: float=0.4, name: str='relu_dropout', stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a relu layer with dropout"""
     with tf.variable_scope(name):
-        weight, bias = weight_bias([int(tensor.get_shape()[-1]), output_size], stddev, term, summary)
-        relu_layer = tf.nn.relu(tf.matmul(tensor, weight) + bias)
-        return tf.nn.dropout(relu_layer, dropout)
-
-def linear(tensor, output_size: int, name: str='linear', stddev: float=0.02, term: float=0.01, summary: bool=True):
+        weight, bias = weight_bias([int(tensors[0].get_shape()[-1]), output_size], stddev, term, summary)
+        output = []
+        for tensor in tensors:
+            relu_layer = tf.nn.relu(tf.matmul(tensor, weight) + bias)
+            output.append(tf.nn.dropout(relu_layer, dropout))
+        return output
+
+def linear(tensors, output_size: int, name: str='linear', stddev: float=0.02, term: float=0.01, summary: bool=True):
     '''Create a fully connected layer'''
     with tf.variable_scope(name):
-        weight, bias = weight_bias([tensor.get_shape()[-1], output_size], stddev, term, summary)
-        return tf.matmul(tensor, weight) + bias
+        weight, bias = weight_bias([tensors[0].get_shape()[-1], output_size], stddev, term, summary)
+        return [tf.matmul(tensor, weight) + bias for tensor in tensors]
 
-def conv2d_transpose(tensor, batch_size=1, conv_size=32, name: str='conv2d_transpose', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
+def conv2d_transpose(tensors, batch_size=1, conv_size=32, name: str='conv2d_transpose', norm: bool=True, stddev: float=0.02, term: float=0.01, summary: bool=True):
     """Create a transpose convolutional layer"""
     with tf.variable_scope(name):
-        tensor_shape = tensor.get_shape()
+        tensor_shape = tensors[0].get_shape()
         filt, bias = filter_bias([5, 5, conv_size, tensor_shape[-1]], stddev, term, summary)
         conv_shape = [batch_size, int(tensor_shape[1]*2), int(tensor_shape[2]*2), conv_size]
-        deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
-        if norm:
-            deconv = tf.contrib.layers.batch_norm(deconv, decay=0.9, updates_collections=None, scale=False,
-                trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
-        return tf.nn.relu(tf.nn.bias_add(deconv, bias))
-
-def conv2d_transpose_tanh(tensor, batch_size=1, conv_size=32, name: str='conv2d_transpose_tanh', stddev: float=0.02, summary: bool=True):
+        output = []
+        for tensor in tensors:
+            deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
+            if norm:
+                deconv = tf.contrib.layers.batch_norm(deconv, decay=0.9, updates_collections=None, scale=False,
+                    trainable=True, reuse=True, scope="normalization", is_training=True, epsilon=0.00001)
+            output.append(tf.nn.relu(tf.nn.bias_add(deconv, bias)))
+        return output
+
+def conv2d_transpose_tanh(tensors, batch_size=1, conv_size=32, name: str='conv2d_transpose_tanh', stddev: float=0.02, summary: bool=True):
     """Create a transpose convolutional layer"""
     with tf.variable_scope(name):
-        tensor_shape = tensor.get_shape()
+        tensor_shape = tensors[0].get_shape()
         filt = tf.get_variable('filter', [5, 5, conv_size, tensor_shape[-1]], tf.float32, tf.random_normal_initializer(0, stddev), trainable=True)
-        conv_shape = [batch_size, int(tensor_shape[1]*2), int(tensor_shape[2]*2), conv_size]
-        deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
-        return tf.nn.tanh(deconv)
-
-def expand_relu(tensor, out_shape, name: str='expand_relu', norm: bool=True, stddev: float=0.2, term: float=0.01, summary: bool=True):
+        output = []
+        for tensor in tensors:
+            conv_shape = [batch_size, int(tensor_shape[1]*2), int(tensor_shape[2]*2), conv_size]
+            deconv = tf.nn.conv2d_transpose(tensor, filt, conv_shape, [1, 2, 2, 1])
+            output.append(tf.nn.tanh(deconv))
+        return output
+
+def expand_relu(tensors, out_shape, name: str='expand_relu', norm: bool=True, stddev: float=0.2, term: float=0.01, summary: bool=True):
     """Create a layer that expands an input to a shape"""
     with tf.variable_scope(name) as scope:
-        weight, bias = weight_bias([tensor.get_shape()[-1], np.prod(out_shape[1:])], stddev, term, summary)
-        lin = tf.matmul(tensor, weight) + bias
-        reshape = tf.reshape(lin, out_shape)
-        if norm:
-            reshape = tf.contrib.layers.batch_norm(reshape, decay=0.9, updates_collections=None, scale=False,
-                trainable=True, reuse=True, scope=scope, is_training=True, epsilon=0.00001)
-        return tf.nn.relu(reshape)
+        weight, bias = weight_bias([tensors[0].get_shape()[-1], np.prod(out_shape[1:])], stddev, term, summary)
+        output = []
+        for tensor in tensors:
+            lin = tf.matmul(tensor, weight) + bias
+            reshape = tf.reshape(lin, out_shape)
+            if norm:
+                reshape = tf.contrib.layers.batch_norm(reshape, decay=0.9, updates_collections=None, scale=False,
+                    trainable=True, reuse=True, scope=scope, is_training=True, epsilon=0.00001)
+            output.append(tf.nn.relu(reshape))
+        return output
 
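Usage sketch (not part of the commit): after this change each layer takes a Python list of tensors and returns a list of outputs, pushing every input through one shared set of weights, so for example real and generated batches can be scored by identical discriminator parameters. The module name, placeholder names, and shapes below are hypothetical, and norm=False keeps the sketch minimal.

# Hypothetical usage of the list-based layers (TensorFlow 1.x style).
import tensorflow as tf
import layers  # assuming the file above is importable under this name

real_images = tf.placeholder(tf.float32, [1, 64, 64, 3])
fake_images = tf.placeholder(tf.float32, [1, 64, 64, 3])

# Both tensors pass through the same filters and bias created once in 'conv1'.
h_real, h_fake = layers.conv2d([real_images, fake_images], 32, name='conv1', norm=False)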