@@ -41,11 +41,7 @@ def __call__(self, inputs):
         with tf.name_scope(self.name):
             outputs = self._call(inputs)
         return outputs
-
-    def log_weights(self):
-        for w in self.weights:
-            tf.summary.histogram(self.name + '/weights/' + w, self.weights[w])
-
+
 class ConvolutionalLayer(Layer):
     def __init__(self, input_dim, output_dim, placeholders, dropout,
                  sparse_inputs, activation, isLast=False, bias=False, featureless=False, **kwargs):
@@ -97,47 +93,3 @@ def _call(self, inputs):
             output += self.weights['bias']

         return self.activation(output)
-
-class DenseLayer(Layer):
-    def __init__(self, input_dim, output_dim, dropout, sparse_inputs,
-                 placeholders=None, activation=tf.nn.relu, bias=False, featureless=False, **kwargs):
-        super(DenseLayer, self).__init__(**kwargs)
-        self.dropout = 0.5
-
-        if dropout:
-            self.dropout = placeholders['dropout']
-        else:
-            self.dropout = 0.
-
-        self.activation = activation
-        self.sparse_inputs = sparse_inputs
-        self.featureless = featureless
-        self.bias = bias
-
-        # helper variable for sparse dropout
-        #self.num_features_nonzero = placeholders['num_features_nonzero']
-
-        with tf.variable_scope(self.name + '_weights'):
-            self.weights['weights'] = glorot([input_dim, output_dim],
-                                             name='weights')
-            if self.bias:
-                self.weights['bias'] = zeros([output_dim], name='bias')
-
-
-    def _call(self, inputs):
-        x = inputs
-
-        # apply dropout
-        #if self.sparse_inputs:
-        #    x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
-        #else:
-        x = tf.nn.dropout(x, 1 - self.dropout)
-
-        # the multiplication of features and weights - this is what the dense layer consists of
-        output = dot(x, self.weights['weights'], sparse=self.sparse_inputs)
-
-        # optionally apply the bias by adding it to the output
-        if self.bias:
-            output += self.weights['bias']
-
-        return self.activation(output)  # the output first passes through the activation function - a ReLU