
Commit 40fd9ca

Replace literal constant 10 with variable num_classes in example/ (keras-team#8041)

* Replace literal constant 10 with variable num_classes

* Fix PEP8 errors
ozabluda authored and fchollet committed Oct 3, 2017
1 parent 19e1be2 commit 40fd9ca
Showing 5 changed files with 24 additions and 22 deletions.
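The pattern in every hunk below is the same: the magic number 10, the count of MNIST digit classes, becomes the named constant num_classes, which makes the intent explicit and gives each example a single place to edit when adapting it to another dataset. A minimal before/after sketch of the refactor (illustrative only; this model is hypothetical and not taken from any of the changed files):

from keras.models import Sequential
from keras.layers import Dense

num_classes = 10  # MNIST digit classes; the one value to edit for other datasets

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
# before: model.add(Dense(10, activation='softmax'))  <- magic number
model.add(Dense(num_classes, activation='softmax'))   # after: named constant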
examples/antirectifier.py (1 addition, 1 deletion)
@@ -88,7 +88,7 @@ def call(self, inputs):
 model.add(layers.Dense(256))
 model.add(Antirectifier())
 model.add(layers.Dropout(0.1))
-model.add(layers.Dense(10))
+model.add(layers.Dense(num_classes))
 model.add(layers.Activation('softmax'))
 
 # compile the model
examples/mnist_acgan.py (10 additions, 9 deletions)
@@ -46,6 +46,8 @@
 
 K.set_image_data_format('channels_first')
 
+num_classes = 10
+
 
 def build_generator(latent_size):
     # we will map a pair of (z, L), where z is a latent vector and L is a
@@ -79,8 +81,7 @@ def build_generator(latent_size):
     # this will be our label
     image_class = Input(shape=(1,), dtype='int32')
 
-    # 10 classes in MNIST
-    cls = Flatten()(Embedding(10, latent_size,
+    cls = Flatten()(Embedding(num_classes, latent_size,
                               embeddings_initializer='glorot_normal')(image_class))
 
     # hadamard product between z-space and a class conditional embedding
@@ -124,7 +125,7 @@ def build_discriminator():
     # (name=auxiliary) is the class that the discriminator thinks the image
     # belongs to.
     fake = Dense(1, activation='sigmoid', name='generation')(features)
-    aux = Dense(10, activation='softmax', name='auxiliary')(features)
+    aux = Dense(num_classes, activation='softmax', name='auxiliary')(features)
 
     return Model(image, [fake, aux])

@@ -200,7 +201,7 @@ def build_discriminator():
             label_batch = y_train[index * batch_size:(index + 1) * batch_size]
 
             # sample some labels from p_c
-            sampled_labels = np.random.randint(0, 10, batch_size)
+            sampled_labels = np.random.randint(0, num_classes, batch_size)
 
             # generate a batch of fake images, using the generated labels as a
             # conditioner. We reshape the sampled labels to be
@@ -220,7 +221,7 @@ def build_discriminator():
             # the generator optimize over an identical number of images as the
             # discriminator
             noise = np.random.uniform(-1, 1, (2 * batch_size, latent_size))
-            sampled_labels = np.random.randint(0, 10, 2 * batch_size)
+            sampled_labels = np.random.randint(0, num_classes, 2 * batch_size)
 
             # we want to train the generator to trick the discriminator
             # For the generator, we want all the {fake, not-fake} labels to say
@@ -239,7 +240,7 @@ def build_discriminator():
         noise = np.random.uniform(-1, 1, (num_test, latent_size))
 
         # sample some labels from p_c and generate images from them
-        sampled_labels = np.random.randint(0, 10, num_test)
+        sampled_labels = np.random.randint(0, num_classes, num_test)
         generated_images = generator.predict(
             [noise, sampled_labels.reshape((-1, 1))], verbose=False)
 
@@ -255,7 +256,7 @@ def build_discriminator():
 
         # make new noise
         noise = np.random.uniform(-1, 1, (2 * num_test, latent_size))
-        sampled_labels = np.random.randint(0, 10, 2 * num_test)
+        sampled_labels = np.random.randint(0, num_classes, 2 * num_test)
 
         trick = np.ones(2 * num_test)
 
@@ -296,7 +297,7 @@ def build_discriminator():
         noise = np.random.uniform(-1, 1, (100, latent_size))
 
         sampled_labels = np.array([
-            [i] * 10 for i in range(10)
+            [i] * num_classes for i in range(num_classes)
         ]).reshape(-1, 1)
 
         # get a batch to display
@@ -305,7 +306,7 @@ def build_discriminator():
 
         # arrange them into a grid
         img = (np.concatenate([r.reshape(-1, 28)
-                               for r in np.split(generated_images, 10)
+                               for r in np.split(generated_images, num_classes)
                                ], axis=-1) * 127.5 + 127.5).astype(np.uint8)
 
         Image.fromarray(img).save(
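
The last two hunks above drive the per-epoch sample grid: the label column enumerates num_classes copies of each class id, and np.split then slices the 100 generated images back into one row per class. A quick standalone shape check (illustrative, NumPy only):

import numpy as np

num_classes = 10

# Ten 0s, ten 1s, ..., ten 9s as a (100, 1) column: one grid row per class.
sampled_labels = np.array([
    [i] * num_classes for i in range(num_classes)
]).reshape(-1, 1)
print(sampled_labels.shape)    # (100, 1)
print(sampled_labels[:3, 0])   # [0 0 0]
print(sampled_labels[-3:, 0])  # [9 9 9]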
examples/mnist_mlp.py (1 addition, 2 deletions)
@@ -13,7 +13,6 @@
 from keras.layers import Dense, Dropout
 from keras.optimizers import RMSprop
 
-
 batch_size = 128
 num_classes = 10
 epochs = 20
@@ -39,7 +38,7 @@
 model.add(Dropout(0.2))
 model.add(Dense(512, activation='relu'))
 model.add(Dropout(0.2))
-model.add(Dense(10, activation='softmax'))
+model.add(Dense(num_classes, activation='softmax'))
 
 model.summary()
 
examples/mnist_siamese_graph.py (8 additions, 6 deletions)
@@ -21,6 +21,8 @@
 from keras.optimizers import RMSprop
 from keras import backend as K
 
+num_classes = 10
+
 
 def euclidean_distance(vects):
     x, y = vects
@@ -47,13 +49,13 @@ def create_pairs(x, digit_indices):
     '''
     pairs = []
     labels = []
-    n = min([len(digit_indices[d]) for d in range(10)]) - 1
-    for d in range(10):
+    n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
+    for d in range(num_classes):
         for i in range(n):
             z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
             pairs += [[x[z1], x[z2]]]
-            inc = random.randrange(1, 10)
-            dn = (d + inc) % 10
+            inc = random.randrange(1, num_classes)
+            dn = (d + inc) % num_classes
             z1, z2 = digit_indices[d][i], digit_indices[dn][i]
             pairs += [[x[z1], x[z2]]]
             labels += [1, 0]
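
A detail worth noting in the hunk above: random.randrange(1, num_classes) draws from 1 through num_classes - 1 and never returns 0, so the shifted class (d + inc) % num_classes can never land back on d. That is what guarantees every negative pair really spans two different digit classes. A small standalone check (illustrative, stdlib only):

import random

num_classes = 10

for d in range(num_classes):
    for _ in range(1000):
        inc = random.randrange(1, num_classes)
        dn = (d + inc) % num_classes
        assert dn != d  # a negative pair always crosses classes
print('ok')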
@@ -97,10 +99,10 @@ def accuracy(y_true, y_pred):
 epochs = 20
 
 # create training+test positive and negative pairs
-digit_indices = [np.where(y_train == i)[0] for i in range(10)]
+digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
 tr_pairs, tr_y = create_pairs(x_train, digit_indices)
 
-digit_indices = [np.where(y_test == i)[0] for i in range(10)]
+digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
 te_pairs, te_y = create_pairs(x_test, digit_indices)
 
 # network definition
examples/mnist_tfrecord.py (4 additions, 4 deletions)
@@ -61,7 +61,7 @@ def cnn_layers(x_train_input):
     x = layers.Flatten()(x)
     x = layers.Dense(512, activation='relu')(x)
     x = layers.Dropout(0.5)(x)
-    x_train_out = layers.Dense(classes,
+    x_train_out = layers.Dense(num_classes,
                                activation='softmax',
                                name='x_train_out')(x)
     return x_train_out
@@ -72,7 +72,7 @@ def cnn_layers(x_train_input):
 batch_shape = (batch_size, 28, 28, 1)
 steps_per_epoch = 469
 epochs = 5
-classes = 10
+num_classes = 10
 
 # The capacity variable controls the maximum queue size
 # allowed when prefetching data for training.
@@ -106,7 +106,7 @@ def cnn_layers(x_train_input):
 x_train_batch = tf.reshape(x_train_batch, shape=batch_shape)
 
 y_train_batch = tf.cast(y_train_batch, tf.int32)
-y_train_batch = tf.one_hot(y_train_batch, classes)
+y_train_batch = tf.one_hot(y_train_batch, num_classes)
 
 x_batch_shape = x_train_batch.get_shape().as_list()
 y_batch_shape = y_train_batch.get_shape().as_list()
@@ -153,5 +153,5 @@ def cnn_layers(x_train_input):
 
 loss, acc = test_model.evaluate(x_test,
                                 keras.utils.to_categorical(y_test),
-                                classes)
+                                num_classes)
 print('\nTest accuracy: {0}'.format(acc))
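
A side benefit of the rename in this file: the same num_classes constant now sizes both the one-hot targets and the softmax output of cnn_layers, so the two cannot silently disagree. A tiny check of the target shape (illustrative; assumes the TF 1.x graph API this example targets):

import tensorflow as tf

num_classes = 10

labels = tf.constant([3, 7], dtype=tf.int32)
one_hot = tf.one_hot(labels, num_classes)
with tf.Session() as sess:          # TF 1.x session, matching this example's era
    print(sess.run(one_hot).shape)  # (2, 10)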
