args.py
#!/usr/bin/env python3
class Args:
    # Dataset size. Use a positive number to train on a sampled subset
    # of the full dataset; -1 uses everything.
    dataset_sz = -1
    # Archive outputs of training here for animating later.
    anim_dir = "anim"
    # Image size we work on: (sz, sz, 3).
    sz = 64
    # Alpha, used by the leaky ReLU of the D and G networks.
    alpha_D = 0.2
    alpha_G = 0.2
    # Batch size during training.
    batch_sz = 64
    # Shape of the noise vector (latent space z) the faces are generated
    # from. See the usage sketch at the end of this file.
    noise_shape = (1, 1, 100)
    # GAN training can be ruined at any moment if you are not careful.
    # Archive some snapshots in this directory.
    snapshot_dir = "./snapshots"
    # Dropout probability.
    dropout = 0.3
    # Magnitude of the label noise applied during training.
    label_noise = 0.1
    # Size of the history to keep. Slower training but higher quality.
    history_sz = 8
    # Weight snapshot files for the generator and discriminator.
    genw = "gen.hdf5"
    discw = "disc.hdf5"
    # Weight initialization function.
    #kernel_initializer = 'Orthogonal'
    #kernel_initializer = 'RandomNormal'
    # The Keras default, and reported to work well for GANs:
    # https://github.com/gheinrich/DIGITS-GAN/blob/master/examples/weight-init/README.md#experiments-with-lenet-on-mnist
    kernel_initializer = 'glorot_uniform'
    # Adam beta_1. Everybody has used 0.5 since the DCGAN paper, and it
    # works best for me too (I also tried 0.9 and 0.1).
    adam_beta = 0.5
    # BatchNormalization momentum matters too.
    bn_momentum = 0.3
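

# ----------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the training code).
# Assuming a classic Keras 2.x DCGAN setup, this shows how the settings
# above are typically consumed: sampling the latent vector z, building
# noisy labels for the discriminator, and configuring Adam with
# beta_1 = Args.adam_beta. The learning rate 0.0002 is the DCGAN paper's
# value, assumed here; this file does not define one.
if __name__ == "__main__":
    import numpy as np
    from keras.optimizers import Adam  # tf.keras spells it learning_rate

    # One batch of latent vectors: shape (batch_sz, 1, 1, 100).
    z = np.random.normal(0.0, 1.0,
                         size=(Args.batch_sz,) + Args.noise_shape)

    # Noisy labels: real in [1 - label_noise, 1], fake in [0, label_noise].
    real = 1.0 - Args.label_noise * np.random.random((Args.batch_sz, 1))
    fake = Args.label_noise * np.random.random((Args.batch_sz, 1))

    # beta_1 = 0.5 stabilizes GAN training (Radford et al., 2016).
    opt = Adam(lr=0.0002, beta_1=Args.adam_beta)

    print(z.shape, real.mean(), fake.mean(), opt)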