-
Notifications
You must be signed in to change notification settings - Fork 3
/
gan.py
152 lines (113 loc) · 5.2 KB
/
gan.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
# Large amount of credit goes to:
# https://github.com/eriklindernoren/Keras-GAN/blob/master/gan/gan.py
# which I've used as a reference for this implementation
from __future__ import print_function, division
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
from libs.architectures import build_generator, build_discriminator
from PIL import Image
import matplotlib.pyplot as plt
import sys
import numpy as np
class GAN():
    """Vanilla GAN: a generator maps latent noise to images that a jointly
    trained discriminator cannot tell apart from real samples.

    Large amount of credit goes to
    https://github.com/eriklindernoren/Keras-GAN/blob/master/gan/gan.py,
    which was used as a reference for this implementation.
    """

    def __init__(self, shape, architecture='dense', save_path='images/'):
        """Configure image geometry and build/compile both networks.

        shape: image shape as (rows, cols, channels); a 2-tuple
            (rows, cols) is also accepted and treated as single-channel.
        architecture: key passed to the libs.architectures builders
            (e.g. 'dense').
        save_path: prefix for sample-grid filenames; must end with a path
            separator because filenames are appended by concatenation.
        """
        self.img_rows = shape[0]
        self.img_cols = shape[1]
        # Accept channel-less shapes (e.g. raw MNIST (28, 28)) as 1-channel
        # instead of raising IndexError.
        self.channels = shape[2] if len(shape) > 2 else 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 100
        self.save_path = save_path
        self.architecture = architecture
        self.compile()

    def compile(self):
        """Build the generator/discriminator and compile the training graphs."""
        optimizer = Adam(0.0002, 0.5)

        # Build and compile the stand-alone discriminator (trained directly
        # on real/fake batches in train()).
        self.discriminator = build_discriminator(self.architecture, self.img_shape)
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build the generator; it is only trained through the combined model.
        self.generator = build_generator(self.architecture, self.latent_dim,
                                         self.img_shape)

        # The generator takes noise as input and generates imgs.
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)

        # Freeze the discriminator inside the combined model so generator
        # updates do not also move the discriminator's weights.  The
        # stand-alone discriminator was compiled above while still
        # trainable, so its own train_on_batch calls keep learning.
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines
        # validity.
        validity = self.discriminator(img)

        # The combined model (stacked generator and discriminator) trains
        # the generator to fool the discriminator.
        self.combined = Model(z, validity)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def train(self, X_train, epochs, batch_size=128, sample_interval=50):
        """Run adversarial training.

        X_train: images of shape (N, rows, cols, channels) with pixel
            values in [0, 255]; a channel-less (N, rows, cols) array is
            accepted and a channel axis is appended.
        epochs: number of alternating discriminator/generator update steps.
        batch_size: images per update step.
        sample_interval: save a 5x5 sample grid every this many epochs.

        Returns (d_losses, d_acc, g_losses), one entry per epoch.
        """
        # Rescale pixel values from [0, 255] into [-1, 1].
        X_train = X_train / 127.5 - 1.
        # Append a channel axis for channel-less image stacks so batches
        # match the discriminator's (rows, cols, channels) input.
        if X_train.ndim < 4:
            X_train = np.expand_dims(X_train, axis=-1)

        # Adversarial ground truths.
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        d_losses = []
        d_acc = []
        g_losses = []
        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random batch of real images.
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # Generate a batch of new images.
            gen_imgs = self.generator.predict(noise)

            # Train the discriminator on real and fake halves separately;
            # the reported loss/accuracy is the average of the two.
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # Train the generator (to have the discriminator label samples
            # as valid).
            g_loss = self.combined.train_on_batch(noise, valid)

            # Plot the progress.
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
            d_losses.append(d_loss[0])
            d_acc.append(100*d_loss[1])
            g_losses.append(g_loss)

            # If at save interval => save generated image samples.
            if epoch % sample_interval == 0:
                self.sample_images(epoch)
        return d_losses, d_acc, g_losses

    def sample_images(self, epoch):
        """Save a 5x5 grid of generator samples to <save_path><epoch>.png."""
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # Rescale images from [-1, 1] back to [0, 1] for display.
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                if gen_imgs.shape[3] < 3:
                    axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')  # grayscale
                else:
                    axs[i, j].imshow(gen_imgs[cnt])  # color with or without alpha
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig(self.save_path + "%d.png" % epoch)
        # Close this specific figure: bare plt.close() only closes the
        # *current* figure, which may differ if the caller has others open.
        plt.close(fig)

    def load_weights(self, g_weights, d_weights):
        """Load generator and discriminator weights from the given paths."""
        self.generator.load_weights(g_weights)
        self.discriminator.load_weights(d_weights)

    def save_weights(self, g_weights, d_weights):
        """Save generator and discriminator weights to the given paths."""
        self.generator.save_weights(g_weights)
        self.discriminator.save_weights(d_weights)
if __name__ == '__main__':
    # Smoke-test: train a GAN on MNIST digits.
    (X_train, _), (_, _) = mnist.load_data()
    # mnist.load_data() returns (N, 28, 28) arrays with no channel axis,
    # but GAN.__init__ reads shape[2] and the networks expect
    # (rows, cols, channels) — append the channel dimension first.
    X_train = np.expand_dims(X_train, axis=3)
    gan = GAN(shape=X_train[0].shape)
    gan.train(X_train=X_train, epochs=30000, batch_size=32, sample_interval=200)