Building a Generative Adversarial Network Using Keras



This article demonstrates how to build a Generative Adversarial Network (GAN) using the Keras library. The dataset used is the CIFAR-10 image dataset, which comes pre-loaded with Keras. You can read more about the dataset in the Keras datasets documentation.

Step 1: Import the required libraries

import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD

Step 2: Load data

# Loading the CIFAR-10 data
(X, y), (_, _) = keras.datasets.cifar10.load_data()

# Selecting a single image class.
# The class index was chosen arbitrarily; any label from 0 to 9 works.
X = X[y.flatten() == 8]
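
As a quick optional sanity check (not part of the original pipeline), you can print the shape of the filtered array; CIFAR-10 images are 32x32 RGB, so the result should have the form (N, 32, 32, 3):

# Optional sanity check (assumption: run immediately after the filtering step above)
print(X.shape)   # e.g. (5000, 32, 32, 3) for a single CIFAR-10 training class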

Step 3: Define the parameters to be used in subsequent processes

# Defining the input image shape
image_shape = (32, 32, 3)

latent_dimensions = 100
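
The latent dimension is the length of the random noise vector that the generator maps to an image; 100 is a common but somewhat arbitrary choice. For illustration only (this snippet is not part of the tutorial's pipeline), a single latent sample looks like this:

# Illustration only: one latent vector drawn from a standard normal distribution
sample_noise = np.random.normal(0, 1, (1, latent_dimensions))
print(sample_noise.shape)   # (1, 100), the shape the generator will expect as input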

Step 4: Define a utility function to build the generator

def build_generator():

    model = Sequential()

    # Building the input layer
    model.add(Dense(128 * 8 * 8, activation="relu",
                    input_dim=latent_dimensions))
    model.add(Reshape((8, 8, 128)))

    model.add(UpSampling2D())

    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))

    model.add(UpSampling2D())

    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.78))
    model.add(Activation("relu"))

    model.add(Conv2D(3, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    # Generating the output image from a noise vector
    noise = Input(shape=(latent_dimensions,))
    image = model(noise)

    return Model(noise, image)
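
Each UpSampling2D layer doubles the spatial resolution, so the 8x8 feature map grows to 16x16 and then to 32x32, matching the CIFAR-10 image shape, while the final tanh activation keeps pixel values in the range [-1, 1]. An optional check of the output shape (not in the original article) could look like this:

# Optional check (assumed): the generator should map a latent vector to a 32x32x3 image
test_generator = build_generator()
test_output = test_generator.predict(np.random.normal(0, 1, (1, latent_dimensions)))
print(test_output.shape)   # (1, 32, 32, 3)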

Step 5: Define a utility function for building the Discriminator

def build_discriminator():

    # Building the convolutional layers
    # to determine whether an image is real or fake
    model = Sequential()

    model.add(Conv2D(32, kernel_size=3, strides=2,
                     input_shape=image_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(BatchNormalization(momentum=0.82))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))

    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.25))
    model.add(Dropout(0.25))

    # Building the output layer
    model.add(Flatten())
    model.add(Dense(1, activation="sigmoid"))

    image = Input(shape=image_shape)
    validity = model(image)

    return Model(image, validity)
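
The single sigmoid output is the discriminator's estimate of the probability that the input image is real. As with the generator, a quick optional check (not part of the original code) confirms the expected output shape:

# Optional check (assumed): a batch of images maps to one probability per image
test_discriminator = build_discriminator()
print(test_discriminator.predict(np.random.uniform(-1, 1, (4, 32, 32, 3))).shape)   # (4, 1)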

Step 6: Define a utility function to display generated images

def display_images():
    r, c = 4, 4
    noise = np.random.normal(0, 1, (r * c, latent_dimensions))
    generated_images = generator.predict(noise)

    # Scaling the generated images from [-1, 1] back to [0, 1]
    generated_images = 0.5 * generated_images + 0.5

    fig, axs = plt.subplots(r, c)
    count = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(generated_images[count])
            axs[i, j].axis('off')
            count += 1
    plt.show()
    plt.close()
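
During long training runs it can be more convenient to write the image grids to disk than to display them interactively. The variant below is an assumption on my part (the function name save_images and the filename pattern are not from the original article):

# Assumed variant: save the 4x4 grid of generated images to a file instead of showing it
def save_images(epoch, filename_pattern="generated_%d.png"):
    r, c = 4, 4
    noise = np.random.normal(0, 1, (r * c, latent_dimensions))
    generated_images = 0.5 * generator.predict(noise) + 0.5
    fig, axs = plt.subplots(r, c)
    count = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(generated_images[count])
            axs[i, j].axis('off')
            count += 1
    fig.savefig(filename_pattern % epoch)
    plt.close(fig)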

Step 7: Build the Generative Adversarial Network

# Building and compiling the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(0.0002, 0.5),
                      metrics=['accuracy'])

# Making the discriminator non-trainable inside the combined model
# so that the generator can learn from a fixed gradient
discriminator.trainable = False

# Building the generator
generator = build_generator()

# Defining the input for the generator
# and generating the images
z = Input(shape=(latent_dimensions,))
image = generator(z)

# Checking the validity of the generated image
valid = discriminator(image)

# Defining the combined model of the generator and the discriminator
combined_network = Model(z, valid)
combined_network.compile(loss='binary_crossentropy',
                         optimizer=Adam(0.0002, 0.5))
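
Because the discriminator was compiled before its trainable flag was set to False, the standalone discriminator still updates its weights when trained directly, while the copy wrapped inside combined_network stays frozen. An optional way to confirm this (assumed, using the standard Keras trainable_weights attribute):

# Optional check (assumed): the combined model should expose only the generator's
# weights as trainable, because the discriminator was frozen before it was compiled
print(len(generator.trainable_weights))
print(len(combined_network.trainable_weights))   # expected to equal the generator's count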

Step 8: Train the network

num_epochs = 15000
batch_size = 32
display_interval = 2500
losses = []

# Normalizing the input images to the range [-1, 1]
X = (X / 127.5) - 1.

# Defining the adversarial ground truths
valid = np.ones((batch_size, 1))

# Adding some noise to the labels
valid += 0.05 * np.random.random(valid.shape)
fake = np.zeros((batch_size, 1))
fake += 0.05 * np.random.random(fake.shape)

for epoch in range(num_epochs):

    # Training the Discriminator

    # Sampling a random half of the images
    index = np.random.randint(0, X.shape[0], batch_size)
    images = X[index]

    # Sampling noise and generating a batch of new images
    noise = np.random.normal(0, 1, (batch_size, latent_dimensions))
    generated_images = generator.predict(noise)

    # Training the discriminator to detect more accurately
    # whether a generated image is real or fake
    discm_loss_real = discriminator.train_on_batch(images, valid)
    discm_loss_fake = discriminator.train_on_batch(generated_images, fake)
    discm_loss = 0.5 * np.add(discm_loss_real, discm_loss_fake)

    # Training the generator

    # Training the generator to generate images
    # that the discriminator classifies as real
    genr_loss = combined_network.train_on_batch(noise, valid)

    # Tracking the training progress
    if epoch % display_interval == 0:
        display_images()

Epoch 0:

Epoch 2500:

Epoch 5000:

Epoch 7500:

Epoch 10000:

Epoch 12500:

Note that the image quality improves as the number of training epochs increases.
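
Beyond visual inspection, the loss curves can also be tracked. The losses list is declared in the training code above but never appended to in the loop as shown; a minimal addition (an assumption, not from the original article) is to record the two losses each epoch with losses.append((discm_loss[0], genr_loss)) inside the loop and plot them once training has finished:

# Assumed addition: plot the recorded loss curves after training
d_losses = [l[0] for l in losses]
g_losses = [l[1] for l in losses]
plt.plot(d_losses, label="discriminator loss")
plt.plot(g_losses, label="generator loss")
plt.xlabel("epoch")
plt.legend()
plt.show()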

Step 9: Assess the performance

The network's performance is assessed by visually comparing images generated in the last epochs with the original images.

a) Plotting the original images

# Plotting some of the original images
s = X[:40]

s = 0.5 * s