How can I make this model generate 512×512 px images or bigger? Right now it generates 64×64 px images. I tried changing some values in the model, but it didn't work. Also, could someone explain how these convolutional layers work, especially Conv2D and Conv2DTranspose? I don't understand how the image is resized in those layers.
import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tqdm import tqdm
cd /content/drive/MyDrive
# Load unlabeled 64x64 RGB images from the 'Humans' directory, then scale
# pixel values from [0, 255] down to [0, 1] so they match the generator's
# sigmoid output range.
dataset = keras.preprocessing.image_dataset_from_directory(
    directory='Humans',
    label_mode=None,
    image_size=(64, 64),
    batch_size=32,
    shuffle=True,
)
dataset = dataset.map(lambda batch: batch / 255.0)
# Discriminator: maps a 64x64x3 image to a single real/fake probability.
# Each strided Conv2D halves the spatial resolution: 64 -> 32 -> 16 -> 8.
discriminator = keras.models.Sequential()
discriminator.add(keras.Input(shape=(64, 64, 3)))
discriminator.add(layers.Conv2D(64, kernel_size=4, strides=2, padding='same'))
discriminator.add(layers.LeakyReLU(0.2))
discriminator.add(layers.Conv2D(128, kernel_size=4, strides=2, padding='same'))
discriminator.add(layers.LeakyReLU(0.2))
discriminator.add(layers.Conv2D(128, kernel_size=4, strides=2, padding='same'))
discriminator.add(layers.LeakyReLU(0.2))
discriminator.add(layers.Flatten())
discriminator.add(layers.Dropout(0.2))
discriminator.add(layers.Dense(1, activation='sigmoid'))
# Length of the random noise vector that seeds each generated image.
latent_dim = 128

# Generator: maps a latent vector to a 64x64 RGB image.
# The Dense + Reshape layers produce an 8x8 feature map; each strided
# Conv2DTranspose then doubles the spatial size: 8 -> 16 -> 32 -> 64.
generator = keras.models.Sequential()
generator.add(layers.Input(shape=(latent_dim,)))
generator.add(layers.Dense(8 * 8 * 128))
generator.add(layers.Reshape((8, 8, 128)))
generator.add(layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding='same'))
generator.add(layers.LeakyReLU(0.2))
generator.add(layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding='same'))
generator.add(layers.LeakyReLU(0.2))
generator.add(layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding='same'))
generator.add(layers.LeakyReLU(0.2))
# Final stride-1 convolution keeps the 64x64 size and collapses the feature
# channels to 3 (RGB), with sigmoid squashing pixels into [0, 1].
generator.add(layers.Conv2D(3, kernel_size=5, padding='same', activation='sigmoid'))
# Independent Adam optimizers for the two networks, plus the binary
# cross-entropy loss used for the real-vs-fake classification game.
opt_gen = keras.optimizers.Adam(learning_rate=1e-4)
opt_disc = keras.optimizers.Adam(learning_rate=1e-4)
loss_fn = keras.losses.BinaryCrossentropy()
# Create the sample-image output directory up front; img.save below fails
# if 'gen_images/' does not already exist.
os.makedirs('gen_images', exist_ok=True)

# Standard GAN training loop: each batch, first update the discriminator
# (real images -> label 1, generated images -> label 0), then update the
# generator to make the discriminator output 1 on its fakes.
for epoch in range(500):
    for idx, real in enumerate(tqdm(dataset)):
        batch_size = real.shape[0]
        random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
        fake = generator(random_latent_vectors)

        # Periodically save one generated sample for visual inspection.
        if idx % 50 == 0:
            img = keras.preprocessing.image.array_to_img(fake[0])
            img.save(f'gen_images/generated_img{epoch}_{idx}_.png')

        # --- Discriminator step ---
        with tf.GradientTape() as disc_tape:
            loss_disc_real = loss_fn(tf.ones((batch_size, 1)), discriminator(real))
            # BUG FIX: originally tf.zeros(batch_size, 1) -- the second
            # positional argument of tf.zeros is the dtype, not a dimension,
            # so the labels came out shaped (batch_size,) instead of
            # (batch_size, 1). The shape must be passed as one tuple.
            loss_disc_fake = loss_fn(tf.zeros((batch_size, 1)), discriminator(fake))
            loss_disc = (loss_disc_real + loss_disc_fake) / 2
        grads = disc_tape.gradient(loss_disc, discriminator.trainable_weights)
        opt_disc.apply_gradients(zip(grads, discriminator.trainable_weights))

        # --- Generator step ---
        with tf.GradientTape() as gen_tape:
            fake = generator(random_latent_vectors)
            output = discriminator(fake)
            # Same shape fix as above: tf.ones((batch_size, 1)), not
            # tf.ones(batch_size, 1).
            loss_gen = loss_fn(tf.ones((batch_size, 1)), output)
        grads = gen_tape.gradient(loss_gen, generator.trainable_weights)
        opt_gen.apply_gradients(zip(grads, generator.trainable_weights))
I tried changing the image size and some values in the conv layers, but it didn't work.
New contributor
Andrei Arseni is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.