I wrote a code on GAN and had previously trained in the Colab environment. I leave the Colab code below.
def build_generator(latent_dim):
"""Build the generator model."""
model = models.Sequential([
layers.Dense(256 * 8 * 8, input_dim=latent_dim),
layers.LeakyReLU(alpha=0.2),
layers.BatchNormalization(momentum=0.8),
layers.Reshape((8, 8, 256)),
layers.Conv2DTranspose(256, (4, 4), strides=(2, 2), padding='same'),
layers.LeakyReLU(alpha=0.2),
layers.BatchNormalization(momentum=0.8),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same'),
layers.LeakyReLU(alpha=0.2),
layers.BatchNormalization(momentum=0.8),
layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same'),
layers.LeakyReLU(alpha=0.2),
layers.BatchNormalization(momentum=0.8),
layers.Conv2D(3, (3, 3), activation='tanh', padding='same')
])
return model
def generate_and_visualize_images(generator, latent_dim, num_samples=10, save_path='./generated_images3/'):
"""Generate and visualize sample images and save them."""
os.makedirs(save_path, exist_ok=True)
noise = np.random.normal(0, 1, (num_samples, latent_dim))
generated_images = generator.predict(noise)
for i in range(num_samples):
plt.imshow((generated_images[i] * 127.5 + 127.5).astype(np.uint8))
plt.axis('off')
plt.tight_layout()
plt.savefig(f'{save_path}/generated_image_{i}.png')
plt.close()
latent_dim = 50
epochs = 10000
batch_size = 64
# Assuming build_generator is a function that returns a compiled generator model
generator = build_generator(latent_dim)
discriminator = models.Sequential([
layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same', input_shape=(64, 64, 3)),
layers.LeakyReLU(alpha=0.2),
layers.Dropout(0.4),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same'),
layers.LeakyReLU(alpha=0.2),
layers.Dropout(0.4),
layers.Conv2D(256, (3, 3), strides=(2, 2), padding='same'),
layers.LeakyReLU(alpha=0.2),
layers.Dropout(0.4),
layers.Flatten(),
layers.Dense(1, activation='sigmoid')
])
discriminator.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0004, beta_1=0.5), metrics=['accuracy'])
discriminator.trainable = False
gan_input = layers.Input(shape=(latent_dim,))
gan_output = discriminator(generator(gan_input))
gan = Model(gan_input, gan_output)
gan.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0001, beta_1=0.5))
# Load and preprocess dataset
folder_path = "/content/drive/MyDrive/alan/deep learning/resimdosyaları/IMAGES2/IMAGES2/"
images = []
if not os.path.exists(folder_path):
raise FileNotFoundError(f"Error: The folder {folder_path} does not exist.")
for filename in os.listdir(folder_path):
img_path = os.path.join(folder_path, filename)
if os.path.isfile(img_path):
try:
img = cv2.imread(img_path)
if img is not None:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (64, 64))
images.append(img)
except Exception as e:
print(f"Error reading {img_path}: {e}")
x_train_s = np.array(images, dtype=np.float32)
x_train_s = (x_train_s - 127.5) / 127.5 # Normalize to [-1, 1]
# Lists to store loss values
discriminator_losses = []
generator_losses = []
for epoch in range(epochs):
noise = np.random.normal(0, 1, (batch_size, latent_dim))
fake_images = generator.predict(noise)
real_images = x_train_s[np.random.randint(0, x_train_s.shape[0], batch_size)]
# Apply label smoothing to real labels
real_labels = np.ones((batch_size, 1)) * 0.9
fake_labels = np.zeros((batch_size, 1)) + 0.1
discriminator_loss_real = discriminator.train_on_batch(real_images, real_labels)
discriminator_loss_fake = discriminator.train_on_batch(fake_images, fake_labels)
discriminator_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_fake)
discriminator_losses.append(discriminator_loss[0])
noise = np.random.normal(0, 1, (batch_size, latent_dim))
generator_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)) * 0.9)
generator_losses.append(generator_loss)
if epoch % 100 == 0:
print(f"Epoch: {epoch}, Discriminator Loss: {discriminator_loss[0]}, Generator Loss: {generator_loss}")
# Plot the losses
plt.figure(figsize=(10, 5))
plt.plot(discriminator_losses, label='Discriminator Loss')
plt.plot(generator_losses, label='Generator Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.title('Discriminator and Generator Losses')
plt.show()
# Generate final images
generate_and_visualize_images(generator, latent_dim)
An example iteration output of this code is as follows.
2/2 [==============================] – 0s 3ms/step 2/2 [==============================] – 0s 4ms/step 2/2 [==============================] – 0s 3ms/step 2/2 [==============================] – 0s 4ms/step 2/2 [==============================] – 0s 3ms/step 2/2 [==============================] – 0s 3ms/step 2/2 [==============================] – 0s 3ms/step
It works at 3 or 4 ms as well as 3 or 4 ms. However, I bought a new computer and wanted to be trained on my own computer due to disconnections and low resource usage of some. The same code produced the following response for me.
2/2 [==============================] – 0s 72ms/step 2/2 [==============================] – 0s 82ms/step 2/2 [==============================] – 0s 71ms/step 2/2 [==============================] – 0s 74ms/step 2/2 [==============================] – 0s 78ms/step 2/2 [==============================] – 0s 79ms/step
this is very slow and inefficient. I don’t know how to solve it, can you please help?
I used both Pytorch and VS code. I also tried to downgrade tensorflow version.