# AIGG-1 / model.py
import tensorflow as tf
import matplotlib.pyplot as plt
# Parameters
IMG_SIZE = 64    # Height/width of the generated images
LATENT_DIM = 128 # Dimension of the input noise
BATCH_SIZE = 1   # Single-sample batches keep this toy demo cheap
# Generator (turns noise into an image)
def build_generator():
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(LATENT_DIM,)),
        tf.keras.layers.Dense(8 * 8 * 64, activation='relu'),
        tf.keras.layers.Reshape((8, 8, 64)),
        tf.keras.layers.Conv2DTranspose(64, (4, 4), strides=2, padding='same', activation='relu'),   # 8x8 -> 16x16
        tf.keras.layers.Conv2DTranspose(32, (4, 4), strides=2, padding='same', activation='relu'),   # 16x16 -> 32x32
        tf.keras.layers.Conv2DTranspose(3, (4, 4), strides=2, padding='same', activation='sigmoid'),  # 32x32 -> 64x64, RGB in [0, 1]
    ])
    return model
# Discriminator (scores how "real" an image looks)
def build_discriminator():
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(IMG_SIZE, IMG_SIZE, 3)),
        tf.keras.layers.Conv2D(32, (3, 3), strides=2, padding='same'),
        tf.keras.layers.LeakyReLU(0.2),
        tf.keras.layers.Conv2D(64, (3, 3), strides=2, padding='same'),
        tf.keras.layers.LeakyReLU(0.2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(1, activation='sigmoid')  # probability the input is "real"
    ])
    return model
# Create models
generator = build_generator()
discriminator = build_discriminator()
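
# Optional sanity check (an addition, not in the original file): the three
# stride-2 transposed convolutions upsample 8 -> 16 -> 32 -> 64, so the
# generator's output shape should match the discriminator's expected input.
assert generator.output_shape == (None, IMG_SIZE, IMG_SIZE, 3)
assert discriminator.input_shape == (None, IMG_SIZE, IMG_SIZE, 3)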
# Loss function and optimizers (lr=2e-4 with beta_1=0.5 is the standard DCGAN recipe)
cross_entropy = tf.keras.losses.BinaryCrossentropy()
g_optimizer = tf.keras.optimizers.Adam(0.0002, beta_1=0.5)
d_optimizer = tf.keras.optimizers.Adam(0.0002, beta_1=0.5)
# Training function
@tf.function
def train_step():
    # 1. Sample random noise for the generator
    noise = tf.random.normal([BATCH_SIZE, LATENT_DIM])

    # 2. Train the discriminator. There is no real dataset here, so uniform
    #    noise images stand in for the "real" class and the discriminator
    #    learns to tell them apart from the generator's outputs.
    with tf.GradientTape() as d_tape:
        generated_images = generator(noise, training=True)
        real_images = tf.random.uniform((BATCH_SIZE, IMG_SIZE, IMG_SIZE, 3))  # stand-in "real" data
        real_output = discriminator(real_images, training=True)
        fake_output = discriminator(generated_images, training=True)
        # Standard GAN discriminator loss: push the stand-in "real" images
        # toward label 1 and the generated images toward label 0
        d_loss = (cross_entropy(tf.ones_like(real_output), real_output)
                  + cross_entropy(tf.zeros_like(fake_output), fake_output))
    d_gradients = d_tape.gradient(d_loss, discriminator.trainable_variables)
    d_optimizer.apply_gradients(zip(d_gradients, discriminator.trainable_variables))

    # 3. Train the generator: its goal is to fool the discriminator into
    #    labeling generated images as "real" (1)
    with tf.GradientTape() as g_tape:
        generated_images = generator(noise, training=True)
        fake_output = discriminator(generated_images, training=True)
        g_loss = cross_entropy(tf.ones_like(fake_output), fake_output)
    g_gradients = g_tape.gradient(g_loss, generator.trainable_variables)
    g_optimizer.apply_gradients(zip(g_gradients, generator.trainable_variables))
    return d_loss, g_loss
# Mini-training run (50 steps)
for step in range(50):
    d_loss, g_loss = train_step()
    if step % 10 == 0:
        # float() converts the returned tensors so the format spec works
        print(f"Step {step}, D Loss: {float(d_loss):.3f}, G Loss: {float(g_loss):.3f}")
# Generate sample image
test_noise = tf.random.normal([1, LATENT_DIM])
generated_img = generator(test_noise, training=False)[0].numpy()  # to a plain array for Matplotlib
plt.imshow(generated_img)
plt.axis('off')
plt.show()
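
# Optional follow-up (a sketch, not part of the original script; the file names
# are arbitrary choices): persist the sample image and the generator weights.
plt.imsave('sample.png', generated_img)         # save the image shown above
generator.save_weights('generator.weights.h5')  # reload later via load_weights()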