|
|
import gradio as gr |
|
|
import tensorflow as tf |
|
|
from tensorflow import keras |
|
|
from tensorflow.keras import layers |
|
|
import numpy as np |
|
|
import os |
|
|
from PIL import Image, ImageFilter |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Side length (pixels) of the square training/generation images.
IMG_SIZE = 64

# Number of colour channels (RGB).
CHANNELS = 3

# Number of steps in the forward/reverse diffusion process.
TIMESTEPS = 300

# Mini-batch size used during training.
BATCH_SIZE = 32

# Path where trained model weights are persisted between runs.
WEIGHTS_FILE = "veda_hq_model.weights.h5"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_veda_patterns(num_images=600): |
|
|
print(f"Generating {num_images} Sharp Veda patterns...") |
|
|
data = [] |
|
|
|
|
|
for _ in range(num_images): |
|
|
x = np.linspace(-1, 1, IMG_SIZE) |
|
|
y = np.linspace(-1, 1, IMG_SIZE) |
|
|
X, Y = np.meshgrid(x, y) |
|
|
|
|
|
|
|
|
freq = np.random.uniform(3, 12) |
|
|
phase = np.random.uniform(0, np.pi) |
|
|
|
|
|
|
|
|
R = np.sqrt(X**2 + Y**2) |
|
|
|
|
|
|
|
|
pattern = np.tanh(np.sin(freq * R - phase) * 5) |
|
|
|
|
|
img = np.zeros((IMG_SIZE, IMG_SIZE, 3)) |
|
|
|
|
|
|
|
|
color_scheme = np.random.randint(0, 3) |
|
|
|
|
|
if color_scheme == 0: |
|
|
img[:, :, 0] = (pattern + 1) / 2 * 1.0 |
|
|
img[:, :, 1] = (pattern + 1) / 2 * 0.5 |
|
|
img[:, :, 2] = (pattern + 1) / 2 * 0.1 |
|
|
elif color_scheme == 1: |
|
|
img[:, :, 0] = (pattern + 1) / 2 * 0.1 |
|
|
img[:, :, 1] = (pattern + 1) / 2 * 0.4 |
|
|
img[:, :, 2] = (pattern + 1) / 2 * 1.0 |
|
|
else: |
|
|
img[:, :, 0] = (pattern + 1) / 2 * 0.8 |
|
|
img[:, :, 1] = (pattern + 1) / 2 * 0.2 |
|
|
img[:, :, 2] = (pattern + 1) / 2 * 0.8 |
|
|
|
|
|
data.append(img) |
|
|
|
|
|
return np.array(data).astype("float32") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Linear DDPM variance schedule: beta_t ramps from 1e-4 to 2e-2 over TIMESTEPS.
beta_np = np.linspace(0.0001, 0.02, TIMESTEPS)
beta = tf.cast(beta_np, dtype=tf.float32)

# alpha_t = 1 - beta_t; alpha_bar_t is the cumulative product of alphas up to
# step t, used for the closed-form forward-noising formula.
alpha = 1.0 - beta
alpha_bar = tf.math.cumprod(alpha, axis=0)
|
|
|
|
|
def forward_noise(x_0, t):
    """Apply closed-form forward diffusion q(x_t | x_0).

    Args:
        x_0: batch of clean images, shape (B, H, W, C).
        t: per-example integer timesteps, shape (B,).

    Returns:
        (noisy_images, noise) — the noised batch and the Gaussian noise
        that was mixed in (the training target).
    """
    batch = tf.shape(x_0)[0]
    # Look up alpha_bar for each example's timestep, broadcastable over HWC.
    a_bar_t = tf.reshape(tf.gather(alpha_bar, t), [batch, 1, 1, 1])
    eps = tf.random.normal(shape=tf.shape(x_0), dtype=tf.float32)
    noisy = tf.sqrt(a_bar_t) * x_0 + tf.sqrt(1.0 - a_bar_t) * eps
    return noisy, eps
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def build_unet():
    """Build the noise-prediction network.

    A small encoder/bottleneck/decoder CNN conditioned on the timestep via
    a dense embedding added to the first feature map. NOTE(review): despite
    the name, there are no encoder-to-decoder skip connections.

    Returns:
        keras.Model mapping [image (H,W,C), timestep (1,)] -> predicted noise.
    """
    img_in = layers.Input(shape=(IMG_SIZE, IMG_SIZE, CHANNELS))
    t_in = layers.Input(shape=(1,))

    # Embed the scalar timestep and reshape so it broadcasts spatially.
    t_emb = layers.Dense(32, activation="swish")(t_in)
    t_emb = layers.Reshape((1, 1, 32))(t_emb)

    # Stem: lift the image to 32 channels, then inject time conditioning.
    h = layers.Conv2D(32, 3, padding="same", activation="swish")(img_in)
    h = layers.Add()([h, t_emb])

    # Encoder: two strided convolutions halve the resolution each time.
    down1 = layers.Conv2D(64, 3, strides=2, padding="same", activation="swish")(h)
    down2 = layers.Conv2D(128, 3, strides=2, padding="same", activation="swish")(down1)

    # Bottleneck.
    mid = layers.Conv2D(256, 3, padding="same", activation="swish")(down2)

    # Decoder: transposed convolutions restore the original resolution.
    up = layers.Conv2DTranspose(128, 3, strides=2, padding="same", activation="swish")(mid)
    up = layers.Conv2DTranspose(64, 3, strides=2, padding="same", activation="swish")(up)
    up = layers.Conv2D(32, 3, padding="same", activation="swish")(up)

    # Linear 1x1 projection back to CHANNELS (predicted noise, unbounded).
    noise_out = layers.Conv2D(CHANNELS, 1, padding="same")(up)
    return keras.Model([img_in, t_in], noise_out)
|
|
|
|
|
# Build the noise-prediction network and its training objective.
model = build_unet()
optimizer = keras.optimizers.Adam(learning_rate=1e-4)
loss_fn = keras.losses.MeanSquaredError()

# Best-effort resume: load previously saved weights when present. A corrupt
# or architecture-mismatched file must not prevent startup, but the failure
# should be visible rather than silently swallowed (the original bare
# `except: pass` also trapped KeyboardInterrupt/SystemExit).
if os.path.exists(WEIGHTS_FILE):
    try:
        model.load_weights(WEIGHTS_FILE)
        print("HQ Brain Loaded!")
    except Exception as exc:
        print(f"Could not load weights from {WEIGHTS_FILE}: {exc}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def train_model(epochs):
    """Train the diffusion model, yielding human-readable status strings.

    Written as a generator so a Gradio Textbox can stream progress.
    Weights are saved to WEIGHTS_FILE after every epoch.

    Args:
        epochs: number of passes over the generated dataset (coerced to int).

    Yields:
        Status/progress strings.
    """
    yield "Generating HD Data (Math)..."
    x_train = generate_veda_patterns(num_images=600)
    # Shuffle BEFORE batching so individual images are remixed each epoch;
    # shuffling after .batch() only permutes whole fixed batches.
    dataset = (
        tf.data.Dataset.from_tensor_slices(x_train)
        .shuffle(600)
        .batch(BATCH_SIZE)
    )

    yield "Dataset Ready. Training..."

    for epoch in range(int(epochs)):
        total_loss = 0.0
        steps = 0
        for batch in dataset:
            batch_size = tf.shape(batch)[0]
            # Sample an independent random diffusion step for each image.
            t = tf.random.uniform([batch_size], minval=0, maxval=TIMESTEPS, dtype=tf.int32)
            noisy_images, noise = forward_noise(batch, t)

            with tf.GradientTape() as tape:
                pred_noise = model([noisy_images, t], training=True)
                loss = loss_fn(noise, pred_noise)

            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            # Accumulate as a Python float so we don't retain per-step tensors.
            total_loss += float(loss)
            steps += 1

        model.save_weights(WEIGHTS_FILE)
        yield f"Epoch {epoch+1}/{epochs} - Loss: {total_loss/steps:.4f}"

    yield "Training Complete."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_art():
    """Sample one image from the trained diffusion model.

    Runs the DDPM ancestral-sampling loop from pure Gaussian noise down to
    t=1, clips the result to [0, 1], and upscales 64x64 -> 512x512.

    Returns:
        A 512x512 RGB PIL.Image.
    """
    # x_T: start from standard Gaussian noise.
    img = tf.random.normal((1, IMG_SIZE, IMG_SIZE, CHANNELS), dtype=tf.float32)

    # Reverse diffusion for t = T-1, ..., 1. NOTE(review): the t=0 update is
    # never applied — confirm this shortcut is intended.
    for i in range(TIMESTEPS - 1, 0, -1):
        t = tf.fill([1], i)
        # Network predicts the noise component of the current image.
        pred_noise = model([img, t], training=False)

        # Schedule values for this timestep.
        alpha_t = tf.gather(alpha, i)
        alpha_bar_t = tf.gather(alpha_bar, i)
        beta_t = tf.gather(beta, i)

        # DDPM posterior mean:
        #   x_{t-1} = (x_t - (1 - alpha_t)/sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_t)
        term1 = 1.0 / tf.sqrt(alpha_t)
        term2 = (1.0 - alpha_t) / tf.sqrt(1.0 - alpha_bar_t)

        img = term1 * (img - term2 * pred_noise)

        # Add fresh noise with variance beta_t, except on the final step.
        if i > 1:
            z = tf.random.normal((1, IMG_SIZE, IMG_SIZE, CHANNELS), dtype=tf.float32)
            img = img + (tf.sqrt(beta_t) * z)

    # Training data lies in [0, 1], so clamp the sample to the same range
    # before converting to uint8 pixels.
    img = tf.clip_by_value(img, 0.0, 1.0)
    img = img[0].numpy()
    img = (img * 255).astype(np.uint8)

    # Upscale with Lanczos resampling for a smoother 512x512 presentation.
    pil_img = Image.fromarray(img)
    pil_img = pil_img.resize((512, 512), Image.LANCZOS)

    return pil_img
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_training_wrapper():
    """Run a fixed 10-epoch training session, relaying status strings."""
    yield from train_model(10)
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks(title="Akasha Art Engine") as demo:
    gr.Markdown("# Akasha Art Engine (High Quality)")
    gr.Markdown("Generating 64x64 math patterns and upscaling to 512x512.")

    # Sampling tab: one click produces one generated image.
    with gr.Tab("Dream"):
        gen_btn = gr.Button("Generate Energy Pattern", variant="primary")
        out_img = gr.Image(label="Veda Dream")
        gen_btn.click(generate_art, outputs=out_img)

    # Training tab: streams status strings from the training generator.
    with gr.Tab("Train"):
        train_btn = gr.Button("Train AI (10 Epochs - Takes ~5 mins)")
        log = gr.Textbox(label="Status")
        train_btn.click(run_training_wrapper, outputs=log)

demo.launch()