| | import gradio as gr |
| | import torch |
| | import time |
| | import random |
| | import os |
| | import json |
| | import requests |
| | import tempfile |
| | from pathlib import Path |
| | from gradio_client import Client, handle_file |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
# Startup banner (box-drawing/emoji characters appear mojibake-mangled in transit).
print("βββ EDEN REALISM ENGINE β FULL PIPELINE βββ")
| |
|
| | from diffusers import ( |
| | StableDiffusionXLPipeline, |
| | DPMSolverMultistepScheduler, |
| | DPMSolverSDEScheduler, |
| | ) |
| |
|
# Eden Protocol "smart negatives": heavily weighted suppression terms used as
# the default negative_prompt everywhere, targeting the airbrushed/plastic
# look SDXL tends toward.
EDEN_NEGATIVE = """(worst quality:1.8), (low quality:1.8), (airbrushed:1.6), (plastic:1.6), (shiny skin:1.6),
(glossy skin:1.5), (waxy:1.5), (porcelain:1.5), (3d render:1.4), (cgi:1.3), (digital art:1.4),
(bad anatomy:1.5), (deformed:1.6), cartoon, anime, illustration, painting, drawing, sketch"""

# Positive keyword pack prepended when "skin boost" is requested: pushes
# output toward matte, pore-level skin texture.
EDEN_SKIN_BOOST = """natural skin texture, visible pores, vellus hair, subsurface scattering,
skin imperfections, matte skin finish, micro-texture detail, pore-level detail,
natural redness variation, natural sebum balance"""

# Named generation presets: guidance scale (cfg), inference step count, and
# sampler family ("sde" = DPM++ SDE Karras, "2m" = DPM++ 2M Karras).
# Used as fallbacks whenever the caller leaves cfg/steps unset.
PRESETS = {
    "Hyperreal": {"cfg": 7.5, "steps": 50, "sampler": "sde"},
    "Cinematic": {"cfg": 6, "steps": 40, "sampler": "2m"},
    "Kling Max": {"cfg": 8, "steps": 60, "sampler": "sde"},
    "Skin Perfect": {"cfg": 7, "steps": 45, "sampler": "sde"},
    "Portrait": {"cfg": 5.5, "steps": 35, "sampler": "2m"},
    "Natural": {"cfg": 4.5, "steps": 30, "sampler": "2m"},
}
| |
|
| | |
# Load the SDXL checkpoint once at import time and pin it to the GPU.
# fp16 weights + safetensors keep load time and VRAM manageable.
print("Loading Juggernaut XL v9...")
pipe = StableDiffusionXLPipeline.from_pretrained(
    "RunDiffusion/Juggernaut-XL-v9",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
pipe.to("cuda")
print("✅ Juggernaut XL v9 on GPU")  # NOTE(review): leading emoji reconstructed from mojibake
| |
|
| | |
# Connect to the hosted LTX-2 TURBO Space for image-to-video animation.
# On failure ltx_client stays None and generate_video() degrades to
# returning the still keyframe instead of a video.
print("Connecting to LTX-2 TURBO...")
try:
    ltx_client = Client("alexnasa/ltx-2-TURBO")
    print("✅ LTX-2 TURBO connected")  # NOTE(review): emoji reconstructed from mojibake
except Exception as e:
    ltx_client = None
    print(f"β LTX-2 TURBO fallback: {e}")
| |
|
def set_scheduler(sampler="2m"):
    """Swap the global pipeline's scheduler between DPM++ variants.

    "sde" selects the stochastic DPM-Solver SDE; any other value falls
    back to the deterministic 2nd-order multistep solver. Both variants
    are configured with Karras sigmas.
    """
    base_config = pipe.scheduler.config
    if sampler != "sde":
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            base_config,
            algorithm_type="dpmsolver++",
            solver_order=2,
            use_karras_sigmas=True,
        )
    else:
        pipe.scheduler = DPMSolverSDEScheduler.from_config(
            base_config,
            use_karras_sigmas=True,
        )
| |
|
def generate_keyframe(prompt, preset="Skin Perfect", cfg=None, steps=None):
    """Render one 768x512 keyframe with Juggernaut XL for the video pipeline.

    Falls back to the preset's cfg/steps whenever cfg/steps are falsy.
    Returns (png_path, seed) so the caller can reuse the seed downstream.
    """
    chosen = PRESETS.get(preset, PRESETS["Skin Perfect"])
    guidance = cfg or chosen["cfg"]
    n_steps = int(steps) if steps else chosen["steps"]
    set_scheduler(chosen.get("sampler", "2m"))

    kf_seed = random.randint(0, 2**32 - 1)
    gen = torch.Generator(device="cuda").manual_seed(kf_seed)

    # Skin-texture keywords are prepended so they dominate token weighting.
    result = pipe(
        prompt=f"{EDEN_SKIN_BOOST}, {prompt}",
        negative_prompt=EDEN_NEGATIVE,
        num_inference_steps=n_steps,
        guidance_scale=guidance,
        height=512, width=768,
        generator=gen,
    )
    frame = result.images[0]

    out_path = f"/tmp/eden_keyframe_{kf_seed}.png"
    frame.save(out_path)
    return out_path, kf_seed
| |
|
| |
|
| | |
| | |
| | |
| | |
def generate_video(prompt, preset, cfg, steps, frames, fps):
    """Two-stage text-to-video: local Juggernaut keyframe -> remote LTX-2 TURBO.

    Returns (media_path, status_message). If LTX-2 fails or was never
    connected, the still keyframe's path is returned instead of a video so
    the caller still gets something to display.
    """
    if not prompt.strip():
        return None, "Enter a prompt first"

    start = time.time()

    # Stage 1: render a still keyframe with the local SDXL pipeline.
    print(f"[VIDEO] Step 1: Generating keyframe...")
    kf_path, seed = generate_keyframe(prompt, preset, cfg, steps)
    kf_time = time.time() - start
    print(f"[VIDEO] Keyframe in {kf_time:.1f}s")

    # Stage 2: animate the keyframe on the hosted LTX-2 TURBO Space.
    if ltx_client:
        try:
            print(f"[VIDEO] Step 2: Animating with LTX-2 TURBO...")
            # Clamp clip length to 2-8 s; defaults assume 97 frames @ 24 fps.
            duration = max(2, min(8, int((frames or 97) / (fps or 24))))

            result = ltx_client.predict(
                prompt=f"cinematic motion, smooth natural movement, {prompt}",
                first_frame=handle_file(kf_path),
                duration=duration,
                height=512,
                width=768,
                seed=seed,  # reuse the keyframe seed for reproducibility
                randomize_seed=False,
                enhance_prompt=True,
                camera_lora="No LoRA",
                generation_mode="Image-to-Video",
                api_name="/generate_video"
            )

            vid_time = time.time() - start

            # gradio_client may return (video_path, ...) or a bare path.
            if isinstance(result, tuple):
                vid_path = result[0]
            else:
                vid_path = result

            print(f"[VIDEO] Complete in {vid_time:.1f}s")
            # NOTE(review): leading emoji reconstructed from mojibake.
            info = f"✅ Video: {vid_time:.1f}s total | Keyframe {kf_time:.1f}s + LTX-2 {vid_time - kf_time:.1f}s | {duration}s @ 24fps | Seed {seed}"
            return vid_path, info

        except Exception as e:
            print(f"[VIDEO] LTX-2 error: {e}")
            # Degrade gracefully: hand back the still keyframe with a hint.
            info = f"β Keyframe generated ({kf_time:.1f}s) β LTX-2 TURBO busy: {str(e)[:100]}. Try again in 30s."
            return kf_path, info
    else:
        # Client never connected at startup; keyframe-only fallback.
        info = f"β Keyframe generated ({kf_time:.1f}s) β LTX-2 TURBO reconnecting"
        return kf_path, info
| |
|
| |
|
| | |
| | |
| | |
def generate_images(prompt, preset, w, h, cfg, steps, neg, seed, rand_seed, realism, skin_boost, num_images, ref_image, ref_strength):
    """Generate a batch of txt2img renders with Juggernaut XL.

    Args:
        prompt: base positive prompt; blank input short-circuits.
        preset: PRESETS key; unknown keys fall back to "Skin Perfect".
        w, h: output size; falsy values fall back to 1024x1024.
        cfg, steps: override the preset's guidance/steps when truthy.
        neg: custom negative prompt; blank uses EDEN_NEGATIVE.
        seed: base seed for fixed-seed mode (image i gets seed + i);
            None/blank falls back to 0 instead of raising.
        rand_seed: when True, draw a fresh random seed per image.
        realism, skin_boost: append Eden keyword packs to the prompt.
        num_images: batch size; falsy falls back to 4.
        ref_image, ref_strength: accepted for API compatibility; this
            txt2img path does not use them.

    Returns:
        (list of saved PNG paths, status string).
    """
    if not prompt.strip():
        return [], "Enter a prompt first"

    p = PRESETS.get(preset, PRESETS["Skin Perfect"])
    actual_cfg = cfg if cfg else p["cfg"]
    actual_steps = int(steps) if steps else p["steps"]
    actual_w = int(w) if w else 1024
    actual_h = int(h) if h else 1024
    actual_num = int(num_images) if num_images else 4
    # Fix: gr.Number yields None when left empty; int(None) used to raise
    # TypeError whenever rand_seed was False. Fall back to 0 instead.
    base_seed = int(seed) if seed else 0
    set_scheduler(p.get("sampler", "2m"))

    full_prompt = prompt.strip()
    if skin_boost:
        full_prompt = f"{EDEN_SKIN_BOOST}, {full_prompt}"
    if realism:
        full_prompt = f"{full_prompt}, photorealistic, 8k, RAW photo, shot on ARRI ALEXA 35"

    full_neg = neg if neg and neg.strip() else EDEN_NEGATIVE

    images = []
    start_total = time.time()
    for i in range(actual_num):
        # Fixed-seed mode increments per image so the batch stays varied
        # but reproducible.
        s = random.randint(0, 2**32 - 1) if rand_seed else (base_seed + i)
        generator = torch.Generator(device="cuda").manual_seed(s)

        start = time.time()
        img = pipe(
            prompt=full_prompt,
            negative_prompt=full_neg,
            num_inference_steps=actual_steps,
            guidance_scale=actual_cfg,
            height=actual_h,
            width=actual_w,
            generator=generator,
        ).images[0]

        out_path = f"/tmp/eden_img_{s}.png"
        img.save(out_path)
        images.append(out_path)
        print(f" [{i+1}/{actual_num}] {time.time()-start:.1f}s | Seed {s}")

    total = time.time() - start_total
    # NOTE(review): leading emoji reconstructed from mojibake; "Γ" kept verbatim.
    info = f"✅ {len(images)} images in {total:.1f}s | {actual_steps} steps | CFG {actual_cfg} | {actual_w}Γ{actual_h}"
    return images, info
| |
|
| |
|
| | |
| | |
| | |
def stitch_videos(files, fps):
    """Concatenate uploaded clips with ffmpeg's concat demuxer (stream copy).

    Args:
        files: gradio File objects (anything with .name) or plain paths;
            falsy input returns an error message instead of raising.
        fps: accepted for API compatibility; stream copy ("-c copy")
            preserves each source clip's frame rate, so fps is unused.

    Returns:
        (output_path_or_None, status string).
    """
    if not files:
        return None, "Upload video clips first"
    try:
        import subprocess
        out = f"/tmp/eden_stitched_{int(time.time())}.mp4"
        list_file = "/tmp/ffmpeg_list.txt"
        with open(list_file, "w") as f:
            for clip in files:
                path = clip.name if hasattr(clip, 'name') else str(clip)
                f.write(f"file '{path}'\n")

        # Fix: pass an argv list with shell=False so clip paths containing
        # spaces or shell metacharacters cannot break (or inject into) the
        # command line. Previously the command was a shell=True string.
        cmd = ["ffmpeg", "-y", "-f", "concat", "-safe", "0",
               "-i", list_file, "-c", "copy", out]
        subprocess.run(cmd, check=True, capture_output=True)
        # NOTE(review): leading emoji reconstructed from mojibake.
        return out, f"✅ Stitched {len(files)} clips β {out}"
    except Exception as e:
        return None, f"β Stitch failed: {str(e)[:200]}"
| |
|
| |
|
| | |
| | |
| | |
def download_model(model_id, output_dir):
    """Snapshot-download a Hugging Face repo into /tmp/models/<repo name>.

    output_dir is accepted for API compatibility but unused; the target
    directory is derived from the last path segment of model_id.
    Returns a human-readable status string (never raises).
    """
    if not model_id.strip():
        return "Enter a model ID"
    try:
        from huggingface_hub import snapshot_download
        target = f"/tmp/models/{model_id.split('/')[-1]}"
        local_path = snapshot_download(model_id, local_dir=target)
        # NOTE(review): leading emoji reconstructed from mojibake.
        return f"✅ Downloaded {model_id} to {local_path}"
    except Exception as e:
        return f"β {str(e)[:200]}"
| |
|
| |
|
| | |
| | |
| | |
def rake_model(model_path, intensity, mode, output):
    """Stub endpoint: report the requested RAKE operation (no model is touched).

    output is accepted for API compatibility but unused.
    """
    summary = "RAKE: {} @ intensity {} β {}".format(model_path, intensity, mode)
    return summary
| |
|
| |
|
| | |
| | |
| | |
def chat_with_ai(message, history, model_name="qwen"):
    """Run one chat turn against Qwen2.5-72B via the HF router (Novita provider).

    Args:
        message: user text; blank input returns history unchanged.
        history: list of [user, assistant] pairs (or None).
        model_name: reserved for future model switching; currently unused.

    Returns:
        A NEW history list with [message, reply] appended. The caller's
        list is never mutated (previously the input list was aliased and
        appended to in place). On any API failure a canned Eden-Protocol
        tip is used as the reply instead of raising.
    """
    if not message.strip():
        return history

    # Fix: copy instead of aliasing so the caller's history is not mutated.
    new_history = list(history) if history else []

    try:
        hf_token = os.environ.get("HF_TOKEN", "")

        # System persona + replayed conversation in OpenAI chat format.
        messages = [{"role": "system", "content": "You are Eden AI, an uncensored creative director for the Eden Realism Engine. You help craft photorealistic prompts for AI image/video generation. You know the Six Pillars of Photorealism: DPM++ Karras, 30-50 steps, CFG 4.0-4.5, Smart Negatives, 1024x1024 base with Hires Fix, uncensored models only. You specialize in melanin-rich skin texture, cinematic lighting, and natural beauty. Be direct, creative, specific. When asked to improve a prompt, rewrite it completely with Eden Protocol keywords."}]

        for h in (history or []):
            if h[0]: messages.append({"role": "user", "content": h[0]})
            if h[1]: messages.append({"role": "assistant", "content": h[1]})
        messages.append({"role": "user", "content": message})

        r = requests.post(
            "https://router.huggingface.co/novita/v3/openai/chat/completions",
            headers={"Authorization": f"Bearer {hf_token}", "Content-Type": "application/json"},
            json={"model": "Qwen/Qwen2.5-72B-Instruct", "messages": messages, "max_tokens": 1024, "temperature": 0.8},
            timeout=60
        )

        if r.status_code == 200:
            data = r.json()
            reply = data.get("choices", [{}])[0].get("message", {}).get("content", "Processing...")
        else:
            # Non-200: canned prompt-engineering advice instead of an error.
            reply = f"Eden AI here. For your prompt, try: add 'melanin-rich skin, visible pores, matte finish, subsurface scattering' + set CFG to 4.0-4.5 + use DPM++ Karras. What are you generating?"
    except Exception:
        # Network/parse failure: degrade to offline tips, never raise.
        reply = f"Eden AI: Connection limited. Quick tips β CFG 4.0-4.5, DPM++ Karras, always add (plastic:1.6) to negatives. What do you need?"

    new_history.append([message, reply])
    return new_history
| |
|
| |
|
| | |
| | |
| | |
def quantize_model(model_path, bit_level):
    """Stub endpoint: report the requested quantization (no weights are touched)."""
    return "Quantize: " + str(model_path) + " β " + str(bit_level) + "-bit ready"
| |
|
| |
|
| | |
| | |
| | |
| |
|
# ── Gradio app ──────────────────────────────────────────────────────────
# Headless-style UI: every functional row is hidden (visible=False); the
# Blocks app mainly exposes API endpoints that an external client calls via
# gradio_client. The component order in each row defines each endpoint's
# positional signature — do not reorder components or rows.
with gr.Blocks(title="EDEN Realism Engine") as app:
    gr.Markdown("# π± EDEN REALISM ENGINE\n**Juggernaut XL v9 + LTX-2 TURBO Β· Six Pillars Β· Beryl AI Labs**")

    # Endpoint: text-to-video pipeline, exposed under api_name="predict".
    with gr.Row(visible=False):
        v_prompt = gr.Textbox(); v_preset = gr.Textbox(); v_cfg = gr.Number()
        v_steps = gr.Number(); v_frames = gr.Number(); v_fps = gr.Number()
        v_out = gr.File(); v_info = gr.Textbox()
    v_btn = gr.Button("v", visible=False)
    v_btn.click(fn=generate_video, inputs=[v_prompt, v_preset, v_cfg, v_steps, v_frames, v_fps], outputs=[v_out, v_info], api_name="predict")

    # Endpoint: batch text-to-image (14 positional inputs).
    with gr.Row(visible=False):
        i_prompt = gr.Textbox(); i_preset = gr.Textbox(); i_w = gr.Number(); i_h = gr.Number()
        i_cfg = gr.Number(); i_steps = gr.Number(); i_neg = gr.Textbox(); i_seed = gr.Number()
        i_rand = gr.Checkbox(); i_real = gr.Checkbox(); i_skin = gr.Checkbox()
        i_num = gr.Number(); i_ref = gr.Image(); i_refstr = gr.Number()
        i_gallery = gr.Gallery(); i_info2 = gr.Textbox()
    i_btn = gr.Button("i", visible=False)
    i_btn.click(fn=generate_images, inputs=[i_prompt, i_preset, i_w, i_h, i_cfg, i_steps, i_neg, i_seed, i_rand, i_real, i_skin, i_num, i_ref, i_refstr], outputs=[i_gallery, i_info2])

    # No-op spacer endpoint — presumably keeps the auto-generated API
    # index positions stable for a client that addresses endpoints by
    # position; TODO confirm against the consuming front end.
    sp2 = gr.Button("s2", visible=False)
    sp2.click(fn=lambda: None, inputs=[], outputs=[])

    # Endpoint: ffmpeg clip stitching.
    with gr.Row(visible=False):
        st_files = gr.File(file_count="multiple"); st_fps = gr.Number()
        st_out = gr.File(); st_info3 = gr.Textbox()
    st_btn = gr.Button("st", visible=False)
    st_btn.click(fn=stitch_videos, inputs=[st_files, st_fps], outputs=[st_out, st_info3])

    # More no-op spacer endpoints (slots 4-7) — same positional-index
    # assumption as above; TODO confirm.
    for idx in range(4, 8):
        sp = gr.Button(f"s{idx}", visible=False)
        sp.click(fn=lambda: None, inputs=[], outputs=[])

    # Endpoint: Hugging Face model download (dl_d is passed but unused by
    # download_model).
    with gr.Row(visible=False):
        dl_m = gr.Textbox(); dl_d = gr.Textbox(); dl_r = gr.Textbox()
    dl_btn = gr.Button("dl", visible=False)
    dl_btn.click(fn=download_model, inputs=[dl_m, dl_d], outputs=[dl_r])

    # Endpoint: RAKE stub.
    with gr.Row(visible=False):
        rk_m = gr.Textbox(); rk_i = gr.Number(); rk_mode = gr.Textbox(); rk_o = gr.Textbox()
        rk_r = gr.Textbox()
    rk_btn = gr.Button("rk", visible=False)
    rk_btn.click(fn=rake_model, inputs=[rk_m, rk_i, rk_mode, rk_o], outputs=[rk_r])

    # Endpoint: chat with Eden AI (history passed/returned as JSON pairs).
    with gr.Row(visible=False):
        ch_msg = gr.Textbox(); ch_hist = gr.JSON(); ch_model = gr.Textbox()
        ch_out = gr.JSON()
    ch_btn = gr.Button("ch", visible=False)
    ch_btn.click(fn=chat_with_ai, inputs=[ch_msg, ch_hist, ch_model], outputs=[ch_out])

    # Endpoint: quantization stub.
    with gr.Row(visible=False):
        q_m = gr.Textbox(); q_b = gr.Textbox(); q_r = gr.Textbox()
    q_btn = gr.Button("q", visible=False)
    q_btn.click(fn=quantize_model, inputs=[q_m, q_b], outputs=[q_r])

    gr.Markdown("### Pipeline: Juggernaut XL v9 (keyframes) β LTX-2 TURBO (animation) β Eden Protocol")


# Bounded queue limits concurrent jobs; bind to all interfaces on the
# standard Gradio/Spaces port.
app.queue(max_size=20)
app.launch(server_name="0.0.0.0", server_port=7860)
| |
|