File size: 5,727 Bytes
9fbc9a0
cafa63d
a436e48
 
 
 
cafa63d
120deb9
cafa63d
f0fb06d
120deb9
 
a436e48
0d67961
cafa63d
a436e48
 
120deb9
 
 
 
 
 
 
 
f0fb06d
120deb9
 
 
 
 
e3de1b9
120deb9
 
 
0d67961
120deb9
9fbc9a0
120deb9
 
 
 
 
 
 
 
 
 
 
9fbc9a0
120deb9
a436e48
 
120deb9
9fbc9a0
 
 
 
 
 
 
120deb9
 
 
f0fb06d
120deb9
 
a436e48
 
 
 
cafa63d
a436e48
 
120deb9
 
 
 
f0fb06d
120deb9
 
f0fb06d
120deb9
 
f0fb06d
 
120deb9
 
 
f0fb06d
 
 
 
 
120deb9
f0fb06d
 
 
9fbc9a0
120deb9
f0fb06d
 
120deb9
f0fb06d
120deb9
f0fb06d
 
 
 
f46b4ed
f0fb06d
 
 
 
 
 
 
 
120deb9
 
09dbcb2
9fbc9a0
120deb9
a436e48
120deb9
 
a436e48
 
f0fb06d
a436e48
cafa63d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
# app.py (final version - with the second SyntaxError fixed)

import gradio as gr
import numpy as np
import random
import torch
from diffusers import DiffusionPipeline
import time

# --- 1. Settings and Constants ---
# Run on GPU when available; fp16 only makes sense on CUDA, so fall back to
# fp32 on CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32

# NOTE(review): "YourUsername" looks like a placeholder repo id — confirm the
# real Hugging Face model id before deployment.
MODEL_ID = "YourUsername/Takween-v1" 
BASE_MODEL_ID = "runwayml/stable-diffusion-v1-5"
# Upper bound for random seeds (fits in a signed 32-bit int).
MAX_SEED = np.iinfo(np.int32).max

# Inline SVG used as the app logo in the page header.
LOGO_SVG = """
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
  <path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2z"></path>
  <path d="M7 7h10v2H7z"></path>
  <path d="M12 7v10"></path>
</svg>
"""

# --- 2. Model Loading ---
# Try the fine-tuned model first; if it cannot be loaded (placeholder repo id,
# missing weights, no network access), fall back to the public base model so
# the app still starts. safety_checker=None disables the NSFW filter.
try:
    pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=DTYPE, safety_checker=None)
    print(f"✅ Trained model '{MODEL_ID}' loaded successfully.")
except Exception as e:
    # Log the actual failure reason instead of swallowing it silently, then
    # keep the deliberate best-effort fallback to the base model.
    print(f"❌ Could not load trained model '{MODEL_ID}' ({e}). Loading base model.")
    pipe = DiffusionPipeline.from_pretrained(BASE_MODEL_ID, torch_dtype=DTYPE, safety_checker=None)

pipe = pipe.to(DEVICE)

# --- 3. Professional Theme (Golden Version) ---
# Amber/neutral palette with the IBM Plex Sans font; .set() overrides
# individual CSS-variable tokens ("*name" references theme variables).
theme = gr.themes.Base(
    primary_hue=gr.themes.colors.amber,
    secondary_hue=gr.themes.colors.neutral,
    font=[gr.themes.GoogleFont("IBM Plex Sans"), "system-ui", "sans-serif"],
).set(
    body_background_fill="*neutral_50",
    block_background_fill="white",
    block_border_width="1px",
    block_shadow="*shadow_drop_lg",
    # Primary buttons darken slightly on hover.
    button_primary_background_fill="*primary_500",
    button_primary_background_fill_hover="*primary_600",
)

# --- 4. Inference Function with UI Updates (Corrected) ---
def infer(prompt, negative_prompt, guidance_scale, num_inference_steps, seed, randomize_seed):
    """Generate one image from *prompt*, streaming UI state via yielded updates.

    Yields twice: first to clear the image and disable the button while the
    pipeline runs, then to publish the finished image, report the seed that
    was actually used, and re-enable the button.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Build the RNG on the target device and fix its seed so results are
    # reproducible for a given (prompt, seed) pair. manual_seed() returns the
    # generator, so construction and seeding chain into one expression.
    generator = torch.Generator(device=DEVICE).manual_seed(seed)

    # First update: show a busy state while the diffusion loop runs.
    yield {
        output_image: gr.update(value=None, interactive=False, visible=True),
        run_button: gr.update(interactive=False, value="Generating..."),
    }

    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_inference_steps),
        generator=generator,
    )
    image = result.images[0]

    # Final update: publish the image and the seed, restore the button.
    yield {
        output_image: gr.update(value=image, interactive=True),
        output_seed: gr.update(value=seed),
        run_button: gr.update(interactive=True, value="Generate Again"),
    }

# --- 5. Professional UI Layout ---
# Declarative Blocks layout: left column holds the inputs, right column the
# outputs; the click handler at the bottom wires them to infer().
with gr.Blocks(theme=theme, css="#footer {text-align: center;}") as demo:
    # Header: inline SVG logo next to the project title.
    with gr.Row():
        gr.HTML(f"<div style='display: flex; align-items: center; gap: 12px;'>{LOGO_SVG}<h1>Takween Project</h1></div>")
    gr.Markdown("#### A specialized model for generating precise geometric images from text descriptions.")
    gr.HTML("<hr>")
    with gr.Row():
        # Input column (narrower, scale=1).
        with gr.Column(scale=1):
            prompt = gr.Textbox(label="Prompt", placeholder="A red circle with thick black borders...", lines=3)
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Low quality, blurry, distorted...")
            # Advanced knobs collapsed by default.
            with gr.Accordion("Advanced Settings", open=False):
                guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, value=7.5, step=0.1)
                num_inference_steps = gr.Slider(label="Number of Steps", minimum=10, maximum=100, value=30, step=1)
                with gr.Row():
                    # precision=0 makes the seed an integer.
                    seed = gr.Number(label="Seed", value=0, precision=0)
                    randomize_seed = gr.Checkbox(label="Randomize", value=True)
            run_button = gr.Button("Generate Image", variant="primary")
            gr.Examples(examples=["A filled red circle with a thick black border", "An outline blue triangle positioned to the left of a yellow square", "A green star overlapping a purple rectangle"], inputs=[prompt])
        # Output column (wider, scale=2).
        with gr.Column(scale=2):
            output_image = gr.Image(label="Generated Image", interactive=False, height=512)
            output_seed = gr.Textbox(label="Seed Used", interactive=False)
    gr.HTML("<hr>")
    with gr.Accordion("Team and Acknowledgments", open=False):
        gr.Markdown("""
        <div style='text-align: left;'>
            <h4><b>Development Team:</b></h4>
            <ul>
                <li>Osama Saeed</li>
                <li>Tareq Al-Omari</li>
            </ul>
            <hr>
            <h4><b>Special Thanks:</b></h4>
            <p>We extend our sincere gratitude for the guidance and support of:</p>
            <ul>
                <li><b>Dr. Akram Al-Sabari</b> (Professor of AI and Machine Learning)</li>
                <li><b>Eng. Faten Al-Hayafi</b> (Practical Side Instructor)</li>
            </ul>
        </div>
        """)
    gr.Markdown("<p id='footer'>© 2025 Takween Project. Developed by Osama Saeed & Tareq Al-Omari. All rights reserved.</p>")
    
    # Wire the button to infer(); since infer is a generator returning dicts,
    # each yield updates the listed output components.
    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, guidance_scale, num_inference_steps, seed, randomize_seed],
        outputs=[output_image, output_seed, run_button],
    )

# --- 6. Launch the App ---
# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()