alomari7 committed on
Commit
0d67961
·
verified ·
1 Parent(s): f0fb06d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -4
app.py CHANGED
@@ -1,4 +1,4 @@
1
- # app.py (English LTR Version)
2
 
3
  import gradio as gr
4
  import numpy as np
@@ -11,10 +11,12 @@ import time
11
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
12
  DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32
13
 
14
- MODEL_ID = "YourUsername/Takween-v1" # IMPORTANT: Replace with your model's name on Hugging Face
 
15
  BASE_MODEL_ID = "runwayml/stable-diffusion-v1-5"
16
  MAX_SEED = np.iinfo(np.int32).max
17
 
 
18
  LOGO_SVG = """
19
  <svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
20
  <path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2z"></path>
@@ -33,9 +35,9 @@ except Exception:
33
 
34
  pipe = pipe.to(DEVICE)
35
 
36
- # --- 3. Professional Theme ---
37
  theme = gr.themes.Base(
38
- primary_hue=gr.themes.colors.purple,
39
  secondary_hue=gr.themes.colors.neutral,
40
  font=[gr.themes.GoogleFont("IBM Plex Sans"), "system-ui", "sans-serif"],
41
  ).set(
@@ -54,11 +56,13 @@ def infer(prompt, negative_prompt, guidance_scale, num_inference_steps, seed, ra
54
 
55
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
56
 
 
57
  yield {
58
  output_image: gr.update(value=None, interactive=False, visible=True),
59
  run_button: gr.update(interactive=False, value="Generating..."),
60
  }
61
 
 
62
  image = pipe(
63
  prompt=prompt,
64
  negative_prompt=negative_prompt,
@@ -67,6 +71,7 @@ def infer(prompt, negative_prompt, guidance_scale, num_inference_steps, seed, ra
67
  generator=generator,
68
  ).images[0]
69
 
 
70
  yield {
71
  output_image: gr.update(value=image, interactive=True),
72
  output_seed: gr.update(value=seed),
 
1
+ # app.py (النسخة النهائية - بالثيم الذهبي)
2
 
3
  import gradio as gr
4
  import numpy as np
 
11
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
12
  DTYPE = torch.float16 if torch.cuda.is_available() else torch.float32
13
 
14
+ # IMPORTANT: Replace "YourUsername/Takween-v1" with your model's name on Hugging Face
15
+ MODEL_ID = "YourUsername/Takween-v1"
16
  BASE_MODEL_ID = "runwayml/stable-diffusion-v1-5"
17
  MAX_SEED = np.iinfo(np.int32).max
18
 
19
+ # Project Logo (Embedded SVG)
20
  LOGO_SVG = """
21
  <svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
22
  <path d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2z"></path>
 
35
 
36
  pipe = pipe.to(DEVICE)
37
 
38
+ # --- 3. Professional Theme (Golden Version) ---
39
  theme = gr.themes.Base(
40
+ primary_hue=gr.themes.colors.amber, # Golden Theme Primary Color
41
  secondary_hue=gr.themes.colors.neutral,
42
  font=[gr.themes.GoogleFont("IBM Plex Sans"), "system-ui", "sans-serif"],
43
  ).set(
 
56
 
57
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
58
 
59
+ # Update UI to show loading state
60
  yield {
61
  output_image: gr.update(value=None, interactive=False, visible=True),
62
  run_button: gr.update(interactive=False, value="Generating..."),
63
  }
64
 
65
+ # Generate the image
66
  image = pipe(
67
  prompt=prompt,
68
  negative_prompt=negative_prompt,
 
71
  generator=generator,
72
  ).images[0]
73
 
74
+ # Update UI with the final result
75
  yield {
76
  output_image: gr.update(value=image, interactive=True),
77
  output_seed: gr.update(value=seed),