Aklavya committed on
Commit
a1de646
·
verified ·
1 Parent(s): 9dff619

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -61
app.py CHANGED
@@ -4,61 +4,32 @@ import uuid
4
  import gradio as gr
5
  import numpy as np
6
  from PIL import Image
7
- import spaces
8
  import torch
9
- from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
 
10
  from typing import Tuple
11
 
12
- def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
13
- styles = {
14
- "3840 x 2160": (
15
- "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
16
- "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
17
- ),
18
- "Style Zero": ("{prompt}", ""),
19
- }
20
- DEFAULT_STYLE_NAME = "3840 x 2160"
21
-
22
- p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
23
- return p.replace("{prompt}", positive), n + negative
24
-
25
- def load_and_prepare_model(model_choice: str):
26
- if model_choice == "DeepFloyd/IF-I-XL-v1.0":
27
- model_id = "DeepFloyd/IF-I-XL-v1.0"
28
- pipe = StableDiffusionPipeline.from_pretrained(
29
- model_id,
30
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
31
- use_safetensors=True,
32
- add_watermarker=False,
33
- ).to(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))
34
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
35
- else:
36
- # Default to RealVisXL_V5.0_Lightning if no choice is made or another option is selected
37
- model_id = "SG161222/RealVisXL_V5.0_Lightning"
38
- pipe = StableDiffusionPipeline.from_pretrained(
39
- model_id,
40
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
41
- use_safetensors=True,
42
- add_watermarker=False,
43
- ).to(torch.device("cuda:0" if torch.cuda.is_available() else "cpu"))
44
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
45
 
46
- return pipe
 
47
 
48
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
49
- if randomize_seed:
50
- seed = random.randint(0, np.iinfo(np.int32).max)
51
- return seed
 
 
 
 
52
 
53
- def save_image(img):
54
- unique_name = str(uuid.uuid4()) + ".png"
55
- img.save(unique_name)
56
- return unique_name
57
 
58
- @spaces.GPU(duration=60, enable_queue=True)
59
  def generate(
60
  prompt: str,
61
- model_choice: str = "DeepFloyd/IF-I-XL-v1.0", # Default model choice
62
  seed: int = 1,
63
  width: int = 1024,
64
  height: int = 1024,
@@ -66,9 +37,28 @@ def generate(
66
  num_inference_steps: int = 25,
67
  randomize_seed: bool = False,
68
  ):
69
- model = load_and_prepare_model(model_choice) # Load the selected model
70
- seed = int(randomize_seed_fn(seed, randomize_seed))
71
- generator = torch.Generator(device=model.device).manual_seed(seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  positive_prompt, negative_prompt = apply_style("3840 x 2160", prompt)
74
 
@@ -83,36 +73,34 @@ def generate(
83
  "output_type": "pil",
84
  }
85
 
 
86
  images = model(**options).images
87
- image_paths = [save_image(img) for img in images]
88
- return image_paths, seed
89
 
 
90
  with gr.Blocks(theme="soft") as demo:
91
  # Centered text "SNAPSCRIBE" at the top of the screen
92
  gr.Markdown("<h1 style='text-align:center; color:white; font-weight:bold; text-decoration:underline;'>SNAPSCRIBE</h1>")
93
 
94
  with gr.Row():
95
  with gr.Column(scale=3):
96
- # Dropdown for model selection
97
- model_dropdown = gr.Dropdown(
98
- label="Select Model",
99
- choices=["DeepFloyd/IF-I-XL-v1.0", "SG161222/RealVisXL_V5.0_Lightning"],
100
- value="DeepFloyd/IF-I-XL-v1.0", # Default model choice
101
- )
102
- # Input Prompt text field
103
  prompt = gr.Textbox(
104
  label="Input Prompt",
105
  placeholder="Describe the image you want to create",
106
  lines=2,
107
  )
 
 
 
 
 
108
  run_button = gr.Button("Generate Image")
109
- gr.Markdown("Developed using the DeepFloyd/IF-I-XL-v1.0 or RealVisXL_V5.0_Lightning model.", elem_id="model_info")
110
  with gr.Column(scale=7):
111
- result = gr.Image(label="Generated Image", type="filepath")
112
 
113
  run_button.click(
114
  fn=generate,
115
- inputs=[prompt, model_dropdown],
116
  outputs=[result],
117
  )
118
 
 
4
  import gradio as gr
5
  import numpy as np
6
  from PIL import Image
 
7
  import torch
8
+ from huggingface_hub import login
9
+ from diffusers import StableDiffusionPipeline
10
  from typing import Tuple
11
 
12
+ # Use the Hugging Face token from environment variable
13
+ token = os.getenv("HF_TOKEN") # Get token from environment variable
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
+ # Log in to Hugging Face using the token
16
+ login(token=token)
17
 
18
+ # Function to load and prepare the model
19
+ def load_and_prepare_model(model_id: str):
20
+ # Load the model using the Stable Diffusion pipeline
21
+ pipe = StableDiffusionPipeline.from_pretrained(
22
+ model_id,
23
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
24
+ use_safetensors=True
25
+ ).to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
26
 
27
+ return pipe
 
 
 
28
 
29
+ # Function to generate the image
30
  def generate(
31
  prompt: str,
32
+ model_choice: str,
33
  seed: int = 1,
34
  width: int = 1024,
35
  height: int = 1024,
 
37
  num_inference_steps: int = 25,
38
  randomize_seed: bool = False,
39
  ):
40
+ # Set the seed
41
+ if randomize_seed:
42
+ seed = random.randint(0, np.iinfo(np.int32).max)
43
+
44
+ generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
45
+
46
+ # Choose model based on the user's selection
47
+ model_id = "DeepFloyd/IF-I-XL-v1.0" if model_choice == "DeepFloyd/IF-I-XL-v1.0" else "RealVisXL_V5.0_Lightning"
48
+ model = load_and_prepare_model(model_id)
49
+
50
+ # Style functions (if needed)
51
+ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
52
+ styles = {
53
+ "3840 x 2160": (
54
+ "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
55
+ "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
56
+ ),
57
+ "Style Zero": ("{prompt}", ""),
58
+ }
59
+ DEFAULT_STYLE_NAME = "3840 x 2160"
60
+ p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
61
+ return p.replace("{prompt}", positive), n + negative
62
 
63
  positive_prompt, negative_prompt = apply_style("3840 x 2160", prompt)
64
 
 
73
  "output_type": "pil",
74
  }
75
 
76
+ # Generate the image
77
  images = model(**options).images
78
+ return images[0]
 
79
 
80
+ # Gradio interface setup
81
  with gr.Blocks(theme="soft") as demo:
82
  # Centered text "SNAPSCRIBE" at the top of the screen
83
  gr.Markdown("<h1 style='text-align:center; color:white; font-weight:bold; text-decoration:underline;'>SNAPSCRIBE</h1>")
84
 
85
  with gr.Row():
86
  with gr.Column(scale=3):
 
 
 
 
 
 
 
87
  prompt = gr.Textbox(
88
  label="Input Prompt",
89
  placeholder="Describe the image you want to create",
90
  lines=2,
91
  )
92
+ # Drop down to select model
93
+ model_choice = gr.Dropdown(
94
+ choices=["DeepFloyd/IF-I-XL-v1.0", "RealVisXL_V5.0_Lightning"],
95
+ label="Choose a Model"
96
+ )
97
  run_button = gr.Button("Generate Image")
 
98
  with gr.Column(scale=7):
99
+ result = gr.Image(label="Generated Image", type="pil")
100
 
101
  run_button.click(
102
  fn=generate,
103
+ inputs=[prompt, model_choice],
104
  outputs=[result],
105
  )
106