Maryamm committed on
Commit
17088ae
·
verified ·
1 Parent(s): a241a9e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -142
app.py CHANGED
@@ -1,9 +1,6 @@
1
  import os
2
- import gradio as gr
3
- import numpy as np
4
- import random
5
  from diffusers import DiffusionPipeline
6
- import torch
7
 
8
  # Retrieve the Hugging Face token from the environment variable
9
  hf_token = os.getenv("HUGGINGFACE_TOKEN")
@@ -12,147 +9,19 @@ hf_token = os.getenv("HUGGINGFACE_TOKEN")
12
  if hf_token is None:
13
  raise EnvironmentError("Hugging Face token not found in environment variables. Make sure it's correctly set.")
14
 
15
- # Set device to CPU or GPU based on availability
16
- device = "cuda" if torch.cuda.is_available() else "cpu"
17
-
18
  # Load the model
19
- if torch.cuda.is_available():
20
- torch.cuda.max_memory_allocated(device=device)
21
  pipe = DiffusionPipeline.from_pretrained(
22
- "Maryamm/Lora_finetune_mnist",
23
- torch_dtype=torch.float16,
24
- variant="fp16",
25
- use_safetensors=True,
26
- use_auth_token=hf_token # Use token to authenticate
27
  )
28
- pipe.enable_xformers_memory_efficient_attention()
29
- pipe = pipe.to(device)
30
- else:
31
- pipe = DiffusionPipeline.from_pretrained(
32
- "Maryamm/Lora_finetune_mnist",
33
- use_safetensors=True,
34
- use_auth_token=hf_token # Use token to authenticate
35
- )
36
- pipe = pipe.to(device)
37
-
38
- MAX_SEED = np.iinfo(np.int32).max
39
- MAX_IMAGE_SIZE = 512
40
 
41
- def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
42
- if randomize_seed:
43
- seed = random.randint(0, MAX_SEED)
44
-
45
- generator = torch.Generator(device=device).manual_seed(seed)
46
-
47
- image = pipe(
48
- prompt=prompt,
49
- negative_prompt=negative_prompt,
50
- guidance_scale=guidance_scale,
51
- num_inference_steps=num_inference_steps,
52
- width=width,
53
- height=height,
54
- generator=generator
55
- ).images[0]
56
-
57
  return image
58
 
59
- examples = [
60
- "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
61
- "An astronaut riding a green horse",
62
- "A delicious ceviche cheesecake slice",
63
- ]
64
-
65
- css = """
66
- #col-container {
67
- margin: 0 auto;
68
- max-width: 520px;
69
- }
70
- """
71
-
72
- power_device = "GPU" if torch.cuda.is_available() else "CPU"
73
-
74
- with gr.Blocks(css=css) as demo:
75
-
76
- with gr.Column(elem_id="col-container"):
77
- gr.Markdown(f"""
78
- # Text-to-Image Gradio Template
79
- Currently running on {power_device}.
80
- """)
81
-
82
- with gr.Row():
83
- prompt = gr.Text(
84
- label="Prompt",
85
- show_label=False,
86
- max_lines=1,
87
- placeholder="Enter your prompt",
88
- container=False,
89
- )
90
-
91
- run_button = gr.Button("Run", scale=0)
92
-
93
- result = gr.Image(label="Result", show_label=False)
94
-
95
- with gr.Accordion("Advanced Settings", open=False):
96
- negative_prompt = gr.Text(
97
- label="Negative prompt",
98
- max_lines=1,
99
- placeholder="Enter a negative prompt",
100
- visible=False,
101
- )
102
-
103
- seed = gr.Slider(
104
- label="Seed",
105
- minimum=0,
106
- maximum=MAX_SEED,
107
- step=1,
108
- value=0,
109
- )
110
-
111
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
112
-
113
- with gr.Row():
114
- width = gr.Slider(
115
- label="Width",
116
- minimum=256,
117
- maximum=MAX_IMAGE_SIZE,
118
- step=32,
119
- value=512,
120
- )
121
-
122
- height = gr.Slider(
123
- label="Height",
124
- minimum=256,
125
- maximum=MAX_IMAGE_SIZE,
126
- step=32,
127
- value=512,
128
- )
129
-
130
- with gr.Row():
131
- guidance_scale = gr.Slider(
132
- label="Guidance scale",
133
- minimum=0.0,
134
- maximum=10.0,
135
- step=0.1,
136
- value=0.0,
137
- )
138
-
139
- num_inference_steps = gr.Slider(
140
- label="Number of inference steps",
141
- minimum=1,
142
- maximum=12,
143
- step=1,
144
- value=2,
145
- )
146
-
147
- gr.Examples(
148
- examples=examples,
149
- inputs=[prompt]
150
- )
151
-
152
- run_button.click(
153
- fn=infer,
154
- inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
155
- outputs=[result]
156
- )
157
-
158
- demo.queue().launch()
 
1
  import os
 
 
 
2
  from diffusers import DiffusionPipeline
3
+ import gradio as gr
4
 
5
# Pull the Hugging Face access token from the environment; the deployment's
# secrets must define HUGGINGFACE_TOKEN so the private model below can be fetched.
hf_token = os.getenv("HUGGINGFACE_TOKEN")

# Fail fast at startup with a clear message rather than at model-load time.
if hf_token is None:
    raise EnvironmentError(
        "Hugging Face token not found in environment variables. Make sure it's correctly set."
    )
11
 
 
 
 
12
# Load the model once at startup so every inference request reuses the same pipeline.
try:
    pipe = DiffusionPipeline.from_pretrained(
        "Maryamm/Lora_finetune_mnist",  # Your private model's path
        use_auth_token=hf_token,  # NOTE(review): `use_auth_token` is deprecated upstream; newer diffusers expects `token=` — confirm pinned version before switching
        force_download=True,  # Ensure it downloads the model
        # NOTE(review): force_download=True re-fetches the weights on every start;
        # consider dropping it once the HF cache is confirmed to behave correctly.
    )
    pipe = pipe.to("cpu")  # Ensure it's running on CPU
except Exception as e:
    # Chain the original exception (`from e`) so the root-cause traceback is
    # preserved instead of being reduced to its string representation.
    raise RuntimeError(f"Failed to load model: {e}") from e
 
 
 
 
 
 
 
 
 
22
 
23
def infer(prompt):
    """Run the module-level diffusion pipeline on *prompt* and return the first generated image."""
    return pipe(prompt).images[0]
26
 
27
+ gr.Interface(fn=infer, inputs="text", outputs="image").launch()