LEIDIA committed (verified)
Commit a7b3e91
1 Parent(s): 470fdab

Update app.py

Files changed (1)
  1. app.py +35 -14
app.py CHANGED
@@ -1,3 +1,5 @@
+pip install onnxruntime
+
 import gradio as gr
 import numpy as np
 import random
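Worth noting: `pip install onnxruntime` at the top of `app.py` is a shell command, not Python, so the Space will fail with a SyntaxError on startup. On Hugging Face Spaces the usual fix is to declare the dependency in a `requirements.txt` next to `app.py`; a minimal sketch, with the package list inferred from the imports in this diff:

```
# requirements.txt — installed automatically when the Space builds
gradio
numpy
torch
diffusers
transformers
datasets
onnxruntime
```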
@@ -5,10 +7,16 @@ import random
 # import spaces #[uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 from diffusers import StableDiffusionPipeline
+from diffusers import OnnxRuntimeModel
 import torch
 
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"  # Replace to the model you would like to use
+model_repo_id = "runwayml/stable-diffusion-v1-5"  # Replace to the model you would like to use
+
+pipe = OnnxRuntimeModel.from_pretrained("model_path", provider="CPUExecutionProvider")
+pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch.float16)
+pipe.enable_attention_slicing()  # Splits the attention computation to improve performance on devices with less memory
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
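The two `pipe = ...` assignments overwrite each other: the `OnnxRuntimeModel` loaded from the placeholder path `"model_path"` is discarded one line later, and `torch_dtype=torch.float16` is not usable on a CPU-only Space. A minimal sketch of a CPU-friendly load using only the plain diffusers pipeline (the repo id is taken from this diff; dropping the ONNX path is an assumption about intent):

```python
import torch
from diffusers import StableDiffusionPipeline

model_repo_id = "runwayml/stable-diffusion-v1-5"
device = "cuda" if torch.cuda.is_available() else "cpu"

# float16 weights only help on GPU; fall back to float32 on CPU
torch_dtype = torch.float16 if device == "cuda" else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)
pipe.enable_attention_slicing()  # lowers peak memory during the attention step
```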
@@ -21,6 +29,7 @@ pipe = pipe.to(device)
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 752
 
+
 from datasets import load_dataset, Dataset
 
 dataset = load_dataset("LEIDIA/Data_Womleimg")  # Example of your dataset on Hugging Face
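Since the dataset is loaded here but its columns are not shown in this hunk, a short sketch of how it could be inspected before its descriptions are wired into the prompt examples; the `train` split and column layout are assumptions, not taken from the repo:

```python
from datasets import load_dataset

dataset = load_dataset("LEIDIA/Data_Womleimg")

# Check what the dataset actually contains before indexing specific columns
print(dataset)                        # splits and row counts
print(dataset["train"].column_names)  # assumed "train" split
print(dataset["train"][0])            # first record
```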
@@ -49,6 +58,16 @@ descriptions = [
 ]
 
 
+def infer(prompt, num_inference_steps):
+    image = pipe(
+        prompt=prompt,
+        num_inference_steps=num_inference_steps,
+        height=MAX_IMAGE_SIZE,
+        width=MAX_IMAGE_SIZE,
+    ).images[0]
+    return image
+
+
 # @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
     prompt,
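Note that this simplified `infer` shares its name with the original multi-argument `infer` defined right below it, so the later definition wins and the simplified one becomes dead code unless the old one is removed. Also, 752×752 generation is slow on CPU. A hedged sketch of a seeded, smaller variant (assumes `pipe` and `MAX_SEED` from earlier in the file; the function name and sizes are illustrative):

```python
import numpy as np
import torch

def infer_seeded(prompt, num_inference_steps=15, seed=None):
    # Pick a reproducible seed if none is given (MAX_SEED is defined earlier in app.py)
    if seed is None:
        seed = np.random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cpu").manual_seed(int(seed))
    return pipe(
        prompt=prompt,
        num_inference_steps=num_inference_steps,
        height=512,  # smaller than MAX_IMAGE_SIZE (752) to keep CPU runtime reasonable
        width=512,
        generator=generator,
    ).images[0]
```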
@@ -90,18 +109,20 @@ css = """
 }
 """
 
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Wom_Test")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("## Text-to-Image Optimized for CPU")
+    with gr.Row():
+        prompt = gr.Textbox(label="Prompt")
+        num_inference_steps = gr.Slider(
+            label="Inference Steps", minimum=1, maximum=50, step=1, value=15
+        )
+    with gr.Row():
+        generate_button = gr.Button("Generate")
+        result = gr.Image(label="Generated Image")
+    generate_button.click(infer, inputs=[prompt, num_inference_steps], outputs=result)
+
+demo.launch()
 
             run_button = gr.Button("Run", scale=0, variant="primary")
 
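Because `demo.launch()` now runs in the middle of the file, the original `with gr.Blocks(css=css) as demo:` UI further down (including the orphaned `run_button` context line above) is never reached, and its leftover indentation will likely raise an IndentationError at import time. If the new interface is the one to keep, a small sketch of the usual ending for a Space, with the queue enabled so long CPU generations do not hit request timeouts:

```python
# Sketch: launch as the last statement of app.py
if __name__ == "__main__":
    demo.queue(max_size=4)  # queue long-running CPU jobs instead of timing out
    demo.launch()
```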
 
@@ -156,7 +177,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=10,
                     step=1,
-                    value=2,  # Replace with defaults that work for your model
+                    value=15,  # Replace with defaults that work for your model
                 )
 
         gr.Examples(examples=examples, inputs=[prompt])
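One more detail: in this block the slider's `maximum` is 10, so the new default `value=15` falls outside the slider's range. A hedged sketch of a consistent configuration (the label and maximum here are illustrative, not from the source):

```python
num_inference_steps = gr.Slider(
    label="Number of inference steps",
    minimum=1,
    maximum=50,  # raised so the default of 15 fits inside the range
    step=1,
    value=15,
)
```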
 