tejani committed on
Commit
59bc5c4
·
verified ·
1 Parent(s): 2c50913

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -84
app.py CHANGED
@@ -2,15 +2,11 @@ import gradio as gr
2
  import numpy as np
3
  import random
4
  import os
 
5
 
6
  # Fetch the API token from environment variable
7
  hf_token = os.getenv("HF_TOKEN")
8
-
9
- # Load the model from Hugging Face Inference API
10
- interface = gr.load("models/ZB-Tech/Text-to-Image", token=hf_token)
11
-
12
- MAX_SEED = np.iinfo(np.int32).max
13
- MAX_IMAGE_SIZE = 1024
14
 
15
  def infer(
16
  prompt,
@@ -24,22 +20,17 @@ def infer(
24
  progress=gr.Progress(track_tqdm=True),
25
  ):
26
  if randomize_seed:
27
- seed = random.randint(0, MAX_SEED)
28
 
29
- # Call the interface with the correct key ("inputs" instead of "prompt")
30
- # Start with minimal parameters and add others if supported
31
- image = interface(inputs=prompt)
32
 
33
- # If the model supports additional parameters, you can extend this:
34
- # image = interface(
35
- # inputs=prompt,
36
- # negative_prompt=negative_prompt if negative_prompt else None,
37
- # seed=seed,
38
- # width=width,
39
- # height=height,
40
- # guidance_scale=guidance_scale,
41
- # num_inference_steps=num_inference_steps,
42
- # )
43
 
44
  return image, seed
45
 
@@ -73,73 +64,11 @@ with gr.Blocks(css=css) as demo:
73
 
74
  result = gr.Image(label="Result", show_label=False)
75
 
76
- with gr.Accordion("Advanced Settings", open=False):
77
- negative_prompt = gr.Text(
78
- label="Negative prompt",
79
- max_lines=1,
80
- placeholder="Enter a negative prompt",
81
- visible=True,
82
- )
83
-
84
- seed = gr.Slider(
85
- label="Seed",
86
- minimum=0,
87
- maximum=MAX_SEED,
88
- step=1,
89
- value=0,
90
- )
91
-
92
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
93
-
94
- with gr.Row():
95
- width = gr.Slider(
96
- label="Width",
97
- minimum=256,
98
- maximum=MAX_IMAGE_SIZE,
99
- step=32,
100
- value=512,
101
- )
102
-
103
- height = gr.Slider(
104
- label="Height",
105
- minimum=256,
106
- maximum=MAX_IMAGE_SIZE,
107
- step=32,
108
- value=512,
109
- )
110
-
111
- with gr.Row():
112
- guidance_scale = gr.Slider(
113
- label="Guidance scale",
114
- minimum=0.0,
115
- maximum=10.0,
116
- step=0.1,
117
- value=1.0,
118
- )
119
-
120
- num_inference_steps = gr.Slider(
121
- label="Number of inference steps",
122
- minimum=1,
123
- maximum=50,
124
- step=1,
125
- value=10,
126
- )
127
-
128
- gr.Examples(examples=examples, inputs=[prompt])
129
  gr.on(
130
  triggers=[run_button.click, prompt.submit],
131
  fn=infer,
132
- inputs=[
133
- prompt,
134
- negative_prompt,
135
- seed,
136
- randomize_seed,
137
- width,
138
- height,
139
- guidance_scale,
140
- num_inference_steps,
141
- ],
142
- outputs=[result, seed],
143
  )
144
 
145
  if __name__ == "__main__":
 
2
  import numpy as np
3
  import random
4
  import os
5
+ import requests
6
 
7
  # Fetch the API token from environment variable
8
  hf_token = os.getenv("HF_TOKEN")
9
+ API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
 
 
 
 
 
10
 
11
  def infer(
12
  prompt,
 
20
  progress=gr.Progress(track_tqdm=True),
21
  ):
22
  if randomize_seed:
23
+ seed = random.randint(0, np.iinfo(np.int32).max)
24
 
25
+ headers = {"Authorization": f"Bearer {hf_token}"}
26
+ payload = {"inputs": prompt} # Start with minimal payload
27
+ response = requests.post(API_URL, headers=headers, json=payload)
28
 
29
+ if response.status_code != 200:
30
+ raise Exception(f"API Error: {response.status_code} - {response.text}")
31
+
32
+ # Assuming the response is an image (binary data)
33
+ image = response.content # Gradio will handle binary image data
 
 
 
 
 
34
 
35
  return image, seed
36
 
 
64
 
65
  result = gr.Image(label="Result", show_label=False)
66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  gr.on(
68
  triggers=[run_button.click, prompt.submit],
69
  fn=infer,
70
+ inputs=[prompt],
71
+ outputs=[result],
 
 
 
 
 
 
 
 
 
72
  )
73
 
74
  if __name__ == "__main__":