arshadrana committed on
Commit
2b2ae4f
·
verified ·
1 Parent(s): 74e7283

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -20
app.py CHANGED
@@ -1,27 +1,47 @@
1
- import gradio as gr
2
- import requests
3
- import io
4
  import os
5
- from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
8
- headers = {"Authorization": f"Bearer {os.getenv('HF_AUTH_TOKEN')}"}
 
 
 
 
 
 
9
 
10
- def query_image(prompt):
11
- payload = {"inputs": prompt}
12
- response = requests.post(API_URL, headers=headers, json=payload)
13
- image_bytes = response.content
14
- image = Image.open(io.BytesIO(image_bytes))
15
  return image
16
 
17
- # Gradio interface
18
- iface = gr.Interface(
19
- fn=query_image,
20
- inputs=gr.Textbox(label="Enter Prompt", placeholder="Give me lodhrangpt image"),
21
- outputs=gr.Image(label="Generated Image"),
22
- title="Image Generator",
23
- description="Enter a prompt to generate an image using the FLUX model."
24
  )
25
 
26
- # Launch the Gradio app
27
- iface.launch()
 
 
 
 
1
  import os
2
+ import torch
3
+ import gradio as gr
4
+ from diffusers import StableDiffusion3Pipeline
5
+ from huggingface_hub import login
6
+
7
# Read the Hugging Face access token from the environment and authenticate.
# A missing token is fatal: the gated SD3 weights cannot be downloaded without it.
hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    raise ValueError("Please set your Hugging Face token as the HF_TOKEN environment variable.")
login(token=hf_token)
15
+
16
# Module-level cache for the pipeline. Loading SD3 pulls multiple GB of
# weights; reloading on every Gradio request (as the original code did)
# would make each generation take minutes. Loaded lazily on first call.
_pipeline = None


def image_generator(prompt):
    """Generate a 1024x1024 image from a text prompt with Stable Diffusion 3.

    Args:
        prompt: Text description of the desired image.

    Returns:
        A PIL.Image.Image produced by the SD3 pipeline.

    Raises:
        Whatever `StableDiffusion3Pipeline.from_pretrained` raises on the
        first call if the model cannot be downloaded (e.g. bad/missing token).
    """
    global _pipeline
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if _pipeline is None:
        _pipeline = StableDiffusion3Pipeline.from_pretrained(
            "stabilityai/stable-diffusion-3-medium-diffusers",
            # fp16 only makes sense on GPU; CPU inference needs fp32.
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            # Drop the T5 text encoder to cut memory use substantially.
            text_encoder_3=None,
            tokenizer_3=None,
            # `use_auth_token` is deprecated; `token` is the supported kwarg.
            token=hf_token,
        )
        _pipeline.to(device)

    image = _pipeline(
        prompt=prompt,
        negative_prompt="blurred, ugly, watermark, low, resolution, blurry",
        num_inference_steps=40,
        height=1024,
        width=1024,
        guidance_scale=9.0,
    ).images[0]

    return image
38
 
39
# Build the UI pieces first, then wire them into a single-input web app.
_prompt_input = gr.Textbox(lines=2, placeholder="Enter your prompt...")
_image_output = gr.Image(type="pil")

interface = gr.Interface(
    fn=image_generator,
    inputs=_prompt_input,
    outputs=_image_output,
    title="Image Generator App",
    description="This is a simple image generator app using HuggingFace's Stable Diffusion 3 model.",
)

# Start the Gradio server (blocks until the app is stopped).
interface.launch()