speedartificialintelligence1122 committed
Commit 814e862 (verified) · Parent(s): 171da0b

Update app.py

Files changed (1):
  app.py  +25 -8
app.py CHANGED
@@ -3,21 +3,38 @@ from pydantic import BaseModel
 from diffusers import StableDiffusionPipeline
 import torch
 
+import uuid
+import base64
+from io import BytesIO
+from PIL import Image
+
 app = FastAPI()
 
-# Load model at startup
+# Load the model (NO fp16 issues now)
 pipe = StableDiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
-    torch_dtype=torch.float16,
-    revision="fp16"
-).to("cuda" if torch.cuda.is_available() else "cpu")
+    "runwayml/stable-diffusion-v1-5"
+)
+pipe = pipe.to("cpu")  # Or use .to("cuda") if you're on GPU
 
+# For receiving prompts from the frontend
 class Prompt(BaseModel):
     text: str
 
+@app.get("/")
+def greet_json():
+    return {"message": "Text to Image generation ready!"}
+
 @app.post("/generate")
 def generate_image(prompt: Prompt):
     image = pipe(prompt.text).images[0]
-    image_path = f"output.png"
-    image.save(image_path)
-    return {"message": "Image generated", "image_path": image_path}
+
+    # Convert image to base64 to send over HTTP
+    buffered = BytesIO()
+    image.save(buffered, format="PNG")
+    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+    return {
+        "image_base64": img_str,
+        "status": "success",
+        "prompt": prompt.text
+    }
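The rewritten endpoint no longer writes output.png to disk; it base64-encodes the PNG bytes so a frontend can read the image straight out of the JSON response. Below is a minimal client sketch (not part of this commit) that posts a prompt and decodes the returned image; the base URL and port are assumptions (7860 is the usual Hugging Face Spaces default), and the output filename is arbitrary.

# Usage sketch: call /generate and decode the base64 PNG it returns.
# Host/port and filename are assumptions, not taken from the repository.
import base64
import requests

resp = requests.post(
    "http://localhost:7860/generate",  # assumed host/port
    json={"text": "a watercolor painting of a lighthouse"},
)
resp.raise_for_status()
payload = resp.json()

# The endpoint returns the PNG bytes as a base64 string under "image_base64".
with open("result.png", "wb") as f:
    f.write(base64.b64decode(payload["image_base64"]))
print(payload["status"], "-", payload["prompt"])

Returning base64 JSON instead of a server-side file path keeps the API stateless and works even when the Space's filesystem is ephemeral; the trade-off is a roughly one-third larger response body than raw PNG bytes.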