Maxenoz commited on
Commit
ad79e87
·
verified ·
1 Parent(s): 64d8344

Create server.py

Browse files
Files changed (1) hide show
  1. server.py +79 -0
server.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from fastapi import FastAPI, HTTPException
3
+ from pydantic import BaseModel
4
+ import openai
5
+ import torch
6
+ from transformers import pipeline
7
+ from diffusers import StableDiffusionPipeline
8
+ from PIL import Image
9
+ import io
10
+ import base64
11
+ import soundfile as sf
12
+ import numpy as np
13
+
14
# Initialize APIs
# Key is read once at import time; if OPENAI_API_KEY is unset this is None
# and OpenAI calls will fail per-request rather than at startup.
openai.api_key = os.getenv("OPENAI_API_KEY")

app = FastAPI(title="Maxenoz AI Server")
18
+
19
# Request models
class TextRequest(BaseModel):
    """Body for POST /ai/text."""

    # Raw user prompt, forwarded verbatim as a single chat message.
    input_text: str
22
+
23
class ImageRequest(BaseModel):
    """Body for POST /ai/image."""

    # Text prompt passed directly to the Stable Diffusion pipeline.
    prompt: str
25
+
26
class AudioRequest(BaseModel):
    """Body for POST /ai/audio."""

    # NOTE(review): `seed` is accepted but never read by the audio endpoint
    # (it currently returns fixed silence) — presumably reserved for a real
    # generator later.
    seed: int = 42
28
+
29
# --- Health check ---
@app.get("/")
def health():
    """Liveness probe: report that the API process is up and serving."""
    payload = {"status": "OK", "message": "Server is running"}
    return payload
33
+
34
# --- GPT Text Endpoint ---
@app.post("/ai/text")
def ai_text(request: TextRequest):
    """Send the user's text to the OpenAI chat API and return its reply.

    Any failure (network, auth, malformed response) is surfaced as a 500
    whose detail is the exception text.
    """
    chat_messages = [{"role": "user", "content": request.input_text}]
    try:
        # NOTE(review): openai.ChatCompletion is the pre-1.0 SDK interface;
        # confirm the pinned `openai` version still exposes it.
        completion = openai.ChatCompletion.create(
            model="gpt-4-turbo",
            messages=chat_messages,
        )
        reply = completion.choices[0].message.content
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return {"output_text": reply}
45
+
46
# --- Image Generation Endpoint ---
# Cached pipeline instance: the original code called from_pretrained() on
# every request, re-loading a multi-gigabyte model each time. Load it lazily,
# once per process, and reuse it.
_sd_pipeline = None


def _get_sd_pipeline():
    """Load (once) and return the Stable Diffusion pipeline on CPU."""
    global _sd_pipeline
    if _sd_pipeline is None:
        pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        _sd_pipeline = pipe.to("cpu")  # Change to "cuda" if GPU available
    return _sd_pipeline


@app.post("/ai/image")
def ai_image(request: ImageRequest):
    """Generate one image for `request.prompt`.

    Returns the image as a base64-encoded PNG in {"image_base64": ...};
    any failure (model load, inference, encoding) becomes a 500 whose
    detail is the exception text.
    """
    try:
        pipe = _get_sd_pipeline()
        image = pipe(request.prompt).images[0]
        # Encode image to base64 PNG for a JSON-safe payload.
        buf = io.BytesIO()
        image.save(buf, format="PNG")
        img_b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
        return {"image_base64": img_b64}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
62
+
63
# --- Audio Generation Endpoint (placeholder) ---
@app.post("/ai/audio")
def ai_audio(request: AudioRequest):
    """Placeholder: return one second of 16 kHz silence as base64 WAV.

    `request.seed` is currently ignored; the response is always the same
    silent clip. Failures become a 500 with the exception text.
    """
    try:
        sample_rate = 16000
        silence = np.zeros(sample_rate)  # 1 second of zeros at 16 kHz
        wav_buf = io.BytesIO()
        sf.write(wav_buf, silence, sample_rate, format="WAV")
        encoded = base64.b64encode(wav_buf.getvalue()).decode("utf-8")
        return {"audio_base64": encoded}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
75
+
76
# --- Run server ---
if __name__ == "__main__":
    # Imported here so the module can be served by an external ASGI runner
    # without requiring uvicorn at import time.
    import uvicorn

    bind_host, bind_port = "0.0.0.0", 7860
    uvicorn.run(app, host=bind_host, port=bind_port)