GoGma committed on
Commit
e859e00
verified
1 Parent(s): 32027a1

Update api.py

Browse files

feat: reuse sofia-rivera api structure in backend.

Files changed (1) hide show
  1. api.py +35 -14
api.py CHANGED
@@ -1,25 +1,46 @@
1
# Pre-change version: minimal FastAPI service with a stubbed image-generation API.
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn

app = FastAPI()

class GenerateRequest(BaseModel):
    """Request payload for the /generate-image endpoint."""
    # Free-form text prompt to generate an image from.
    prompt: str
 
 
 
 
 
 
 
 
9
 
10
@app.get("/health")
def health_check():
    """Liveness probe: returns a static OK payload so orchestrators can verify the service is up."""
    return {"status": "ok"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
@app.post("/generate-image")
def generate_image(req: GenerateRequest):
    """Stub endpoint: echoes the prompt with a placeholder image URL.

    No image is actually generated yet — the fixed example URL below is a stand-in.
    """
    # Here we will later connect to the real generation logic (FLUX / SD).
    return {
        "status": "received",
        "prompt": req.prompt,
        "image_url": "https://example.com/fake-image-url"
    }
22
 
23
if __name__ == "__main__":
    # Hugging Face expects the server to listen on 0.0.0.0:7860.
    uvicorn.run(app, host="0.0.0.0", port=7860)
 
1
from fastapi import FastAPI, BackgroundTasks, HTTPException
from pydantic import BaseModel

# Application instance; the title appears in the auto-generated OpenAPI docs.
app = FastAPI(title="Sofia AI Backend")
5
 
6
class MessageRequest(BaseModel):
    """Inbound chat message forwarded to the /webhook/message endpoint."""
    platform: str  # source platform identifier — presumably e.g. "telegram"; verify against caller
    message: str  # raw message text
    user_id: str  # platform-specific user identifier
    timestamp: str | None = None  # optional timestamp string; format is not enforced here
11
+
12
class ImageGenerationRequest(BaseModel):
    """Parameters accepted by the /api/generate endpoint."""
    prompt_type: str  # preset prompt selector; required, but an empty string passes validation here
    custom_prompt: str | None = None  # free-form prompt, used instead of / alongside the preset
    model: str = "FLUX.1-schnell"  # generation model identifier
16
 
17
@app.get("/health")
async def health():
    """Liveness probe: static payload identifying this service."""
    payload = dict(status="ok", service="sofia-ai-backend")
    return payload
20
+
21
@app.post("/webhook/message")
async def webhook_message(body: MessageRequest, background_tasks: BackgroundTasks):
    """Accept an inbound platform message and defer its handling to a background task."""
    # Real processing (persist to a queue, forward to n8n, etc.) will be wired in later;
    # for now the deferred task just logs the message to stdout.
    def _log_message() -> None:
        print(f"[Message] {body.platform}: {body.message} (user: {body.user_id})")

    background_tasks.add_task(_log_message)
    return {"status": "queued"}
28
+
29
@app.post("/api/generate")
async def api_generate(body: ImageGenerationRequest):
    """Stub for image generation (FLUX/SDXL to be connected later).

    Validates that at least one usable prompt source was supplied, then echoes
    the request back with a "pending" status; no image is produced yet.

    Raises:
        HTTPException(400): if neither prompt_type nor custom_prompt carries
            any non-whitespace content.
    """
    # TODO: connect the real generation logic (FLUX/SDXL) here.
    # Normalize with .strip() so whitespace-only values (e.g. "  ") do not
    # slip past validation the way they would with a plain truthiness check.
    custom = (body.custom_prompt or "").strip()
    preset = body.prompt_type.strip()
    if not custom and not preset:
        raise HTTPException(status_code=400, detail="prompt_type or custom_prompt required")

    return {
        "status": "pending",
        "message": "Image generation not yet implemented",
        "prompt_type": body.prompt_type,
        "model": body.model,
    }
42
 
43
if __name__ == "__main__":
    import uvicorn
    # Important: use port 7860 for Hugging Face Spaces.
    uvicorn.run(app, host="0.0.0.0", port=7860)