pakito312 committed · 5ba455c · 1 Parent(s): c619745
update

api.py CHANGED
@@ -74,6 +74,26 @@ class ChatRequest(BaseModel):
     messages: List[ChatMessage]
     temperature: float = Field(0.2, ge=0.1, le=1.0)
     max_tokens: int = Field(256, ge=1, le=1024)
+# ========== OPENAI / OPENROUTER SCHEMAS ==========
+
+class OpenAIMessage(BaseModel):
+    role: str
+    content: str
+
+class ChatCompletionRequest(BaseModel):
+    model: str
+    messages: List[OpenAIMessage]
+    temperature: Optional[float] = 0.2
+    max_tokens: Optional[int] = 256
+    top_p: Optional[float] = 0.95
+    stream: Optional[bool] = False
+
+class CompletionRequest(BaseModel):
+    model: str
+    prompt: str
+    temperature: Optional[float] = 0.2
+    max_tokens: Optional[int] = 256
+    top_p: Optional[float] = 0.95
 
 # ========== GESTION DU MODÈLE ==========
 class ModelManager:
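The first hunk adds Pydantic request schemas that mirror the OpenAI chat payload shape. As a quick sanity check, a minimal standalone sketch of how such a body parses (assuming Pydantic v1 semantics, consistent with the `.dict()` calls in the second hunk; the model name is a hypothetical placeholder):

from typing import List, Optional
from pydantic import BaseModel

class OpenAIMessage(BaseModel):
    role: str
    content: str

class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[OpenAIMessage]
    temperature: Optional[float] = 0.2
    max_tokens: Optional[int] = 256
    top_p: Optional[float] = 0.95
    stream: Optional[bool] = False

# An OpenAI-style JSON body parses directly; omitted fields
# fall back to the declared defaults.
body = {
    "model": "local-model",  # hypothetical name; the Space serves its own model
    "messages": [{"role": "user", "content": "Bonjour !"}],
}
req = ChatCompletionRequest(**body)
assert req.temperature == 0.2 and req.stream is False
print([m.dict() for m in req.messages])  # [{'role': 'user', 'content': 'Bonjour !'}]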
@@ -449,6 +469,68 @@ async def download_status():
     }
     return {"downloaded": False, "message": "No model downloaded yet"}
 
+@app.post("/v1/chat/completions")
+async def openai_chat(request: ChatCompletionRequest):
+    try:
+        messages = [m.dict() for m in request.messages]
+
+        response_text = model_manager.chat(
+            messages=messages,
+            temperature=request.temperature,
+            max_tokens=request.max_tokens
+        )
+
+        return {
+            "id": f"chatcmpl-{int(time.time())}",
+            "object": "chat.completion",
+            "created": int(time.time()),
+            "model": request.model,
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": response_text
+                    },
+                    "finish_reason": "stop"
+                }
+            ],
+            "usage": {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0
+            }
+        }
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+@app.post("/v1/completions")
+async def openai_completion(request: CompletionRequest):
+    try:
+        response_text = model_manager.generate(
+            prompt=request.prompt,
+            temperature=request.temperature,
+            max_tokens=request.max_tokens,
+            top_p=request.top_p
+        )
+
+        return {
+            "id": f"cmpl-{int(time.time())}",
+            "object": "text_completion",
+            "created": int(time.time()),
+            "model": request.model,
+            "choices": [
+                {
+                    "text": response_text,
+                    "index": 0,
+                    "finish_reason": "stop"
+                }
+            ]
+        }
+
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
 if __name__ == "__main__":
     import uvicorn
     port = int(os.getenv("PORT", 7860))
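With the commit applied, the Space answers in the OpenAI wire format on both new routes. A minimal stdlib-only sketch of a client call, assuming the API is reachable at http://localhost:7860 (the default PORT read in `__main__`) and using a placeholder model name:

import json
from urllib.request import Request, urlopen

BASE = "http://localhost:7860"  # assumed local Space URL; adjust to your deployment

def post(path: str, payload: dict) -> dict:
    req = Request(
        BASE + path,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urlopen(req) as resp:
        return json.load(resp)

# OpenAI-style chat request against the new /v1/chat/completions route.
chat = post("/v1/chat/completions", {
    "model": "local-model",  # echoed back in the response; the Space serves its own model
    "messages": [{"role": "user", "content": "Say hello."}],
    "max_tokens": 64,
})
print(chat["choices"][0]["message"]["content"])

# Plain prompt against the new /v1/completions route.
comp = post("/v1/completions", {
    "model": "local-model",
    "prompt": "Once upon a time",
    "max_tokens": 64,
})
print(comp["choices"][0]["text"])

Note that the chat handler hard-codes the `usage` token counts to zero, so OpenAI-compatible clients that meter usage will read zeros rather than real counts.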