Update app.py
app.py CHANGED
@@ -1,52 +1,73 @@
-from fastapi import FastAPI
+from fastapi import FastAPI
 from pydantic import BaseModel
+from typing import List, Literal, Optional
 from huggingface_hub import InferenceClient
+from fastapi.responses import JSONResponse
-import
-import time
 import uuid
-
-
+import time
+import uvicorn
 
 app = FastAPI()
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
+# OpenAI-compatible request message
 class Message(BaseModel):
-    role:
+    role: Literal["system", "user", "assistant"]
     content: str
 
-
+# OpenAI-compatible request body
+class ChatCompletionRequest(BaseModel):
+    model: str = "zephyr-7b-beta"
+    messages: List[Message]
+    temperature: Optional[float] = 0.7
+    top_p: Optional[float] = 0.95
+    max_tokens: Optional[int] = 512
+    stream: Optional[bool] = False
+
+# OpenAI-compatible response message
+class Choice(BaseModel):
+    index: int
+    message: Message
+    finish_reason: Optional[str] = "stop"
+
+# OpenAI-compatible full response
+class ChatCompletionResponse(BaseModel):
+    id: str
+    object: str = "chat.completion"
+    created: int
     model: str
-
-    temperature: float = 0.7
-    top_p: float = 0.95
-    max_tokens: int = 512
-    stream: bool = False
+    choices: List[Choice]
 
-@app.post("/v1/chat/completions")
-async def
-#
-
+@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
+async def chat_completions(request: ChatCompletionRequest):
+    # Build a HuggingFace-style message list from the request
+    messages = [{"role": m.role, "content": m.content} for m in request.messages]
 
+    # Generate the chat completion
     response_text = ""
     for chunk in client.chat_completion(
-        messages
+        messages,
         max_tokens=request.max_tokens,
         temperature=request.temperature,
         top_p=request.top_p,
-        stream=False
+        stream=True,  # must stream here: with stream=False the client returns one object, not chunks
     ):
-        response_text += chunk.choices[0].delta.content
-
-
-
-"
-
-
-
-
-
-
-
-}
+        response_text += chunk.choices[0].delta.content or ""
+
+    # Build an OpenAI-style response
+    chat_response = ChatCompletionResponse(
+        id=f"chatcmpl-{uuid.uuid4().hex}",
+        created=int(time.time()),
+        model=request.model,
+        choices=[
+            Choice(
+                index=0,
+                message=Message(role="assistant", content=response_text),
+            )
         ]
-
+    )
+    return JSONResponse(content=chat_response.dict())
 
+# Run this file directly (bind 0.0.0.0 so the container can accept traffic)
+if __name__ == "__main__":
+    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
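Note on the stream fix above: with stream=False, InferenceClient.chat_completion returns a single ChatCompletionOutput rather than an iterable of chunks, so looping over chunk.choices[0].delta.content raises at runtime (a likely cause of this Space's runtime error). If streaming is not needed, a minimal non-streaming sketch, assuming the same huggingface_hub client as above, drops the loop entirely:

    # One blocking call; the full reply is on the returned object, no delta chunks.
    response = client.chat_completion(
        messages,
        max_tokens=request.max_tokens,
        temperature=request.temperature,
        top_p=request.top_p,
        stream=False,
    )
    response_text = response.choices[0].message.content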
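Separately, the request model accepts a stream flag but the handler ignores it and always returns a complete JSON body; honoring stream=True would require returning a fastapi StreamingResponse with server-sent-event chunks, which this commit does not attempt.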
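Because the route mirrors the OpenAI chat-completions schema, any OpenAI-compatible client should be able to call it. A quick smoke test with the official openai Python package (the base URL and port are assumptions taken from the uvicorn settings above; the API key is a placeholder because the server does no auth):

    # Hypothetical local smoke test against the server started by app.py.
    from openai import OpenAI

    oai = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")
    resp = oai.chat.completions.create(
        model="zephyr-7b-beta",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(resp.choices[0].message.content)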