ngandugilbert committed
Commit ff93a9b · verified · 1 Parent(s): 1f10c9d

Update app.py

Files changed (1)
  1. app.py +73 -0
app.py CHANGED
@@ -0,0 +1,73 @@
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from typing import List, Literal, Optional
+ from huggingface_hub import InferenceClient
+ from fastapi.responses import JSONResponse
+ import uuid
+ import time
+ import uvicorn
+
+ app = FastAPI()
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+ # OpenAI-compatible request message
+ class Message(BaseModel):
+     role: Literal["system", "user", "assistant"]
+     content: str
+
+ # OpenAI-compatible request body
+ class ChatCompletionRequest(BaseModel):
+     model: str = "zephyr-7b-beta"
+     messages: List[Message]
+     temperature: Optional[float] = 0.7
+     top_p: Optional[float] = 0.95
+     max_tokens: Optional[int] = 512
+     stream: Optional[bool] = False
+
+ # OpenAI-compatible response choice
+ class Choice(BaseModel):
+     index: int
+     message: Message
+     finish_reason: Optional[str] = "stop"
+
+ # OpenAI-compatible full response
+ class ChatCompletionResponse(BaseModel):
+     id: str
+     object: str = "chat.completion"
+     created: int
+     model: str
+     choices: List[Choice]
+
+ @app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
+ async def chat_completions(request: ChatCompletionRequest):
+     # Build the Hugging Face-style message list
+     messages = [{"role": m.role, "content": m.content} for m in request.messages]
+
+     # Generate the chat completion; stream=True yields incremental chunks
+     # whose deltas are concatenated into the full response text
+     response_text = ""
+     for chunk in client.chat_completion(
+         messages,
+         max_tokens=request.max_tokens,
+         temperature=request.temperature,
+         top_p=request.top_p,
+         stream=True,
+     ):
+         delta = chunk.choices[0].delta.content
+         if delta:
+             response_text += delta
+
+     # Build the OpenAI-style response
+     chat_response = ChatCompletionResponse(
+         id=f"chatcmpl-{uuid.uuid4().hex}",
+         created=int(time.time()),
+         model=request.model,
+         choices=[
+             Choice(
+                 index=0,
+                 message=Message(role="assistant", content=response_text),
+             )
+         ],
+     )
+     return JSONResponse(content=chat_response.dict())
+
+ # Run this file directly
+ if __name__ == "__main__":
+     uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
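Since the new endpoint mirrors the OpenAI chat-completions API, a minimal sketch of exercising it with the official openai Python client could look like the following. The base_url and port match the uvicorn settings above; the api_key value is an arbitrary placeholder, since the server performs no authentication.

    from openai import OpenAI

    # Point the client at the local FastAPI server instead of api.openai.com
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")

    resp = client.chat.completions.create(
        model="zephyr-7b-beta",
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(resp.choices[0].message.content)

Any OpenAI-compatible tooling should work the same way, because the response body carries the standard id, object, created, model, and choices fields defined by the Pydantic models above.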