BoxzDev committed
Commit cf2ce1c · verified · 1 Parent(s): 3d68382

Update app.py

Files changed (1):
  app.py +34 -55
app.py CHANGED
@@ -1,64 +1,43 @@
  import gradio as gr
  from huggingface_hub import InferenceClient
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ import uvicorn

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
+ # Create FastAPI app
+ app = FastAPI()

-     response = ""
+ # Hugging Face model
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
+ # Request format
+ class Request(BaseModel):
+     message: str
+     history: list[tuple[str, str]] = []
+     system_message: str = "You are a friendly chatbot."
+     max_tokens: int = 512
+     temperature: float = 0.7
+     top_p: float = 0.95
+
+ @app.post("/chat")
+ def chat(req: Request):
+     messages = [{"role": "system", "content": req.system_message}]
+
+     for user_msg, bot_reply in req.history:
+         if user_msg:
+             messages.append({"role": "user", "content": user_msg})
+         if bot_reply:
+             messages.append({"role": "assistant", "content": bot_reply})
+
+     messages.append({"role": "user", "content": req.message})
+
+     response_text = ""
+     for message in client.chat_completion(messages, max_tokens=req.max_tokens, stream=True, temperature=req.temperature, top_p=req.top_p):
          token = message.choices[0].delta.content
+         response_text += token

-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
+     return {"response": response_text}

+ # Launch API
  if __name__ == "__main__":
-     demo.launch()
+     uvicorn.run(app, host="0.0.0.0", port=7860)
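
For reference, a minimal sketch of calling the new /chat endpoint from a client, assuming the server defined in app.py is reachable at http://localhost:7860 (the URL, the example message, and the use of the requests library are illustrative assumptions, not part of the commit; the payload fields mirror the Request model above):

import requests

# Illustrative payload; every field except "message" has a default in the Request model
payload = {
    "message": "Hello! What can you do?",
    "history": [],
    "system_message": "You are a friendly chatbot.",
    "max_tokens": 256,
    "temperature": 0.7,
    "top_p": 0.95,
}

# POST to the /chat route added in this commit and print the accumulated reply
resp = requests.post("http://localhost:7860/chat", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["response"])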