import gradio as gr
from huggingface_hub import InferenceClient
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn

# Hugging Face Inference API client for the Zephyr-7B-beta chat model
# (depending on the model's availability you may need to pass an HF token)
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# FastAPI app
app = FastAPI()

# Request schema for the /chat endpoint
class Request(BaseModel):
    message: str
    history: list[tuple[str, str]] = []
    system_message: str = "You are a friendly chatbot."
    max_tokens: int = 512
    temperature: float = 0.7
    top_p: float = 0.95
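# For illustration, a request body matching the schema above (every field
# except "message" is optional and falls back to the defaults; history
# tuples serialize as two-element JSON arrays):
# {
#   "message": "Hello!",
#   "history": [["Hi", "Hey there!"]],
#   "system_message": "You are a friendly chatbot.",
#   "max_tokens": 256,
#   "temperature": 0.7,
#   "top_p": 0.95
# }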

@app.post("/chat")  # ✅ This makes the API work with Roblox!
def chat(req: Request):
    messages = [{"role": "system", "content": req.system_message}]
    
    for user_msg, bot_reply in req.history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_reply:
            messages.append({"role": "assistant", "content": bot_reply})
    
    messages.append({"role": "user", "content": req.message})

    response_text = ""

    for message in client.chat_completion(
        messages, 
        max_tokens=req.max_tokens, 
        stream=True, 
        temperature=req.temperature, 
        top_p=req.top_p
    ):
        token = message.choices[0].delta.content
        response_text += token

    return {"response": response_text}  # ✅ Returns plain text response

# ✅ Gradio Interface (optional, can be removed if using FastAPI only)
def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]

    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = message.choices[0].delta.content
        response += token
        yield response

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

# Run both Gradio and FastAPI on one port. Launching them in separate threads
# would make both race for port 7860 (Gradio's default), so mount the Gradio
# UI on the FastAPI app instead; /chat keeps working because its route was
# registered before the mount.
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
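# Minimal smoke test from a separate process (a sketch, assuming the server
# above is running locally and the `requests` package is installed):
#
#   import requests
#   r = requests.post(
#       "http://localhost:7860/chat",
#       json={"message": "Give me a one-line greeting."},
#       timeout=60,
#   )
#   print(r.json()["response"])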