File size: 867 Bytes
9d171ff
3ad0ce8
5893ff1
 
2a20905
9d171ff
83f893c
 
 
 
5893ff1
 
886526c
5893ff1
 
 
fe69f0a
123fe0d
6aeb881
5893ff1
99dc546
036f5d5
6aeb881
0d29a35
123fe0d
 
9d171ff
5893ff1
9d171ff
f221a14
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
import gradio as gr
import random
from huggingface_hub import InferenceClient

# Hugging Face Inference API client bound to the hosted Qwen2.5-72B instruct
# model; respond() below uses it for streaming chat completions.
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")

def echo(message, history):
    """Return a random magic-8-ball-style answer.

    Both *message* and *history* are accepted to satisfy the Gradio chat
    callback signature but are deliberately ignored.
    """
    answers = (
        "yes",
        "no",
        "silly ahh question",
        "not likely",
        "ask again",
        "absolutely",
        "YASSS",
    )
    return random.choice(answers)

def respond(message, history):
    """Stream a reply from the hosted Qwen model in a sassy-1800s persona.

    Args:
        message: The latest user message (str).
        history: Prior turns as OpenAI-style ``{"role", "content"}`` dicts
            (Gradio ``type="messages"`` format), or empty/None on first turn.

    Yields:
        str: The accumulated assistant response after each streamed token,
        so the UI can render the reply progressively.
    """
    messages = [{"role": "system", "content": "You are a sassy chatbot from the 1800s."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Use a distinct loop variable: the original shadowed the `message`
    # parameter inside the loop, which is confusing and error-prone.
    for chunk in client.chat_completion(
        messages,
        max_tokens=100,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # Streaming deltas can carry content=None (e.g. role-only or final
        # chunks); concatenating None to a str would raise TypeError.
        if token:
            response += token
            yield response
    
# Wire the streaming generator into a Gradio chat UI. `type = "messages"`
# makes `history` arrive as a list of OpenAI-style {"role", "content"} dicts,
# which is the shape respond() extends into its prompt.
chatbot = gr.ChatInterface(respond, type = "messages")

# Start the local web server (blocking call).
chatbot.launch()