File size: 1,001 Bytes
4381b6b
cdcc40c
71c44b0
4381b6b
4a4d15c
99af4e8
71c44b0
 
27f7078
71c44b0
 
 
4a4d15c
 
 
 
 
 
6b181bf
71c44b0
 
 
3c6b017
cdcc40c
 
4a4d15c
 
 
4381b6b
71c44b0
4381b6b
1a08d11
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import gradio as gr
import random
from huggingface_hub import InferenceClient


client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")

def respond(message, history):
    """Stream an LLM reply to *message*, yielding the partial text as it grows.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[dict] | None
        Prior turns as OpenAI-style ``{"role", "content"}`` dicts
        (supplied by ``gr.ChatInterface`` with ``type='messages'``).

    Yields
    ------
    str
        The accumulated assistant response so far (for live streaming UI).
    """
    messages = [{"role": "system", "content": "You are an angry chatbot."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # stream=True makes chat_completion yield incremental chunks; each
    # chunk carries the next token in choices[0].delta.content.
    for chunk in client.chat_completion(
        messages,
        max_tokens=100,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        if token:  # the final chunk's delta may be None
            response += token
        yield response

def echo(message, history):
    """Return a random canned answer, ignoring the conversation entirely.

    Parameters
    ----------
    message : str
        The user message (unused).
    history : list | None
        The chat history (unused).

    Returns
    -------
    str
        One of a fixed set of yes/no-style replies, chosen uniformly
        at random.
    """
    choices = ["yes", "no", "sure", "absolutely", "of course not", "by no means"]
    # random.choice picks uniformly; the original computed this but
    # never returned it, so the handler always produced None.
    return random.choice(choices)

# Wire the streaming respond() handler into a chat UI; type='messages'
# makes Gradio pass history as OpenAI-style role/content dicts.
chatbot = gr.ChatInterface(respond, type = 'messages')

# debug=True keeps the server in the foreground and prints errors to the console.
chatbot.launch(debug = True)