File size: 1,039 Bytes
3783cb5
 
 
230e612
3783cb5
 
 
a86ee16
3a2bdf2
 
9b5d97e
3a2bdf2
 
a86ee16
584d8f6
3783cb5
 
 
 
 
7886827
 
 
 
 
 
 
05ab5cd
18efcdc
7886827
6c8811e
 
230e612
 
3783cb5
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import gradio as gr
from huggingface_hub import InferenceClient #InferenceClient class

# Hosted inference client for the Qwen2.5-7B-Instruct chat model.
# Every call goes over the network to the Hugging Face Inference API.
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct")


def respond(message, history):
    """Stream a recipe suggestion for ``message`` given the chat ``history``.

    Generator used by ``gr.ChatInterface``: yields the progressively
    accumulated assistant reply so the UI renders tokens as they arrive.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list
        Prior turns. Gradio may supply either OpenAI-style message dicts
        (``type="messages"``) or ``(user, assistant)`` pairs; both are
        accepted.

    Yields
    ------
    str
        The response text accumulated so far.
    """
    messages = [
        {
            "role": "system",
            # Typo fixed: "recipies" -> "recipes".
            "content": """You are a recipe assistant who suggests simple recipes that take less than 30 minutes based on the
            ingredients the user has and their dietary restrictions."""
        }
    ]

    # Normalize history: pass dict entries through unchanged; convert
    # (user, assistant) tuple pairs into role-tagged messages.
    for turn in history or []:
        if isinstance(turn, dict):
            messages.append(turn)
        else:
            user_msg, bot_msg = turn
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})

    messages.append({"role": "user", "content": message})

    # Stream the completion, yielding the growing response instead of
    # waiting for the whole reply. The loop variable is named `chunk`
    # (the original shadowed the `message` parameter here).
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,
        temperature=0.5,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None on role-only / final chunks; the
        # original `response += token` would raise TypeError there.
        if token:
            response += token
            yield response

# defining chatbot
# Wire the streaming `respond` generator into Gradio's chat UI.
# Empty title/description keep the default minimal header.
chatbot = gr.ChatInterface(respond, title = "", description = "") 

# Start the Gradio web server (blocking call).
chatbot.launch()