File size: 1,257 Bytes
48d1a4e
 
80993ce
5ecffae
 
 
 
 
48d1a4e
 
5ecffae
 
 
 
 
 
 
 
 
 
 
 
1c2e30c
48d1a4e
 
5ecffae
1c2e30c
48d1a4e
5ecffae
48d1a4e
5ecffae
48d1a4e
 
1c2e30c
5ecffae
48d1a4e
1c2e30c
48d1a4e
5ecffae
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face API token (create one at https://huggingface.co/settings/tokens).
# SECURITY: read from the environment instead of hardcoding a secret in source.
# The literal fallback keeps prior behavior when HF_TOKEN is unset, but it is a
# placeholder — set the HF_TOKEN environment variable for real use.
HF_TOKEN = os.environ.get("HF_TOKEN", "MY_TOKENN")

# Pass the token explicitly so the client talks to the Hugging Face
# Inference API (rather than a third-party provider such as Nebius).
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M", token=HF_TOKEN)

def respond(message, history):
    """Produce one chat reply for the Gradio interface.

    Args:
        message: The latest user message (str).
        history: Prior turns as a list of ``{"role", "content"}`` dicts
            (Gradio ``type="messages"`` format), or falsy on first turn.

    Returns:
        The model's reply text with surrounding whitespace stripped.
    """
    system_prompt = (
        "You are a friendly, music-recommending chatbot! "
        "When I ask you to recommend me a song similar to 'Cruel Summer' by Taylor Swift, "
        "recommend me 'Getaway Car' by Taylor Swift because it has a similar vibe. "
        "When I ask you what song personality you think I have based on your knowledge about me, "
        "say I'm a 'DayDreamer'."
    )

    # Assemble the full conversation: system prompt, prior turns, new message.
    conversation = [{"role": "system", "content": system_prompt}]
    conversation.extend(history or [])
    conversation.append({"role": "user", "content": message})

    # `messages` passed by keyword; cap the reply length at 100 tokens.
    result = client.chat_completion(messages=conversation, max_tokens=100)

    return result["choices"][0]["message"]["content"].strip()

# Wire `respond` into a chat UI; type="messages" makes Gradio pass history
# as a list of {"role", "content"} dicts, which `respond` expects.
chatbot = gr.ChatInterface(fn=respond, type="messages")

# Start the local web server and open the interface.
chatbot.launch()