import gradio as gr
from huggingface_hub import InferenceClient
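# Assumed setup (not part of the original file): `pip install gradio huggingface_hub`.
# Gated or private models on the Inference API may additionally require a
# Hugging Face token, e.g. exported as HF_TOKEN in the environment.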

# Define available models (update with your actual model IDs)
model_list = {
    "Safe LM": "HuggingFaceH4/zephyr-7b-beta",  # Replace with your Safe LM model ID
    "Zephyr Beta": "HuggingFaceH4/zephyr-7b-beta",
    "Another Model": "HuggingFaceH4/zephyr-7b-beta"
}

def respond(message, history, system_message, max_tokens, temperature, top_p, selected_model):
    # Look up the model ID from our list based on the dropdown selection
    model_id = model_list.get(selected_model, "HuggingFaceH4/zephyr-7b-beta")
    # Create an InferenceClient for the selected model
    client = InferenceClient(model_id)
    
    # Build the conversation history into the message list
    history = history or []
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    
    response = ""
    # Stream the response, yielding a cleared textbox plus the updated chat
    # history on each chunk, so the yield matches the two outputs
    # ([user_input, chatbot]) wired up in the click handler below
    for token_message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = token_message.choices[0].delta.content or ""  # delta may arrive empty
        response += token
        yield "", history + [(message, response)]
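# Hypothetical sanity check of respond() outside the UI (needs network access
# to the Inference API; uncomment to try):
# last = None
# for textbox_value, chat_pairs in respond(
#     "Hello!", [], "You are a friendly Chatbot.", 64, 0.7, 0.95, "Zephyr Beta"
# ):
#     last = chat_pairs
# print(last[-1][1])  # final assistant reply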

# CSS styling: pastel backgrounds, gentle light colors, and rounded corners for a safe vibe
css = """
body { background-color: #FAF3E0; }
.gradio-container { background-color: #FFFFFF; border-radius: 16px; padding: 20px; }
button, input, .gradio-dropdown, .gradio-slider, textarea { border-radius: 16px; }
.gradio-chat { border-radius: 16px; }
"""

with gr.Blocks(css=css) as demo:
    with gr.Row():
        # Left sidebar: Model selector
        with gr.Column(scale=1):
            gr.Markdown("## Models")
            model_dropdown = gr.Dropdown(
                choices=list(model_list.keys()),
                label="Select Model",
                value="Safe LM"
            )
        # Main area: Chat interface and settings
        with gr.Column(scale=3):
            gr.Markdown("## Chat Interface")
            chatbot = gr.Chatbot(label="Chat with your Model")
            user_input = gr.Textbox(placeholder="Enter your message...", label="Your Message")
            with gr.Row():
                send_button = gr.Button("Send")
                clear_button = gr.Button("Clear Chat")
            gr.Markdown("### Chat Settings")
            system_message = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
            max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens")
            temperature_slider = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
            top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
    
    # When "Send" is clicked, run the respond() function and update the chat interface.
    send_button.click(
        fn=respond,
        inputs=[user_input, chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        outputs=[user_input, chatbot],
    )
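
    # Optional addition (an assumption, not in the original): let pressing
    # Enter in the textbox send the message too, mirroring the Send button.
    user_input.submit(
        fn=respond,
        inputs=[user_input, chatbot, system_message, max_tokens_slider, temperature_slider, top_p_slider, model_dropdown],
        outputs=[user_input, chatbot],
    )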
    
    # Clear the chat history when "Clear Chat" is clicked.
    clear_button.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()