File size: 4,222 Bytes
e4233b9
 
 
 
 
 
1894678
 
 
 
e4233b9
 
1894678
e4233b9
 
e467758
1894678
e467758
 
 
 
 
 
1894678
e4233b9
 
 
 
 
1894678
 
e4233b9
 
 
 
 
 
 
 
1894678
 
e4233b9
 
 
 
 
 
 
 
 
 
1894678
e4233b9
 
 
 
1894678
 
e4233b9
 
 
 
 
1894678
 
 
 
 
 
 
e4233b9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1894678
 
 
 
e4233b9
 
1894678
 
 
 
e4233b9
 
 
 
1894678
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Path: chatbot_app.py
Description: Gradio-based chatbot with selectable Hugging Face LLMs, using transformers pipelines.
"""

import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import torch
import asyncio
from typing import List, Tuple, Dict

# ✅ List of available premium models
# Hugging Face model IDs offered in the UI dropdown. Several of these are
# gated or multimodal (e.g. Llama-2, Qwen2.5-VL) — presumably all are meant
# to work with a plain "text-generation" pipeline; verify access/compatibility.
PREMIUM_MODELS = [
    "HuggingFaceH4/zephyr-7b-beta",
    "K00B404/BagOClownCoders-slerp-7B",
    "Qwen/Qwen2.5-Omni-7B",
    "Qwen/Qwen2.5-VL-7B-Instruct",
    "deepseek-ai/Janus-Pro-7B",
    "meta-llama/Llama-2-7b-hf",
    "Alibaba-NLP/gte-Qwen2-7B-instruct",
]

# ✅ Cache for loaded pipelines
# Maps model name -> loaded transformers pipeline, so each model is only
# loaded once per process. NOTE(review): `pipeline` here is the factory
# function, not a class — the values are really `Pipeline` instances.
pipeline_cache: Dict[str, pipeline] = {}

# ✅ Initial system prompt
# Prepended as the <|system|> turn of every prompt built by build_prompt().
DEFAULT_SYSTEM_PROMPT = "You are a ChatBuddy and chat with the user in a Human way."


def load_pipeline(model_name: str) -> pipeline:
    """
    Load and cache a text-generation pipeline for the given model.

    Args:
        model_name: Hugging Face model identifier (e.g. "org/model").

    Returns:
        A cached or freshly created transformers text-generation pipeline.

    Raises:
        RuntimeError: If the tokenizer or model fails to load; the original
            exception is attached as ``__cause__``.
    """
    # Fast path: reuse a pipeline loaded earlier in this process.
    if model_name in pipeline_cache:
        return pipeline_cache[model_name]

    try:
        print(f"Loading model: {model_name}")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        # Half precision on GPU roughly halves memory; CPU needs float32.
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        )
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            device=0 if torch.cuda.is_available() else -1,  # 0 = first GPU, -1 = CPU
        )
        pipeline_cache[model_name] = pipe
        return pipe

    except Exception as e:
        # Chain the original exception so the root cause stays visible
        # (the bare `raise RuntimeError` previously discarded the chain).
        raise RuntimeError(f"Failed to load model '{model_name}': {e}") from e


def build_prompt(user_input: str, history: List[Tuple[str, str]]) -> str:
    """
    Build the full prompt string for the model.

    The prompt is a sequence of "<|role|> content" lines: the system prompt
    first, then each (user, assistant) exchange from *history*, and finally
    the current *user_input* as the trailing user turn.

    Args:
        user_input: The user's latest message.
        history: Prior (user_message, assistant_reply) pairs, oldest first.

    Returns:
        The assembled prompt, one turn per line, with a trailing newline.
    """
    lines = [f"<|system|> {DEFAULT_SYSTEM_PROMPT}"]
    for user_msg, assistant_msg in history:
        lines.append(f"<|user|> {user_msg}")
        lines.append(f"<|assistant|> {assistant_msg}")
    lines.append(f"<|user|> {user_input}")
    # join + trailing "\n" matches the original per-line "...\n" accumulation.
    return "\n".join(lines) + "\n"


def extract_response(generated_text: str) -> str:
    """
    Pull the final assistant reply out of the raw generated text.

    Args:
        generated_text: Full model output, typically containing the echoed
            prompt with "<|assistant|>" role tags.

    Returns:
        The text after the last "<|assistant|>" tag, stripped; if the tag is
        absent, the whole input stripped.
    """
    marker = "<|assistant|>"
    _, found, tail = generated_text.rpartition(marker)
    return tail.strip() if found else generated_text.strip()


async def chatbot(
    user_input: str, history: List[Tuple[str, str]], model_choice: str
) -> Tuple[str, List[Tuple[str, str]]]:
    """
    Generate a model response for *user_input* and append it to *history*.

    Model loading and generation run in worker threads via asyncio.to_thread
    so the event loop (and Gradio UI) stays responsive.

    Args:
        user_input: The user's latest message.
        history: Mutable list of (user, assistant) pairs; mutated IN PLACE.
        model_choice: Hugging Face model ID to generate with.

    Returns:
        ("", history): an empty string to clear the input textbox, and the
        same (mutated) history list to render in the Chatbot widget.

    NOTE(review): the in-place ``history.append`` is load-bearing — the
    Gradio ``submit`` wiring does not list ``state`` as an output, so the
    shared gr.State list is only updated through this mutation.
    """
    if not user_input.strip():
        return "", history  # Ignore empty inputs

    try:
        # Loading can take minutes for a 7B model; keep it off the event loop.
        pipe = await asyncio.to_thread(load_pipeline, model_choice)
        prompt = build_prompt(user_input, history)

        # Generation is blocking/CPU-or-GPU-bound; also run in a thread.
        response = await asyncio.to_thread(
            pipe,
            prompt,
            max_new_tokens=200,
            do_sample=True,
            top_p=0.95,
            temperature=0.7,
        )

        # Pipeline returns a list of dicts; [0]["generated_text"] includes
        # the echoed prompt, so strip it down to the last assistant turn.
        generated_text = response[0]["generated_text"]
        final_response = extract_response(generated_text)

    except RuntimeError as load_err:
        # load_pipeline wraps load failures in RuntimeError; show as reply.
        final_response = str(load_err)
    except Exception as e:
        final_response = f"⚠️ Error during generation: {str(e)}"

    # Errors are appended as the assistant reply so the user sees them inline.
    history.append((user_input, final_response))
    return "", history


# ✅ Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 ChatBuddy - Advanced Chatbot with Selectable LLMs")

    with gr.Row():
        model_choice = gr.Dropdown(
            label="Select Model", choices=PREMIUM_MODELS, value=PREMIUM_MODELS[0]
        )

    chatbot_ui = gr.Chatbot()
    user_input = gr.Textbox(show_label=False, placeholder="Type your message and press Enter")
    clear_btn = gr.Button("Clear")

    # Conversation history as (user, assistant) pairs; chatbot() mutates it
    # in place, which is why `state` is not listed among the submit outputs.
    state = gr.State([])

    user_input.submit(chatbot, [user_input, state, model_choice], [user_input, chatbot_ui])
    # BUG FIX: the old lambda returned ([], ""), assigning the string "" to
    # `state`; the next message then crashed on `"".append(...)`. Reset the
    # state to an empty LIST so history stays appendable.
    clear_btn.click(lambda: ([], []), None, [chatbot_ui, state])

demo.launch()