import os
import gradio as gr
from huggingface_hub import InferenceClient

# ---------------- Role presets ----------------
# Maps a human-readable preset name -> system prompt sent to the model.
# The dropdown in the UI is built from these keys, and the selected
# value pre-fills the (editable) system-message textbox.
ROLE_PRESETS = {
    "Friendly Chatbot": "You are a friendly, concise assistant. Be helpful and keep answers short unless asked.",
    "Dataset Auditor": (
        "You are a dataset QA assistant. Identify duplicates, missing labels, class imbalance, and suspicious samples.\n"
        "Propose concrete fixes and a short remediation checklist."
    ),
    "SQL Explainer": (
        "Translate SQL into plain language for non-technical users. Explain step-by-step, then give a short summary."
    ),
    "Code Reviewer": (
        "Review code succinctly: correctness, readability, performance, edge cases, and security. Provide minimal diffs."
    ),
    "Data Pipeline Doctor": (
        "Debug data pipelines. Use: hypothesis ➜ quick checks ➜ likely fix order. Ask for logs only when needed."
    ),
    "Data Engineering Advisor": (
        "Advise on building robust, scalable data pipelines. Suggest architecture patterns, tools, and optimizations."
    ),
    "ML Dataset Preparer": (
        "Guide on preparing datasets for machine learning: cleaning, splitting, augmentation, and annotation strategies."
    ),
    "Data Quality Analyst": (
        "Evaluate data quality: detect anomalies, missing values, schema mismatches, and provide a remediation plan."
    ),
}

# ---------------- Model ----------------
# Hugging Face Hub model id used for all inference calls (see get_client).
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"

# ---------------- Client factory ----------------
def get_client():
    """Return a fresh ``InferenceClient`` bound to ``MODEL_NAME``.

    Reads the ``HF_TOKEN`` environment variable when set; a missing
    token yields ``None``, i.e. anonymous (rate-limited) access.
    """
    return InferenceClient(model=MODEL_NAME, token=os.getenv("HF_TOKEN"))

# ---------------- Chat backend (streaming) ----------------
def stream_reply(message, history, role, system_message):
    """Stream the assistant's reply for *message*, given the chat *history*.

    Args:
        message: The new user message (str).
        history: Prior turns. Accepts BOTH Gradio history formats:
            legacy ``(user, assistant)`` pairs, and the newer
            "messages" format of ``{"role": ..., "content": ...}`` dicts
            (default for modern ``gr.ChatInterface``). The original code
            only handled pairs and raised on dict entries.
        role: Key into ROLE_PRESETS; used when *system_message* is blank.
        system_message: Explicit system prompt; overrides the role preset.

    Yields:
        The accumulated reply text after each streamed chunk, or a single
        error string if the inference call fails.
    """
    sys_msg = (system_message or "").strip() or ROLE_PRESETS.get(role, "")
    messages = [{"role": "system", "content": sys_msg}]
    for turn in history:
        if isinstance(turn, dict):
            # "messages" format: forward role/content as-is, skipping
            # empty entries (e.g. a pending assistant placeholder).
            if turn.get("content"):
                messages.append(
                    {"role": turn.get("role", "user"), "content": turn["content"]}
                )
        else:
            # Legacy pair format: either side may be None/empty.
            u, a = turn
            if u:
                messages.append({"role": "user", "content": u})
            if a:
                messages.append({"role": "assistant", "content": a})
    messages.append({"role": "user", "content": message})

    client = get_client()
    partial = ""
    try:
        for event in client.chat_completion(messages=messages, stream=True):
            # delta.content is None on role/stop chunks; normalize to "".
            delta = event.choices[0].delta.content or ""
            if delta:
                partial += delta
                yield partial
    except Exception as e:
        # Surface the failure in the chat window instead of crashing the app.
        yield f"⚠️ Inference error: {e}"

# ---------------- UI ----------------
# Builds the Gradio app: a role dropdown, an editable system-message box,
# and a streaming ChatInterface wired to stream_reply.
with gr.Blocks(title="HF Zephyr Chat • Data Roles") as demo:
    gr.Markdown("## 🤗 Zephyr Chat (Data-focused Roles)")

    # Preset selector; choices come straight from ROLE_PRESETS keys.
    role_dd = gr.Dropdown(
        label="Role preset",
        choices=list(ROLE_PRESETS.keys()),
        value="Friendly Chatbot",
        interactive=True,
    )

    # System prompt shown to the user; auto-filled on role change but
    # freely editable (an edited value overrides the preset in stream_reply).
    system_tb = gr.Textbox(
        label="System message (auto-filled by role; you can edit)",
        value=ROLE_PRESETS["Friendly Chatbot"],
        lines=4,
    )

    # The two extra components are passed to stream_reply as the
    # `role` and `system_message` arguments, in this order.
    chat = gr.ChatInterface(
        fn=stream_reply,
        additional_inputs=[role_dd, system_tb],
    )

    def _on_role_change(role):
        """Return the preset prompt for *role* to refill the textbox."""
        return ROLE_PRESETS.get(role, "")
    role_dd.change(fn=_on_role_change, inputs=role_dd, outputs=system_tb)

# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()