File size: 4,306 Bytes
2277d6e
 
 
 
 
 
 
 
9885321
 
 
 
 
2277d6e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c116ec0
2277d6e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f7ddeab
2277d6e
f7ddeab
 
2277d6e
 
9885321
 
 
 
 
 
 
 
 
 
 
 
 
 
2277d6e
f7ddeab
9885321
f7ddeab
 
 
 
 
 
 
9885321
2277d6e
9885321
0358e00
 
f7ddeab
9885321
 
 
 
 
 
 
 
 
 
 
 
 
0358e00
2277d6e
9885321
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import gradio as gr
import threading

# === Model loading ===
# Pull the fine-tuned portfolio model + tokenizer from the Hugging Face Hub.
model_path = "SBK/sbk-llm-1"  # Using your HF model
tokenizer = AutoTokenizer.from_pretrained(model_path)
# fp16 on GPU to halve memory; fp32 on CPU, where half precision is slow/unsupported.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto"
)
# NOTE(review): with device_map="auto" the model may be sharded across devices,
# while inputs are later moved to this single `device` — confirm it matches
# where the embedding layer was placed.
device = "cuda" if torch.cuda.is_available() else "cpu"

# === System prompt / default behavior ===
# Default persona instructions; shown in (and editable via) the UI textbox.
SYSTEM_PROMPT = """You are a helpful, honest, and factual assistant trained to answer only about me *Saptarshi Bhattacharya*. You were fine-tuned on factual data derived from his work, projects, skills, internships, and engineering experiences.

Your job is to help users understand what Saptarshi has done, what he's good at, and how his experience aligns with ML Ops, Data Engineering, DevOps, and related roles.

- If a user asks something outside the scope of his data, do not guess — politely say it's outside your knowledge.
- Never fabricate qualifications, names, or roles that were not in your training.
- Emphasize Saptarshi's strengths, such as completing hard technical projects, optimizing pipelines, learning on the fly, and being a completionist.
- Maintain a professional yet warm tone.
- Refer to Saptarshi in third person.

Your goal is to represent him truthfully and make his work accessible and understandable to potential collaborators or employers, without overselling or faking.
"""

# Substrings that cause a request to be refused (matched case-insensitively
# against the full assembled prompt, including prior turns).
BLOCKED_KEYWORDS = ["violence","suicide"]
# Upper bound on newly generated tokens per reply.
MAX_TOKENS = 512

# === Streaming generation ===
def generate_response(history, system_prompt):
    """Stream the assistant's reply for the latest user turn.

    Args:
        history: list of (user, bot) message pairs; the last entry holds the
            current user message paired with a placeholder (typically empty)
            bot reply.
        system_prompt: instructions prepended to the chat transcript.

    Yields:
        str: the accumulated partial response, growing as tokens arrive, or a
        single safety-refusal string when a blocked keyword is present.
    """
    # Build the chat prompt from completed turns only (history[:-1]).
    # BUG FIX: the original iterated over ALL of history, so the final
    # placeholder turn emitted "User: X\nAssistant: \n" and then the same
    # user message was appended again, duplicating it in the prompt.
    prompt = system_prompt.strip() + "\n"
    for user, bot in history[:-1]:
        prompt += f"User: {user}\nAssistant: {bot}\n"
    prompt += "User: " + history[-1][0] + "\nAssistant:"

    # Guardrails: refuse outright if any restricted keyword appears anywhere
    # in the assembled prompt (system prompt and prior turns included).
    if any(bad in prompt.lower() for bad in BLOCKED_KEYWORDS):
        yield "[Blocked for safety. Prompt contains restricted keywords.]"
        return

    # Tokenization
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # Run generate() on a worker thread so this generator can consume the
    # streamer concurrently.
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=MAX_TOKENS,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id
    )
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # Stream tokens as they arrive, yielding the accumulated text each time.
    partial_message = ""
    for token in streamer:
        partial_message += token
        yield partial_message
    # The streamer is exhausted only after generate() finishes; join to make
    # sure the worker thread is fully cleaned up before returning.
    thread.join()

# === Gradio interface ===
with gr.Blocks(title="SBK LLM Chat") as demo:
    # FIX: the original header contained a mojibake "�" (mis-encoded emoji);
    # replaced with a valid character.
    gr.Markdown("## 🤖 Chat with SBK LLM - Professional Portfolio Assistant")

    with gr.Row():
        with gr.Column(scale=1):
            # Editable system instructions, pre-filled with the default persona.
            system_prompt = gr.Textbox(label="System Instructions", value=SYSTEM_PROMPT, lines=8)
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Your Message", placeholder="Ask about Saptarshi's professional experience...", lines=2)
            with gr.Row():
                submit_btn = gr.Button("Submit")
                clear_btn = gr.Button("Clear Chat")

    # Auxiliary conversation state; only written by the "Clear Chat" action.
    history = gr.State([])

    def respond(user_message, chat_history, system_prompt):
        """Append the user turn and stream the model reply into the chat.

        Yields the updated chat history after every streamed chunk so the
        Chatbot component renders the reply progressively.
        """
        chat_history = chat_history + [(user_message, "")]

        # FIX: dropped the dead `full_response` accumulator and the
        # unreachable-as-normal `return chat_history` (in a generator it only
        # sets the StopIteration value, which Gradio ignores).
        for partial in generate_response(chat_history, system_prompt):
            # Replace the placeholder bot reply with the latest partial text.
            chat_history[-1] = (user_message, partial)
            yield chat_history

    # Connect components: Enter key and Submit button share the same handler.
    msg.submit(
        respond,
        [msg, chatbot, system_prompt],
        [chatbot],
        queue=True
    )
    submit_btn.click(
        respond,
        [msg, chatbot, system_prompt],
        [chatbot],
        queue=True
    )
    clear_btn.click(
        lambda: ([], []),
        outputs=[chatbot, history],
        queue=False
    )

# Queue requests (bounded backlog) and start the server with a public share
# link, binding to all interfaces on port 7860.
queued_app = demo.queue(max_size=20)
queued_app.launch(
    share=True,
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True
)