SBK committed
Commit 2277d6e · verified · 1 Parent(s): 529e159

Create app.py

Files changed (1)
  1. app.py +85 -0
app.py ADDED
@@ -0,0 +1,85 @@
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+ import gradio as gr
+ import threading
+
+ # === Model loading ===
+ model_path = "SBK/sbk-llm-1"  # fine-tuned model hosted on the Hugging Face Hub
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model.to(device)
+
+ # === System prompt / default behavior ===
+ SYSTEM_PROMPT = """You are a helpful, honest, and factual assistant trained to answer questions only about *Saptarshi Bhattacharya*. You were fine-tuned on factual data derived from his work, projects, skills, internships, and engineering experiences.
+
+ Your job is to help users understand what Saptarshi has done, what he's good at, and how his experience aligns with ML Ops, Data Engineering, DevOps, and related roles.
+
+ - If a user asks something outside the scope of his data, do not guess; politely say it's outside your knowledge.
+ - Never fabricate qualifications, names, or roles that were not in your training data.
+ - Emphasize Saptarshi's strengths, such as completing hard technical projects, optimizing pipelines, learning on the fly, and being a completionist.
+ - Maintain a professional yet warm tone.
+ - Refer to Saptarshi in the third person.
+
+ Your goal is to represent him truthfully and make his work accessible and understandable to potential collaborators or employers, without overselling or faking.
+ """
+
+
+ BLOCKED_KEYWORDS = ["kill", "harm", "violence", "bomb", "suicide"]  # simple guardrail
+ MAX_TOKENS = 512
+
+ # === Streaming generation ===
+ def generate_response(history, system_prompt):
+     # Build the chat prompt: completed turns first, then the pending user message
+     prompt = system_prompt.strip() + "\n"
+     for user, bot in history[:-1]:
+         prompt += f"User: {user}\nAssistant: {bot}\n"
+     prompt += "User: " + history[-1][0] + "\nAssistant:"
+
+     # Guardrails
+     if any(bad in prompt.lower() for bad in BLOCKED_KEYWORDS):
+         yield "[Blocked for safety. Prompt contains restricted keywords.]"
+         return
+
+     # Tokenization
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+     # Run generation in a background thread so tokens can be consumed as they arrive
+     generation_kwargs = dict(
+         **inputs,
+         streamer=streamer,
+         max_new_tokens=MAX_TOKENS,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+         pad_token_id=tokenizer.eos_token_id,
+     )
+     thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     # Stream tokens as the background thread produces them
+     for token in streamer:
+         yield token
+
+ # === Gradio interface ===
+ with gr.Blocks() as demo:
+     gr.Markdown("## 🧠 Chat with SBK LLM")
+     system_prompt = gr.Textbox(label="System Prompt", value=SYSTEM_PROMPT, lines=2)
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox(label="Your Message", placeholder="Ask me anything...", lines=1)
+     clear = gr.Button("Clear")
+
+     history = gr.State([])  # memory for session
+
+     def respond(user_message, history, system_prompt):
+         # Yielding from the handler lets Gradio stream partial replies into the Chatbot
+         history = history + [[user_message, ""]]
+         for token in generate_response(history, system_prompt):
+             history[-1][1] += token
+             yield "", history, history
+
+     msg.submit(respond, [msg, history, system_prompt], [msg, history, chatbot])
+     clear.click(lambda: ([], "", []), outputs=[chatbot, msg, history])
+
+ demo.launch()
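
For a quick sanity check outside the UI, the streaming generator can be exercised on its own. Below is a minimal sketch, not part of the committed file: it assumes the script's model-loading section has already run in the same process, and it uses the same list-of-pairs history format the Chatbot works with (the question text is made up for illustration).

    # Hypothetical smoke test: stream one reply straight to stdout.
    history = [["What has Saptarshi worked on?", ""]]
    for token in generate_response(history, SYSTEM_PROMPT):
        print(token, end="", flush=True)

Because respond is a generator, Gradio streams each yielded update into the Chatbot as it arrives, so no extra streaming configuration is needed on msg.submit.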