jasvir-singh1021 commited on
Commit
90e51ed
·
verified ·
1 Parent(s): cb7750d

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +6 -5
  2. app.py +68 -0
  3. requirements.txt +3 -0
README.md CHANGED
@@ -1,13 +1,14 @@
1
  ---
2
  title: Phi2 Chatbot
3
- emoji: 🐢
4
- colorFrom: red
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 5.38.2
8
  app_file: app.py
9
  pinned: false
10
- short_description: Small model chatbot
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
  ---
2
  title: Phi2 Chatbot
3
+ emoji: 🤖
4
+ colorFrom: indigo
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: "4.24.0"
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
+
14
+ This is a lightweight Gradio chatbot app using the [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) model.
app.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

# Load model & tokenizer once at import time.
# float32 keeps generation CPU-compatible (no half-precision kernels needed).
model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
model.eval()  # inference mode: disables dropout / training-only behavior

# Memory for chat history: list of (speaker, message) tuples shared by every
# handler below. NOTE(review): module-level state is shared by ALL visitors of
# the Space — concurrent users will see and mix each other's conversations.
chat_history = []

# System prompt prepended to every generation request.
system_prompt = "You are a helpful and friendly assistant."
16
+
17
def chat(user_input):
    """Generate a reply to *user_input* and return the whole conversation.

    Side effects: appends a ("User", ...) and a ("Bot", ...) entry to the
    module-level ``chat_history``.  Returns the full history rendered as
    markdown via ``format_chat``.
    """
    # Assemble the prompt: system prompt, then each prior turn on its own
    # line, then the new message with a trailing "Bot:" cue for the model
    # to complete.
    prior_turns = [f"{who}: {text}" for who, text in chat_history]
    prompt = "\n".join([system_prompt, *prior_turns, f"User: {user_input}\nBot:"])

    encoded = tokenizer.encode(prompt, return_tensors="pt")
    with torch.no_grad():
        generated = model.generate(
            encoded, max_new_tokens=100, do_sample=True, temperature=0.7
        )

    # The decode includes the prompt; keep only the text after the final
    # "Bot:" marker — that is the newly generated reply.
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    reply = decoded.split("Bot:")[-1].strip()

    chat_history.extend([("User", user_input), ("Bot", reply)])

    return format_chat(chat_history)
35
+
36
def format_chat(history):
    """Render (speaker, message) pairs as bold-labelled markdown paragraphs."""
    rendered = []
    for who, text in history:
        rendered.append(f"**{who}:** {text}")
    return "\n\n".join(rendered)
38
+
39
def clear():
    """Reset the shared conversation and blank the chat display."""
    # Empty the module-level history in place (same object, so every
    # reference stays valid).
    del chat_history[:]
    return ""
42
+
43
def export_chat():
    """Write the current conversation to a temporary file for download.

    Returns:
        str: path of a UTF-8 encoded ``chat.txt`` containing one
        ``"speaker: message"`` line per history entry.

    Fix vs. original: ``gr.File.update(value=<bytes>, filename=...)`` is
    invalid — the ``.update()`` classmethods were removed in Gradio 4.x
    (this Space pins sdk_version 4.24.0), ``gr.File`` outputs expect a
    file path rather than raw bytes, and ``filename`` is not a parameter.
    Returning a path is accepted directly by the ``file_out`` component.
    """
    import os
    import tempfile

    transcript = "\n".join(f"{speaker}: {message}" for speaker, message in chat_history)
    # Fresh directory per export so the file is always named "chat.txt"
    # without clobbering a previous export.
    out_dir = tempfile.mkdtemp(prefix="phi2_chat_")
    path = os.path.join(out_dir, "chat.txt")
    with open(path, "w", encoding="utf-8") as fh:
        fh.write(transcript)
    return path
46
+
47
# --- Gradio UI wiring (built at import time; launch() starts the server) ---
with gr.Blocks() as demo:
    # Page header.
    gr.Markdown("""# 🤖 Chat with Phi-2
    Small CPU-friendly chatbot using Hugging Face Transformers
    """)

    # The conversation is rendered as a single markdown blob produced by
    # format_chat(), not a gr.Chatbot component.
    chatbot = gr.Markdown()
    with gr.Row():
        user_input = gr.Textbox(placeholder="Type your message here...", show_label=False)
        send_btn = gr.Button("Send")
    with gr.Row():
        clear_btn = gr.Button("🧹 Clear")
        export_btn = gr.Button("💾 Export Chat")
    file_out = gr.File(label="Download Chat")

    # Both the Send button and pressing Enter in the textbox call chat().
    # NOTE(review): the textbox is not cleared after sending — the typed
    # message remains in the input field; confirm this is intended.
    send_btn.click(chat, inputs=user_input, outputs=chatbot)
    user_input.submit(chat, inputs=user_input, outputs=chatbot)
    clear_btn.click(clear, outputs=chatbot)
    export_btn.click(export_chat, outputs=file_out)

    # Seed the display with any pre-existing history (empty at startup).
    # NOTE(review): assigning .value after construction is likely a no-op
    # in Gradio 4.x — verify it has any visible effect.
    chatbot.value = format_chat(chat_history)

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ transformers
3
+ gradio