jeevav62 commited on
Commit
138b42d
·
verified ·
1 Parent(s): ca1f085

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +36 -40
app.py CHANGED
@@ -3,25 +3,23 @@ import gradio as gr
3
  from transformers import pipeline
4
 
5
  # -------------------------------
6
- # Load TinyLLaMA Model
7
  # -------------------------------
8
  pipe = pipeline(
9
  "text-generation",
10
  model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
11
- torch_dtype=torch.float32, # CPU-friendly
12
- device_map="auto",
13
  )
14
 
15
  # -------------------------------
16
- # Chat Function
17
  # -------------------------------
18
  def chat(user_message, history, system_prompt, temperature, max_tokens):
19
- # Stronger personality: always prepend system prompt
20
  messages = [{"role": "system", "content": system_prompt}]
21
 
22
- for u, a in history:
23
- messages.append({"role": "user", "content": u})
24
- messages.append({"role": "assistant", "content": a})
25
 
26
  messages.append({"role": "user", "content": user_message})
27
 
@@ -33,14 +31,17 @@ def chat(user_message, history, system_prompt, temperature, max_tokens):
33
 
34
  output = pipe(
35
  prompt,
36
- max_new_tokens=max_tokens,
37
  temperature=temperature,
38
  top_p=0.9,
39
  do_sample=True,
40
  )
41
 
42
- response = output[0]["generated_text"].split("<|assistant|>")[-1].strip()
43
- history.append([user_message, response])
 
 
 
44
  return history
45
 
46
  # -------------------------------
@@ -48,58 +49,53 @@ def chat(user_message, history, system_prompt, temperature, max_tokens):
48
  # -------------------------------
49
  with gr.Blocks(title="TinyLLaMA Chatbot") as demo:
50
  gr.Markdown("## πŸ¦™ TinyLLaMA Chatbot")
51
- gr.Markdown("Select personality to change chatbot behavior and adjust generation settings.")
52
 
53
- # Predefined personalities
54
  preset_prompts = {
55
- "Pirate πŸ΄β€β˜ οΈ": "You are a friendly chatbot who always responds like a pirate. Use pirate words like 'Ahoy', 'Matey', and speak in a fun, adventurous style.",
56
- "Teacher πŸ‘¨β€πŸ«": "You are a patient teacher who explains concepts clearly. Give detailed, step-by-step explanations, and use examples.",
57
- "Coder πŸ‘¨β€πŸ’»": "You are a helpful programming assistant. Provide clear code examples and explain syntax professionally.",
58
- "Friendly Assistant πŸ€–": "You are a cheerful and friendly AI assistant. Always respond in a kind, helpful, and approachable tone."
59
  }
60
 
61
- system_prompt_dropdown = gr.Dropdown(
62
  choices=list(preset_prompts.keys()),
63
  value="Pirate πŸ΄β€β˜ οΈ",
64
- label="Choose Personality",
65
  )
66
 
67
- system_prompt_textbox = gr.Textbox(
68
  value=preset_prompts["Pirate πŸ΄β€β˜ οΈ"],
69
- label="System Prompt (Editable)"
70
  )
71
 
72
- # Update textbox when dropdown changes
73
- def update_prompt(choice):
74
- return preset_prompts[choice]
75
-
76
- system_prompt_dropdown.change(update_prompt, inputs=system_prompt_dropdown, outputs=system_prompt_textbox)
77
 
78
- chatbot = gr.Chatbot(height=400)
79
  user_input = gr.Textbox(label="Your Message")
80
 
81
- # Generation sliders
82
- temperature_slider = gr.Slider(0.1, 1.2, value=0.85, step=0.05, label="Temperature")
83
- max_tokens_slider = gr.Slider(32, 256, value=128, step=16, label="Max Tokens")
84
 
85
- send_btn = gr.Button("Send πŸš€")
86
- clear_btn = gr.Button("Clear 🧹")
87
 
88
- send_btn.click(
89
  chat,
90
- inputs=[user_input, chatbot, system_prompt_textbox, temperature_slider, max_tokens_slider],
91
  outputs=chatbot
92
  )
93
 
94
  user_input.submit(
95
  chat,
96
- inputs=[user_input, chatbot, system_prompt_textbox, temperature_slider, max_tokens_slider],
97
  outputs=chatbot
98
  )
99
 
100
- clear_btn.click(lambda: [], outputs=chatbot)
 
 
101
 
102
- # -------------------------------
103
- # Launch
104
- # -------------------------------
105
- demo.launch(share=True)
 
import torch  # NOTE(review): torch.float32 is used below but `import torch` is not
              # visible in the shown context of app.py — confirm it is not already
              # imported at the top of the file (a duplicate import is harmless).
from transformers import pipeline

# -------------------------------
# Load Model (CPU-safe)
# -------------------------------
# TinyLlama 1.1B chat model for text generation. float32 plus device_map=None
# keeps the model on CPU, the safe default when no GPU is available.
pipe = pipeline(
    "text-generation",
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    torch_dtype=torch.float32,
    device_map=None,
)
  def chat(user_message, history, system_prompt, temperature, max_tokens):
 
19
  messages = [{"role": "system", "content": system_prompt}]
20
 
21
+ if history:
22
+ messages.extend(history)
 
23
 
24
  messages.append({"role": "user", "content": user_message})
25
 
 
31
 
32
  output = pipe(
33
  prompt,
34
+ max_new_tokens=int(max_tokens),
35
  temperature=temperature,
36
  top_p=0.9,
37
  do_sample=True,
38
  )
39
 
40
+ assistant_reply = output[0]["generated_text"].split("<|assistant|>")[-1].strip()
41
+
42
+ history.append({"role": "user", "content": user_message})
43
+ history.append({"role": "assistant", "content": assistant_reply})
44
+
45
  return history
46
 
47
  # -------------------------------
 
# -------------------------------
with gr.Blocks(title="TinyLLaMA Chatbot") as demo:
    gr.Markdown("## 🦙 TinyLLaMA Chatbot")

    # Personality presets: dropdown choice -> system prompt text.
    preset_prompts = {
        "Pirate 🏴‍☠️": "You are a pirate chatbot. Speak like a pirate.",
        "Teacher 👨‍🏫": "You are a patient teacher.",
        "Coder 👨‍💻": "You are a programming assistant.",
        "Friendly 🤖": "You are a friendly assistant.",
    }

    personality = gr.Dropdown(
        choices=list(preset_prompts.keys()),
        value="Pirate 🏴‍☠️",
        label="Choose Personality",
    )

    # Editable copy of the selected preset; this is what actually gets sent
    # to the model, so users can tweak the personality text freely.
    system_prompt = gr.Textbox(
        value=preset_prompts["Pirate 🏴‍☠️"],
        label="System Prompt",
    )

    # Keep the editable system prompt in sync with the chosen preset.
    personality.change(
        lambda x: preset_prompts[x],
        inputs=personality,
        outputs=system_prompt,
    )

    # type="messages" matches the dict-based history format used by chat().
    chatbot = gr.Chatbot(type="messages", height=400)
    user_input = gr.Textbox(label="Your Message")

    # FIX: restore the labels dropped in this commit so the sliders are
    # self-describing in the UI (the previous version had them).
    temperature = gr.Slider(0.1, 1.2, value=0.85, label="Temperature")
    max_tokens = gr.Slider(32, 128, value=96, step=16, label="Max Tokens")

    send = gr.Button("Send 🚀")
    clear = gr.Button("Clear 🧹")

    # Both the button and pressing Enter in the textbox trigger the same
    # chat() handler with identical inputs/outputs.
    send.click(
        chat,
        inputs=[user_input, chatbot, system_prompt, temperature, max_tokens],
        outputs=chatbot,
    )

    user_input.submit(
        chat,
        inputs=[user_input, chatbot, system_prompt, temperature, max_tokens],
        outputs=chatbot,
    )

    # Reset the conversation by replacing the history with an empty list.
    clear.click(lambda: [], outputs=chatbot)

demo.launch()
101