ar0551 committed on
Commit 1b182bc (verified)
1 Parent(s): 6ebe49c

Update app.py

Files changed (1)
  1. app.py +6 -7
app.py CHANGED
@@ -19,12 +19,12 @@ model = AutoModelForCausalLM.from_pretrained(
 
 # Chat function
 @spaces.GPU
-def chat_with_bot(user_input, history):
+def chat_with_bot(message, history):
     history = history or []
     prompt = ""
     for user, bot in history:
         prompt += f"User: {user}\nAssistant: {bot}\n"
-    prompt += f"User: {user_input}\nAssistant:"
+    prompt += f"User: {message}\nAssistant:"
 
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
     outputs = model.generate(
@@ -38,14 +38,13 @@ def chat_with_bot(user_input, history):
 
     decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
     response = decoded[len(prompt):].strip().split("\n")[0]
-
-    history.append((user_input, response))
+    history.append((message, response))
     return response, history
 
-# Gradio UI
+# gr.ChatInterface expects fn(message, history) → (response, updated_history)
 gr.ChatInterface(
     fn=chat_with_bot,
-    title="Phi-2 Chatbot (ZeroGPU Safe)",
+    title="Phi-2 Chatbot (CPU-friendly)",
     theme="soft",
-    examples=["What is AI?", "Summarize the French Revolution.", "Tell me a space fact."]
+    examples=["What is AI?", "Tell me a fun fact about space.", "Summarize photosynthesis."],
 ).launch(share=True)
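
The commit touches only the two hunks above; the import block, the model/tokenizer setup, the definition of device, and the generate() arguments elided between the hunks are not part of this change. A minimal sketch of how the full app.py plausibly reads after the commit follows; the imports, the microsoft/phi-2 checkpoint id, and the generate() parameters are assumptions, not taken from the diff.

# Sketch of the full app.py after this commit. The imports, model id,
# device selection, and generate() arguments are assumptions inferred
# from the visible context, not part of the diff.
import torch
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "microsoft/phi-2"  # assumed from the "Phi-2 Chatbot" title
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID).to(device)

# Chat function
@spaces.GPU
def chat_with_bot(message, history):
    history = history or []
    prompt = ""
    for user, bot in history:
        prompt += f"User: {user}\nAssistant: {bot}\n"
    prompt += f"User: {message}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=128,                   # assumed; this hunk is not shown
        pad_token_id=tokenizer.eos_token_id,  # assumed
    )

    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
    response = decoded[len(prompt):].strip().split("\n")[0]
    history.append((message, response))
    return response, history

# gr.ChatInterface expects fn(message, history) → (response, updated_history)
gr.ChatInterface(
    fn=chat_with_bot,
    title="Phi-2 Chatbot (CPU-friendly)",
    theme="soft",
    examples=["What is AI?", "Tell me a fun fact about space.", "Summarize photosynthesis."],
).launch(share=True)

Note that the committed callback returns (response, history) and appends to history manually; in recent Gradio releases gr.ChatInterface typically expects the callback to return just the reply string and manages the conversation history itself, so this return signature may need adjusting depending on the installed Gradio version.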