DSDUDEd committed on
Commit
1444cb4
·
verified ·
1 Parent(s): d296aa2

update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -1
app.py CHANGED
@@ -9,6 +9,7 @@ import gradio as gr
9
# Hugging Face Hub model identifier; weights are downloaded on first run.
model_name = "Fredithefish/Guanaco-3B-Uncensored-v2"
# Tokenizer and model are fetched from the Hub cache (network access on first use).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Prefer GPU inference when CUDA is available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
 
@@ -22,7 +23,7 @@ chat_history = []
22
  # --------------------------
23
  def generate_response(prompt):
24
  global chat_history
25
- # Combine previous messages
26
  context = ""
27
  for user_msg, ai_msg in chat_history:
28
  context += f"User: {user_msg}\nAI: {ai_msg}\n"
@@ -39,6 +40,38 @@ def generate_response(prompt):
39
  chat_history.append((prompt, response))
40
  return response
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  # --------------------------
43
  # Simulate live typing
44
  # --------------------------
 
9
  model_name = "Fredithefish/Guanaco-3B-Uncensored-v2"
10
  tokenizer = AutoTokenizer.from_pretrained(model_name)
11
  model = AutoModelForCausalLM.from_pretrained(model_name)
12
+
13
  device = "cuda" if torch.cuda.is_available() else "cpu"
14
  model.to(device)
15
 
 
23
  # --------------------------
24
  def generate_response(prompt):
25
  global chat_history
26
+ # Combine previous conversation
27
  context = ""
28
  for user_msg, ai_msg in chat_history:
29
  context += f"User: {user_msg}\nAI: {ai_msg}\n"
 
40
  chat_history.append((prompt, response))
41
  return response
42
 
43
# --------------------------
# Live typing effect
# --------------------------
def live_typing(prompt):
    """Stream the model's reply one character at a time to simulate typing.

    Computes the complete response up front via ``generate_response``, then
    yields progressively longer prefixes of it, pausing briefly between
    characters so the UI appears to "type" the answer.
    """
    full_reply = generate_response(prompt)
    # Emit prefixes "H", "He", "Hel", ... — each yield updates the output box.
    for end in range(1, len(full_reply) + 1):
        time.sleep(0.02)  # typing speed
        yield full_reply[:end]
54
+ # --------------------------
55
+ # Gradio UI
56
+ # --------------------------
57
+ with gr.Blocks() as demo:
58
+ gr.Markdown("## 🤖 Guanaco-3B Chatbot with Live Typing")
59
+
60
+ # User input
61
+ user_input = gr.Textbox(label="Your Message", placeholder="Type something...")
62
+
63
+ # Output box
64
+ output_box = gr.Textbox(label="AI is typing...", lines=8)
65
+
66
+ # Submit event
67
+ user_input.submit(live_typing, inputs=[user_input], outputs=[output_box])
68
+
69
+ demo.launch()
70
+ )
71
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("AI:")[-1].strip()
72
+ chat_history.append((prompt, response))
73
+ return response
74
+
75
  # --------------------------
76
  # Simulate live typing
77
  # --------------------------