Dub973 committed on
Commit
eea3f86
·
verified ·
1 Parent(s): 6594b81

Upload 2 files

Browse files
Files changed (1) hide show
  1. app.py +37 -16
app.py CHANGED
@@ -1,36 +1,57 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # Load a text generation model
5
- generator = pipeline("text-generation", model="gpt2")
6
 
7
- # Function to handle each new user message
8
  def chat(user_message, history):
9
- # Combine history to make a context
10
  context = ""
11
  for turn in history:
12
  context += f"User: {turn[0]}\nBot: {turn[1]}\n"
13
  context += f"User: {user_message}\nBot:"
14
 
15
- # Generate response
16
- response = generator(context, max_length=200, do_sample=True, temperature=0.7)[0]['generated_text']
17
- # Keep only new part after last "Bot:"
18
- reply = response.split("Bot:")[-1].strip().split("\n")[0]
19
-
20
- # Append to chat history
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  history.append((user_message, reply))
22
- return history, history
23
 
24
  with gr.Blocks() as demo:
25
- gr.Markdown("<h1 style='text-align: center;'>💬 Conversational Agent Chatbot</h1>")
26
 
27
  chatbot = gr.Chatbot()
28
- msg = gr.Textbox(placeholder="Type your question and press Enter...")
 
 
29
 
30
  clear = gr.Button("Clear Chat")
31
 
32
- # Bind events
33
- msg.submit(chat, [msg, chatbot], [chatbot, chatbot])
34
- clear.click(lambda: None, None, chatbot, queue=False)
35
 
36
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
# Load the GPT-2 text-generation pipeline once at import time.
# NOTE: no length default is set here — the previous `max_length=200`
# was dead configuration (every call in chat() supplies its own limit)
# and only produced generation-config override warnings.
generator = pipeline("text-generation", model="gpt2")
6
 
7
# Chat handler: produce one bot turn plus a debugging trace.
def chat(user_message, history):
    """Generate one chatbot reply with GPT-2 and a debug trace.

    Args:
        user_message: the new message typed by the user.
        history: list of (user_text, bot_text) tuples from earlier turns.

    Returns:
        (history, history, trace_text) — history appears twice so it can
        fill both Chatbot output slots wired up in the UI; trace_text is a
        human-readable log of the prompt, raw model output, and reply.
    """
    # Build the prompt from the accumulated conversation.
    context = ""
    for user_turn, bot_turn in history:
        context += f"User: {user_turn}\nBot: {bot_turn}\n"
    context += f"User: {user_message}\nBot:"

    # Print context for debugging (will also be in trace box)
    print("----- TRACE: Prompt to LLM -----")
    print(context)
    print("---------------------------------")

    # Generate raw output.  max_new_tokens bounds only the continuation;
    # the previous `max_length=len(context.split()) + 50` counted *words*,
    # not tokens, and could end up smaller than the tokenized prompt,
    # causing errors or empty generations on long conversations.
    raw_output = generator(
        context, max_new_tokens=50, do_sample=True, temperature=0.7
    )[0]["generated_text"]

    print("----- TRACE: Raw LLM output -----")
    print(raw_output)
    print("---------------------------------")

    # Extract the final reply: the text after the last "Bot:" marker up to
    # the first newline; otherwise fall back to stripping the echoed prompt
    # (the pipeline returns prompt + continuation in generated_text).
    if "Bot:" in raw_output:
        reply = raw_output.split("Bot:")[-1].split("\n")[0].strip()
    else:
        reply = raw_output[len(context):].strip()

    # Build trace text shown in the debugging textbox.
    trace_text = (
        f"📄 Prompt sent to LLM:\n\n{context}\n\n"
        f"⚡ Raw LLM output:\n\n{raw_output}\n\n"
        f"✅ Final extracted reply:\n\n{reply}"
    )

    # Update history in place and return it for both chatbot outputs.
    history.append((user_message, reply))
    return history, history, trace_text
43
 
44
# --- UI layout and event wiring ---
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center;'>💬 Conversational Agent with Trace</h1>")

    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type your message and press Enter...")

    # Textbox mirroring the prompt / raw output / reply trace for debugging.
    trace_box = gr.Textbox(label="🧐 Trace Logs (for debugging)", lines=15)

    clear = gr.Button("Clear Chat")

    # chat() returns (history, history, trace_text); the duplicated history
    # fills both Chatbot output slots.  Chain a .then() to clear the input
    # box after the reply arrives, so the user can type the next message
    # without deleting the old one (previously the text lingered).
    msg.submit(chat, [msg, chatbot], [chatbot, chatbot, trace_box]).then(
        lambda: "", None, msg
    )
    # Reset chat history and trace.  chatbot is listed once — the previous
    # [chatbot, chatbot, trace_box] sent duplicate updates to the same
    # component, which newer Gradio versions reject.
    clear.click(lambda: ([], ""), None, [chatbot, trace_box], queue=False)

demo.launch()