# GPT-2 conversational agent with trace logging (Hugging Face Space demo)
import gradio as gr
from transformers import pipeline

# Load the GPT-2 text-generation pipeline once at module import.
# NOTE: max_length here is only a default for direct calls to `generator`;
# chat() passes its own generation-length argument per call.
generator = pipeline("text-generation", model="gpt2", max_length=200)
def chat(user_message, history):
    """Generate a GPT-2 reply to ``user_message`` given the chat ``history``.

    Parameters
    ----------
    user_message : str
        The latest message typed by the user.
    history : list[tuple[str, str]]
        Prior ``(user, bot)`` turns; mutated in place with the new turn.

    Returns
    -------
    tuple
        ``(history, history, trace_text)`` — history appears twice because
        the Gradio wiring lists the chatbot component twice in its outputs.
    """
    # Build a plain-text prompt from the conversation so far.
    context = ""
    for user_turn, bot_turn in history:
        context += f"User: {user_turn}\nBot: {bot_turn}\n"
    context += f"User: {user_message}\nBot:"

    # Print context for debugging (will also be in trace box).
    print("----- TRACE: Prompt to LLM -----")
    print(context)
    print("---------------------------------")

    # BUG FIX: the original passed max_length=len(context.split()) + 50.
    # max_length counts *tokens* (prompt included) while split() counts
    # words, and GPT-2 tokenization yields more tokens than words — so a
    # long chat could exceed max_length and error or truncate the prompt.
    # max_new_tokens requests 50 fresh tokens regardless of prompt length.
    raw_output = generator(
        context,
        max_new_tokens=50,
        do_sample=True,
        temperature=0.7,
    )[0]["generated_text"]

    print("----- TRACE: Raw LLM output -----")
    print(raw_output)
    print("---------------------------------")

    # The generated text echoes the prompt; keep only the text after the
    # last "Bot:" marker, up to the first newline.
    if "Bot:" in raw_output:
        reply = raw_output.split("Bot:")[-1].split("\n")[0].strip()
    else:
        reply = raw_output[len(context):].strip()

    # Build the trace shown in the UI. Mojibake repaired: the scraped
    # source showed "π", "β‘", "β" — UTF-8 emoji decoded as ISO-8859-7.
    trace_text = (
        f"🔍 Prompt sent to LLM:\n\n{context}\n\n"
        f"⚡ Raw LLM output:\n\n{raw_output}\n\n"
        f"✅ Final extracted reply:\n\n{reply}"
    )

    # Record the new turn and feed everything back to the UI.
    history.append((user_message, reply))
    return history, history, trace_text
# Gradio UI: chat window, message box, trace panel, and a clear button.
# Mojibake repaired: 💬 and 🧠 appeared as "π¬" / "π§" in the scraped source.
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center;'>💬 Conversational Agent with Trace</h1>")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type your message and press Enter...")
    trace_box = gr.Textbox(label="🧠 Trace Logs (for debugging)", lines=15)
    clear = gr.Button("Clear Chat")

    # chat() returns history twice, so the chatbot component is listed
    # twice in the outputs; the second write is redundant but harmless
    # and kept so the wiring matches chat()'s 3-value return.
    msg.submit(chat, [msg, chatbot], [chatbot, chatbot, trace_box])
    clear.click(lambda: ([], [], ""), None, [chatbot, chatbot, trace_box], queue=False)

demo.launch()