# Hugging Face Space: multi-agent chat demo (scraped while status was "Sleeping")
import gradio as gr
from transformers import pipeline

# Text-generation pipeline for the Zephyr-7B chat model.
# torch_dtype="auto" uses the checkpoint's native dtype; device_map="auto"
# places the weights on whatever accelerator(s) are available.
pipe = pipeline(
    "text-generation",
    model="HuggingFaceH4/zephyr-7b-beta",
    torch_dtype="auto",
    device_map="auto",
)
# System prompts defining each selectable agent persona.
# Keys are the labels shown in the UI dropdown; values are the system
# messages injected ahead of the user's turn.
agents = {
    "Pirate": "You are a friendly chatbot who always responds in the style of a pirate.",
    "Professor": "You are a knowledgeable professor who explains concepts in detail.",
    "Comedian": "You are a witty comedian who answers with humor and jokes.",
    "Motivator": "You are a motivational speaker who provides inspiring and uplifting responses.",
}
def multi_agent_system(agent, user_input):
    """Generate a reply to *user_input* in the persona of *agent*.

    Args:
        agent: Key into the module-level ``agents`` dict (e.g. ``"Pirate"``).
        user_input: The user's message text.

    Returns:
        The model's generated reply only (the prompt is not echoed back).

    Raises:
        KeyError: If *agent* is not a known persona.
    """
    # Build a two-turn conversation: the persona's system prompt, then the user.
    messages = [
        {"role": "system", "content": agents[agent]},
        {"role": "user", "content": user_input},
    ]
    # Render the conversation into a single prompt string using the model's
    # chat template; add_generation_prompt=True appends the assistant header.
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # BUG FIX: text-generation pipelines include the prompt in
    # "generated_text" by default, so the UI previously echoed the system
    # prompt and user message. return_full_text=False returns only new tokens.
    outputs = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        return_full_text=False,
    )
    return outputs[0]["generated_text"]
# --- Gradio UI ---
# One dropdown to pick a persona, a textbox for the user's message, and a
# read-only textbox showing the agent's reply.
with gr.Blocks() as demo:
    gr.Markdown("# Multi-Agent Chat System")
    with gr.Row():
        agent_dropdown = gr.Dropdown(
            choices=list(agents.keys()), label="Select an Agent", value="Pirate"
        )
        user_input = gr.Textbox(
            label="Enter your message:", placeholder="Type your query here..."
        )
    submit_button = gr.Button("Submit")
    chat_output = gr.Textbox(label="Agent's Response:", interactive=False)
    # Wire the button to the generation function.
    submit_button.click(
        fn=multi_agent_system,
        inputs=[agent_dropdown, user_input],
        outputs=chat_output,
    )

# Launch the app
demo.launch()