# Hugging Face Space chat app (scraped page-status header removed — it was UI junk, not code).
import gradio as gr
import os
import json
from dotenv import load_dotenv
from langchain_anthropic import ChatAnthropic

# Read ANTHROPIC_API_KEY (and any other settings) from a local .env file
# into the process environment before any client is constructed.
load_dotenv()
class LegionMariaAssistant:
    """Two-tier chat assistant for the Legion Maria Directorate of Youth Affairs.

    A lightweight "router" LLM first classifies each user query into one
    section of the JSON knowledge base; a second "response" LLM then answers
    in-persona using only that section's data.
    """

    def __init__(self):
        # Router LLM - lightweight for section classification.
        # Response LLM - for generating final answers.
        # Both currently use the same low-temperature Haiku model; they are
        # separate instances so the two roles can diverge later.
        self.router_llm = self._build_llm()
        self.response_llm = self._build_llm()
        self.data_content = {}
        self.load_data()

    @staticmethod
    def _build_llm(temperature=0.1):
        """Construct a ChatAnthropic client (low temperature for consistent output)."""
        return ChatAnthropic(
            anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
            model="claude-3-5-haiku-20241022",
            temperature=temperature,
        )

    def load_data(self):
        """Load structured JSON data from ./data.json into self.data_content.

        On any failure (missing file, bad JSON) the knowledge base is left
        as an empty dict and the error is reported on stdout.
        """
        data_file = "./data.json"
        if os.path.exists(data_file):
            try:
                with open(data_file, 'r', encoding='utf-8') as f:
                    self.data_content = json.load(f)
                print("Loaded Legion Maria JSON data successfully")
                print(f"Available sections: {list(self.data_content.keys())}")
            except Exception as e:
                print(f"Error loading data: {str(e)}")
                self.data_content = {}
        else:
            print("data.json not found")
            self.data_content = {}

    def route_query(self, message):
        """Router LLM decides which section of data to use.

        Returns one of self.data_content's keys, "general" for broad
        queries, or "about" as the fallback on errors / unrecognized output.
        """
        available_sections = list(self.data_content.keys())
        router_prompt = f"""You are a query router for the Legion Maria Youth Affairs system.
Available data sections: {available_sections}
Each section contains:
- about: mission, vision, core values, organizational information
- office: projects, community outreach, operational details
- leadership: organizational structure, leadership team, roles
User query: "{message}"
Respond with ONLY the most relevant section name from the available sections. If the query spans multiple sections or is general, respond with "general".
Examples:
- "Who is the director?" -> leadership
- "What is your mission?" -> about
- "Tell me about your projects" -> office
- "What do you do?" -> general
Section:"""
        try:
            response = self.router_llm.invoke([{"role": "user", "content": router_prompt}])
            # Models often decorate the answer ("Leadership.", '"about"');
            # normalize before matching so valid routes aren't lost.
            section = response.content.strip().lower().strip(' .\'"')
            # Validate case-insensitively, returning the real JSON key so the
            # caller's data lookup succeeds even for non-lowercase keys.
            sections_by_lower = {name.lower(): name for name in available_sections}
            if section in sections_by_lower:
                return sections_by_lower[section]
            elif section == "general":
                return "general"
            else:
                return "about"  # Default fallback
        except Exception as e:
            print(f"Router error: {str(e)}")
            return "about"  # Default fallback

    def chat_response(self, message, history):
        """Two-tier LLM system: Router + Specialist response.

        :param message: the current user message.
        :param history: list of [user, assistant] pairs from Gradio.
        :returns: the assistant's reply text (always a string).
        """
        if not message.strip():
            return "Please ask me something about our Legion Maria Youth Affairs!"
        try:
            if not self.data_content:
                return "I don't have that information available right now."
            # Step 1: Router LLM decides which section to use
            selected_section = self.route_query(message)
            print(f"Router selected section: {selected_section}")
            # Step 2: Get relevant data based on routing decision
            if selected_section == "general":
                # Use all data for general queries
                relevant_data = self.data_content
            else:
                # Use only the specific section
                relevant_data = {selected_section: self.data_content.get(selected_section, {})}
            # Build conversation context from the last 3 exchanges only, to
            # keep the prompt small.
            conversation_context = ""
            if history:
                conversation_context = "Previous conversation:\n"
                for user_msg, assistant_msg in history[-3:]:
                    conversation_context += f"User: {user_msg}\nAssistant: {assistant_msg}\n\n"
                conversation_context += "Current conversation:\n"
            # Step 3: Response LLM generates answer using only relevant data
            response_prompt = f"""You are Santa Legion from the Legion Maria Directorate of Youth Affairs. Speak in first person as a member of the organization.
Your Knowledge:
{json.dumps(relevant_data, indent=2)}
{conversation_context}User: {message}
Guidelines:
- You are Santa Legion, speak as "I" and "we" (the organization)
- Keep responses SHORT (1-3 sentences maximum)
- Be direct and personal
- Never mention being provided documents or data
- Speak as if this is your natural knowledge
- Use "our mission", "we believe", "I can help you with"
Answer:"""
            # Get response from specialist LLM
            response = self.response_llm.invoke([{"role": "user", "content": response_prompt}])
            return response.content
        except Exception as e:
            print(f"Error generating response: {str(e)}")
            return "I'm sorry, I'm having trouble right now. Please try again."
def main():
    """Build and launch the mobile-optimized Gradio chat interface."""
    assistant = LegionMariaAssistant()

    # Initial greeting message shown before the first user turn.
    initial_greeting = [
        [None, "π Hello! I'm Santa Legion from the Legion Maria Youth Affairs. I'm here to help you learn about our mission, leadership, projects, and activities. What would you like to know?"]
    ]

    # Create mobile-optimized Gradio chat interface
    with gr.Blocks(title="Legion Maria Chat", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# π¬ Legion Maria YA")

        # Mobile-optimized chat interface
        chatbot = gr.Chatbot(
            value=initial_greeting,
            height=400,  # Reduced for mobile
            show_label=False,
            container=True,
            bubble_full_width=True,  # Better for mobile
            show_share_button=False,
        )

        # Mobile-friendly input layout
        with gr.Row():
            msg = gr.Textbox(
                placeholder="Ask me anything...",
                show_label=False,
                scale=5,
                container=False,
                lines=1,
            )
            send_btn = gr.Button("π€", variant="primary", scale=1, size="sm")
        clear = gr.Button("π New Chat", variant="secondary", size="sm")

        # Chat functionality
        def respond(message, history):
            """Append the assistant's reply to history and clear the textbox."""
            if message.strip():
                bot_response = assistant.chat_response(message, history)
                history.append([message, bot_response])
            # BUGFIX: always return both outputs. The original returned None
            # for empty input, which breaks Gradio's unpacking into
            # [chatbot, msg] and raises an error in the UI.
            return history, ""

        # Event handlers
        msg.submit(respond, [msg, chatbot], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot], [chatbot, msg])
        clear.click(lambda: initial_greeting, None, chatbot)

    # Launch with better settings for chat app
    demo.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_api=False,
    )


if __name__ == "__main__":
    main()