# EduBot — Hugging Face Space (page-scrape residue: "Spaces: Sleeping" was the Space's status header, not source code)
| import gradio as gr | |
| from langchain.prompts import ChatPromptTemplate | |
| from langchain.schema import HumanMessage, SystemMessage, AIMessage | |
| from huggingface_hub import InferenceClient | |
| import os | |
| import time | |
| import logging | |
| import re | |
# --- Environment and Logging Setup ---
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Support both token names for flexibility
# (HF_TOKEN is the current Hugging Face convention; HUGGINGFACEHUB_API_TOKEN
# is the older Hub/LangChain name -- accept either.)
hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
    # Warn rather than abort: the UI can still render, but inference calls
    # will fail without credentials.
    logger.warning("Neither HF_TOKEN nor HUGGINGFACEHUB_API_TOKEN is set, the application may not work.")
# --- LLM Configuration ---
# Shared Inference client, created once at import time and reused for every
# request; calls are routed through the "together" provider.
client = InferenceClient(
    provider="together",
    api_key=hf_token,
)
# Subject-specific prompt: math tutoring (concept-first, process-oriented).
# {system_message} and {question} are filled in by .format() at request time.
math_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are an expert math tutor. For every math problem:
1. Break it down into key concepts
2. Briefly explain concepts
3. Outline the process for solving a similar problem
Be comprehensive and educational. Structure your response clearly."""),
    ("human", "{question}")
])
# Subject-specific prompt: research-skills mentoring (sources, citations,
# academic writing). Placeholders are filled by .format() at request time.
research_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are a research skills mentor. Help students with:
- Determining the validity of sources
- Evaluating source credibility and bias if a source is mentioned
- Proper citation formats (APA, MLA, Chicago, etc.)
- Research strategies and methodologies
- Academic writing techniques and structure
- Database navigation and search strategies
Provide detailed, actionable advice with specific examples."""),
    ("human", "{question}")
])
# Subject-specific prompt: study-skills coaching, including short quiz
# sessions. Placeholders are filled by .format() at request time.
study_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are a study skills coach. Help students with:
- Effective study methods for different learning styles
- Time management and scheduling techniques
- Memory techniques and retention strategies
- Test preparation and exam strategies
- Note-taking methods and organization
- Learning style optimization
- Offer short quiz sessions where you pose one to two questions at a time, then provide feedback on the students answers.
Provide comprehensive, personalized advice with practical examples."""),
    ("human", "{question}")
])
# Fallback prompt used when no subject keywords match (see detect_subject).
# NOTE(review): the leading characters on the bullet lines (e.g. "๐") look
# like mojibake of emoji from a bad encoding round-trip -- confirm the
# intended characters before shipping; left byte-for-byte intact here.
general_template = ChatPromptTemplate.from_messages([
    ("system", """{system_message}
You are EduBot, a comprehensive AI learning assistant. You help students with:
๐ Mathematics (Concise explanations rooted in understanding the concepts and process rather than answering the math problem directly)
๐ Research skills (source guidance, research advice, evaluation, and citation)
๐ Study strategies (effective learning techniques and exam preparation)
๐ ๏ธ Educational tools (guidance on learning resources and technologies)
Always be encouraging, patient, thorough, and comprehensive."""),
    ("human", "{question}")
])
# --- Core Logic Functions ---
def detect_subject(message):
    """Pick the prompt template and UI mode label matching a user message.

    Performs a case-insensitive keyword scan; the first rule whose keyword
    list hits wins (math, then research, then study). Falls back to the
    general-purpose template when nothing matches.
    """
    text = message.lower()
    # Ordered (keywords, template, mode-label) rules; order defines priority.
    rules = (
        (('math', 'solve', 'calculate', 'equation', 'formula', 'algebra', 'geometry', 'calculus', 'derivative', 'integral', 'theorem', 'proof'),
         math_template, "๐งฎ Math Mode"),
        (('research', 'source', 'citation', 'bibliography', 'reference', 'academic', 'paper', 'essay', 'thesis', 'database', 'journal'),
         research_template, "๐ Research Mode"),
        (('study', 'memorize', 'exam', 'test', 'quiz', 'review', 'learn', 'remember', 'focus', 'motivation', 'notes'),
         study_template, "๐ Study Mode"),
    )
    for keywords, template, mode in rules:
        if any(word in text for word in keywords):
            return template, mode
    return general_template, "๐ General Mode"
def smart_truncate(text, max_length=3000):
    """Truncate *text* near ``max_length`` at a natural boundary.

    Prefers cutting at the last complete sentence inside the first
    ``max_length`` characters; falls back to the last complete word, and
    finally to a hard character cut when the window contains no whitespace
    at all. Text at or under the limit is returned unchanged. Truncated
    results end with a "[Response truncated]" marker, so the returned
    string may slightly exceed ``max_length``.

    Args:
        text: String to truncate.
        max_length: Soft maximum length of the kept content.

    Returns:
        The original string, or a truncated copy ending with the marker.
    """
    if len(text) <= max_length:
        return text
    marker = "... [Response truncated - ask for continuation]"
    window = text[:max_length]
    # Best: cut at a sentence boundary (whitespace preceded by . ! or ?).
    sentences = re.split(r'(?<=[.!?])\s+', window)
    if len(sentences) > 1:
        return ' '.join(sentences[:-1]) + marker
    # Next best: drop the (probably clipped) final word.
    words = window.split()
    if len(words) > 1:
        return ' '.join(words[:-1]) + marker
    # BUGFIX: when the window holds a single huge token (e.g. a long URL),
    # the old code joined an empty list and returned ONLY the marker,
    # silently dropping all content. Hard-cut instead.
    return window + marker
def respond_with_enhanced_streaming(message, history):
    """Stream the bot's reply as progressively longer markdown strings.

    A generator: the first yield is a mode-tagged "Generating response..."
    placeholder shown while the (blocking) inference call runs; subsequent
    yields replay the model's answer a few words at a time; the final yield
    is the complete, mode-tagged response. Any exception is caught and
    yielded as a user-facing error string instead of propagating.

    Args:
        message: The user's current question (plain text).
        history: Chat history as a list of {"role", "content"} dicts
            (Gradio "messages" format), or None/empty for a fresh chat.
    """
    try:
        template, mode = detect_subject(message)
        # Build conversation history with proper LangChain message objects.
        # NOTE(review): `messages` is assembled below but never sent to the
        # model -- only `formatted_prompt` (system text + current question,
        # no prior turns) reaches text_generation(). Confirm whether the
        # conversation history was meant to be included in the prompt.
        messages = []
        # Add system message
        system_msg = SystemMessage(content="You are EduBot, an expert AI learning assistant. Provide comprehensive, educational responses that help students truly understand concepts.")
        messages.append(system_msg)
        # Add conversation history if available
        if history:
            for exchange in history[-5:]:  # Keep last 5 exchanges for context
                if exchange.get("role") == "user":
                    messages.append(HumanMessage(content=exchange["content"]))
                elif exchange.get("role") == "assistant":
                    messages.append(AIMessage(content=exchange["content"]))
        # Add current user message
        messages.append(HumanMessage(content=message))
        # Immediate feedback to the UI before the network round-trip.
        yield f"*{mode}*\n\nGenerating response..."
        logger.info(f"Processing {mode} query: {message[:50]}...")
        # Use LangChain template to format the prompt
        formatted_prompt = template.format(
            question=message,
            system_message="You are EduBot, an expert AI learning assistant. Provide comprehensive, educational responses that help students truly understand concepts."
        )
        # Use the Together provider with text_generation (single blocking call;
        # streaming below is simulated client-side, not server-side).
        response = client.text_generation(
            formatted_prompt,
            model="meta-llama/Meta-Llama-3.1-8B-Instruct",
            max_new_tokens=1024,
            temperature=0.7,
            top_p=0.9,
        )
        response = smart_truncate(response, max_length=3000)
        # Stream the response word by word
        words = response.split()
        partial_response = f"*{mode}*\n\n"
        for i, word in enumerate(words):
            partial_response += word + " "
            # Update the stream periodically (every 4th word) to limit the
            # number of UI updates; sleep paces the simulated typing effect.
            if i % 4 == 0:
                yield partial_response
                time.sleep(0.03)
        final_response = f"*{mode}*\n\n{response}"
        logger.info(f"Response completed. Length: {len(response)} characters")
        yield final_response
    except Exception as e:
        logger.exception("Error in response generation")
        yield f"Sorry, I encountered an error: {str(e)}"
# --- Fixed Gradio UI and CSS ---
# Stylesheet injected via gr.Blocks(css=...). The custom class names
# (.title-header, .chat-container, .input-section, .clear-button,
# .send-button, .input-textbox) are attached to components with
# elem_classes in the layout code below; the .gradio-* selectors target
# Gradio's own generated DOM.
custom_css = """
/* Main container styling */
.gradio-container {
    background-color: rgb(240, 236, 230) !important;
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}
/* Title styling */
.title-header {
    background-color: rgb(240, 236, 230);
    padding: 20px;
    border-bottom: 2px solid rgba(28, 18, 5, 0.1);
    text-align: left;
}
.title-header h1 {
    font-size: 1.8rem;
    font-weight: bold;
    color: black;
    margin: 0;
}
/* Chat container */
.chat-container {
    min-height: 500px;
    background-color: rgb(240, 236, 230);
}
/* Chatbot styling */
.gradio-chatbot {
    background-color: transparent !important;
    border: none !important;
    padding: 20px !important;
}
/* Message styling */
.gradio-chatbot .message.bot .markdown {
    background-color: rgb(240, 185, 103) !important;
    color: black !important;
    border-radius: 18px !important;
    padding: 12px 16px !important;
    box-shadow: 0 1px 2px rgba(0,0,0,0.05) !important;
    border: none !important;
    max-width: 70%;
    margin-left: 0;
    margin-right: auto;
    word-wrap: break-word;
}
.gradio-chatbot .message.user .markdown {
    background-color: rgb(242, 238, 233) !important;
    color: black !important;
    border-radius: 18px !important;
    padding: 12px 16px !important;
    box-shadow: 0 1px 2px rgba(0,0,0,0.05) !important;
    border: none !important;
    max-width: 70%;
    margin-left: auto;
    margin-right: 0;
    word-wrap: break-word;
}
/* Hide avatars */
.gradio-chatbot .avatar-container {
    display: none !important;
}
/* Input section styling */
.input-section {
    background-color: rgb(240, 236, 230);
    border-top: 2px solid rgba(28, 18, 5, 0.1);
    padding: 20px;
}
/* Button styling */
.clear-button, .send-button {
    background-color: rgb(28, 18, 5) !important;
    color: white !important;
    border: none !important;
    border-radius: 10px !important;
    padding: 8px 16px !important;
    cursor: pointer !important;
    margin: 5px !important;
}
.clear-button:hover, .send-button:hover {
    background-color: rgba(28, 18, 5, 0.8) !important;
}
.send-button {
    background-color: rgb(51, 102, 204) !important;
}
.send-button:hover {
    background-color: rgba(51, 102, 204, 0.8) !important;
}
/* Textbox styling - keep it simple */
.input-textbox {
    background-color: rgb(242, 238, 233) !important;
    border: 2px solid rgb(28, 18, 5) !important;
    border-radius: 20px !important;
}
.input-textbox textarea {
    background-color: transparent !important;
    border: none !important;
    color: black !important;
    padding: 15px !important;
    font-size: 16px !important;
}
"""
# Create the interface with proper structure
with gr.Blocks(css=custom_css, title="EduBot") as demo:
    # Title Section
    gr.HTML('<div class="title-header"><h1>๐ EduBot</h1></div>')
    # Chat Section
    with gr.Row(elem_classes=["chat-container"]):
        chatbot = gr.Chatbot(
            type="messages",           # history is a list of {"role", "content"} dicts
            show_copy_button=True,
            show_share_button=False,
            avatar_images=None,
            height=500
        )
    # Input Section
    with gr.Column(elem_classes=["input-section"]):
        with gr.Row():
            clear = gr.Button("Clear", elem_classes=["clear-button"])
            send = gr.Button("Send", elem_classes=["send-button"])
        msg = gr.Textbox(
            placeholder="Ask me about math, research, study strategies, or any educational topic...",
            show_label=False,
            lines=3,
            max_lines=8,
            elem_classes=["input-textbox"]
        )

    def respond_and_update(message, history):
        """Handle one user submission and stream the reply into the chat.

        Generator wired to msg.submit / send.click. Yields
        (history, textbox_value) pairs: the first yield shows the user's
        message and clears the textbox; later yields replace the assistant
        turn with progressively longer chunks from the streaming backend.
        """
        if not message.strip():
            # BUGFIX: this function is a generator, so the old
            # `return history, ""` raised StopIteration and its value never
            # reached Gradio -- the textbox was left uncleared. Yield the
            # unchanged state instead, then stop.
            yield history, ""
            return
        # Add user message to history
        history.append({"role": "user", "content": message})
        # Show the user message immediately and clear the textbox.
        yield history, ""
        # Stream the bot's response.
        full_response = ""
        for response_chunk in respond_with_enhanced_streaming(message, history):
            full_response = response_chunk
            # First chunk appends the assistant turn; later chunks replace it
            # in place so the chat shows a single, growing reply.
            if len(history) > 0 and history[-1]["role"] == "user":
                history.append({"role": "assistant", "content": full_response})
            else:
                history[-1] = {"role": "assistant", "content": full_response}
            yield history, ""

    def clear_chat():
        """Reset the chat history and empty the input textbox."""
        return [], ""

    # Set up the "send on Enter" event and send button click
    msg.submit(respond_and_update, [msg, chatbot], [chatbot, msg])
    send.click(respond_and_update, [msg, chatbot], [chatbot, msg])
    # Set up the clear button
    clear.click(clear_chat, outputs=[chatbot, msg])

if __name__ == "__main__":
    logger.info("Starting EduBot...")
    demo.launch(debug=True)