Spaces:
Sleeping
Sleeping
| """ | |
| AI Programming Tutor - Production Version with Fine-tuned Model Only | |
| No demo fallbacks - shows exact errors for debugging | |
| Version: 3.0 - Production Ready, No Demo Mode | |
| """ | |
| import streamlit as st | |
| import os | |
| import tempfile | |
# Streamlit page configuration — must be the first st.* call in the script.
_PAGE_CONFIG = {
    "page_title": "AI Programming Tutor",
    "page_icon": "π€",
    "layout": "wide",
}
st.set_page_config(**_PAGE_CONFIG)
| # Set up proper cache directories for HF Spaces | |
def setup_cache_directories():
    """Redirect Hugging Face cache env vars into a writable temp location.

    HF Spaces containers frequently deny writes to the default ``~/.cache``
    location, so ``HF_HOME``, ``TRANSFORMERS_CACHE`` and ``HF_DATASETS_CACHE``
    are all pointed at directories under the system temp dir.

    Returns:
        bool: True when every directory was created and exported;
        False when any step failed (the error is surfaced in the UI).
    """
    try:
        # Build the shared base path once instead of repeating the join.
        base_dir = os.path.join(tempfile.gettempdir(), 'huggingface')
        cache_dirs = {
            'HF_HOME': base_dir,
            'TRANSFORMERS_CACHE': os.path.join(base_dir, 'transformers'),
            'HF_DATASETS_CACHE': os.path.join(base_dir, 'datasets'),
        }
        for env_var, cache_path in cache_dirs.items():
            # Create the directory BEFORE exporting the env var, so a
            # makedirs failure never leaves a variable pointing at a
            # path that does not exist (the original did it backwards).
            os.makedirs(cache_path, exist_ok=True)
            os.environ[env_var] = cache_path
            st.info(f"π Cache directory set: {env_var} = {cache_path}")
        return True
    except Exception as e:
        # Best-effort setup: report in the UI and let the caller decide.
        st.error(f"β Failed to set up cache directories: {e}")
        return False
# Redirect HF caches before anything attempts a model download.
setup_cache_directories()

# Import the fine-tuned model components. This is a production app with
# no demo fallback: a failed import halts the UI with diagnostics.
try:
    from fine import ProgrammingEducationAI, ComprehensiveFeedback
except Exception as import_error:
    MODEL_AVAILABLE = False
    for message in (
        "β CRITICAL ERROR: Cannot import fine-tuned model components",
        f"π Import Error: {import_error}",
        "π‘ This is a production app - the fine-tuned model MUST be available",
    ):
        st.error(message)
    st.stop()
else:
    MODEL_AVAILABLE = True

# Public checkpoint on the Hub — no access token required.
HF_TOKEN = None
# Hugging Face Hub id of the fine-tuned checkpoint (public, no token).
# Hoisted to one constant — it was previously duplicated in the sidebar
# text and in the model-loading code.
_MODEL_HUB_PATH = "FaroukTomori/codellama-7b-programming-education"


def _render_sidebar():
    """Draw the settings/about sidebar and return the chosen student level."""
    with st.sidebar:
        st.header("βοΈ Settings")
        student_level = st.selectbox(
            "Student Level:",
            ["beginner", "intermediate", "advanced"],
            help="Adjusts feedback complexity"
        )
        st.markdown("---")
        st.markdown("### π About")
        st.markdown("""
This AI tutor provides structured feedback on programming code:
- **Strengths**: What you did well
- **Weaknesses**: Areas for improvement
- **Issues**: Problems to fix
- **Improvements**: Step-by-step guidance
- **Learning Points**: Key concepts to understand
- **Questions**: Test your comprehension
- **Code Fix**: Improved version
""")
        # Static status panel describing the deployment.
        st.success("β Fine-tuned model available")
        st.success("π Using public model - no authentication required")
        st.info(f"π Model path: {_MODEL_HUB_PATH}")
        st.markdown("---")
        st.markdown("### πΎ Memory Optimization")
        st.info("π§ 8-bit quantization (with fallback)")
        st.info("π Proper cache directories configured")
        st.info("β‘ Auto device mapping for efficiency")
        st.warning("β οΈ Model size: ~13GB (quantized to ~7GB)")
        st.info("π Auto-fallback if quantization fails")
        # Reflect whether the model already lives in this session.
        if 'ai_tutor' in st.session_state:
            st.success("β Model loaded in session")
        else:
            st.info("β³ Model not loaded yet - will load when you analyze code")
    return student_level


def _show_exception(e, headline):
    """Render the standard three-line error header for exception *e*."""
    st.error(headline)
    st.error(f"π Error Type: {type(e).__name__}")
    st.error(f"π Error Message: {str(e)}")


def _load_fine_tuned_model():
    """Load the fine-tuned model and store it in ``st.session_state``.

    Tries 8-bit quantization first and falls back to full precision when
    bitsandbytes is unavailable. Any unrecoverable failure is reported in
    the UI and halts the app via ``st.stop()`` — there is no demo mode.
    """
    with st.spinner("π Loading fine-tuned model (this may take 5-10 minutes on HF Spaces)..."):
        try:
            st.info("π Using public model - no authentication required")
            st.info(f"π Attempting to load model from: {_MODEL_HUB_PATH}")
            st.info("π§ Loading with memory optimization for HF Spaces...")
            st.info("πΎ Using 8-bit quantization to reduce memory usage")
            st.info("π Using proper cache directories for permissions")
            ai_tutor = ProgrammingEducationAI(_MODEL_HUB_PATH)
            st.success("β Model class instantiated successfully")
            try:
                ai_tutor.load_model(load_in_8bit=True, device_map="auto")
                st.success("β Model loaded with 8-bit quantization!")
            except ImportError as e:
                if "bitsandbytes" not in str(e):
                    # Bare `raise` preserves the original traceback
                    # (the original used `raise e`, which resets it).
                    raise
                st.warning("β οΈ 8-bit quantization failed, trying without it...")
                ai_tutor.load_model(load_in_8bit=False, device_map="auto")
                st.success("β Model loaded without 8-bit quantization!")
            st.session_state['ai_tutor'] = ai_tutor
            st.success("β Fine-tuned model loaded successfully!")
        except PermissionError as e:
            # Typically a contended or half-written HF cache on Spaces.
            _show_exception(e, "β CACHE PERMISSION ERROR: Model download blocked")
            st.error("π This usually means:")
            st.error(" 1. Another user is downloading the same model (wait 5-10 minutes)")
            st.error(" 2. A previous download was interrupted (cache needs clearing)")
            st.error("π§ Solutions:")
            st.error(" β’ Wait 5-10 minutes and try again")
            st.error(" β’ Restart your HF Space to clear cache")
            st.error(" β’ The model will download automatically on retry")
            st.error("π‘ This is temporary - the model will load once cache is cleared")
            st.stop()
        except Exception as e:
            _show_exception(e, "β CRITICAL ERROR: Failed to load fine-tuned model")
            st.error("π Full error details:")
            st.code(str(e), language="text")
            st.error("π‘ This is a production app - the model MUST load successfully")
            st.error("π‘ Check the error above and fix the model loading issue")
            st.stop()  # Stop the app completely


def _generate_feedback(code_input, student_level):
    """Run the session model on *code_input* and return the feedback object.

    Halts the app via ``st.stop()`` when generation fails.
    """
    try:
        feedback = st.session_state['ai_tutor'].generate_comprehensive_feedback(
            code_input, student_level)
        st.success("β Feedback generated using fine-tuned model!")
        return feedback
    except Exception as e:
        _show_exception(e, "β CRITICAL ERROR: Failed to generate feedback")
        st.error("π Full error details:")
        st.code(str(e), language="text")
        st.error("π‘ Check the error above to fix the feedback generation issue")
        st.stop()


def _display_feedback(feedback):
    """Render the structured feedback object across seven tabs."""
    tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs([
        "β Strengths", "β Weaknesses", "π¨ Issues",
        "π Improvements", "π Learning", "β Questions", "π§ Code Fix"
    ])
    with tab1:
        st.subheader("β Code Strengths")
        for strength in feedback.strengths:
            st.markdown(f"β’ {strength}")
    with tab2:
        st.subheader("β Areas for Improvement")
        for weakness in feedback.weaknesses:
            st.markdown(f"β’ {weakness}")
    with tab3:
        st.subheader("π¨ Issues to Address")
        for issue in feedback.issues:
            st.markdown(f"β’ {issue}")
    with tab4:
        st.subheader("π Step-by-Step Improvements")
        for i, step in enumerate(feedback.step_by_step_improvement, 1):
            st.markdown(f"**Step {i}:** {step}")
    with tab5:
        st.subheader("π Key Learning Points")
        for point in feedback.learning_points:
            st.markdown(f"β’ {point}")
    with tab6:
        st.subheader("β Comprehension Questions")
        st.markdown(f"**Question:** {feedback.comprehension_question}")
        st.markdown(f"**Answer:** {feedback.comprehension_answer}")
        st.markdown(f"**Explanation:** {feedback.explanation}")
    with tab7:
        st.subheader("π§ Improved Code")
        st.code(feedback.improved_code, language="python")
        st.markdown("**What Changed:**")
        st.info(feedback.fix_explanation)


def main():
    """Entry point: render the tutor UI and analyze code on demand."""
    st.title("π€ AI Programming Tutor - Production")
    st.markdown("### Fine-tuned CodeLlama-7B for Programming Education")
    student_level = _render_sidebar()

    # Main content
    st.markdown("---")
    code_input = st.text_area(
        "π Enter your code here:",
        height=200,
        placeholder="def hello_world():\n print('Hello, World!')\n return 'success'",
        help="Paste your Python code here for analysis"
    )

    if st.button("π Analyze Code", type="primary"):
        if not code_input.strip():
            st.warning("β οΈ Please enter some code to analyze")
            return
        with st.spinner("π€ Analyzing your code..."):
            try:
                # Lazy-load the model once per session.
                if 'ai_tutor' not in st.session_state:
                    _load_fine_tuned_model()
                feedback = _generate_feedback(code_input, student_level)
                _display_feedback(feedback)
                st.success("β Analysis complete! Review each tab for comprehensive feedback.")
            except Exception as e:
                # NOTE(review): st.stop() raised inside the helpers appears
                # to propagate into this handler as well, mirroring the
                # original nesting — confirm against Streamlit's StopException.
                _show_exception(e, "β CRITICAL ERROR: Unexpected error during analysis")
                st.error("π Full error details:")
                st.code(str(e), language="text")
                st.error("π‘ This is a production app - all errors must be fixed")
                st.stop()
# Script entry point: surface any uncaught failure in the Streamlit UI
# instead of letting the process die with a bare traceback.
if __name__ == "__main__":
    try:
        main()
    except Exception as fatal_error:
        st.error(f"β CRITICAL APPLICATION ERROR: {fatal_error}")
        st.error("π‘ This is a production app - please fix all errors")
        st.stop()