"""
AI Programming Tutor - Production Version with Fine-tuned Model Only
No demo fallbacks - shows exact errors for debugging
Version: 3.0 - Production Ready, No Demo Mode
"""
import streamlit as st
import os
import tempfile

# Configure page (must be the first Streamlit call in the script)
st.set_page_config(
    page_title="AI Programming Tutor",
    page_icon="🤖",
    layout="wide"
)

# Hugging Face model repository used throughout the app.
MODEL_PATH = "FaroukTomori/codellama-7b-programming-education"


def setup_cache_directories():
    """Point the Hugging Face cache env vars at a writable temp location.

    HF Spaces containers often have a read-only home directory, so the
    default ~/.cache/huggingface location fails with PermissionError.

    Returns:
        bool: True when every directory was created and exported, False
        on any failure (the error is surfaced in the UI, not raised).
    """
    try:
        cache_dirs = {
            'HF_HOME': os.path.join(tempfile.gettempdir(), 'huggingface'),
            'TRANSFORMERS_CACHE': os.path.join(
                tempfile.gettempdir(), 'huggingface', 'transformers'),
            'HF_DATASETS_CACHE': os.path.join(
                tempfile.gettempdir(), 'huggingface', 'datasets'),
        }
        for env_var, cache_path in cache_dirs.items():
            os.environ[env_var] = cache_path
            os.makedirs(cache_path, exist_ok=True)
            st.info(f"📁 Cache directory set: {env_var} = {cache_path}")
        return True
    except Exception as e:
        st.error(f"❌ Failed to set up cache directories: {e}")
        return False


# Cache dirs must be exported BEFORE transformers is imported (via `fine`),
# otherwise the library snapshots the default cache paths.
setup_cache_directories()

# Import the fine-tuned model components; this is a hard requirement.
try:
    from fine import ProgrammingEducationAI, ComprehensiveFeedback
    MODEL_AVAILABLE = True
except Exception as e:
    MODEL_AVAILABLE = False
    st.error("❌ CRITICAL ERROR: Cannot import fine-tuned model components")
    st.error(f"🔍 Import Error: {e}")
    st.error("💡 This is a production app - the fine-tuned model MUST be available")
    st.stop()

# Note: Using public model - no HF_TOKEN required
HF_TOKEN = None  # Set to None for public model


def _render_sidebar():
    """Render the settings/about sidebar.

    Returns:
        str: the selected student level ("beginner" | "intermediate" |
        "advanced"), used to adjust feedback complexity.
    """
    with st.sidebar:
        st.header("⚙️ Settings")
        student_level = st.selectbox(
            "Student Level:",
            ["beginner", "intermediate", "advanced"],
            help="Adjusts feedback complexity"
        )
        st.markdown("---")
        st.markdown("### 📚 About")
        st.markdown("""
This AI tutor provides structured feedback on programming code:
- **Strengths**: What you did well
- **Weaknesses**: Areas for improvement
- **Issues**: Problems to fix
- **Improvements**: Step-by-step guidance
- **Learning Points**: Key concepts to understand
- **Questions**: Test your comprehension
- **Code Fix**: Improved version
""")
        # Show model status
        st.success("✅ Fine-tuned model available")
        st.success("🌐 Using public model - no authentication required")
        st.info(f"📁 Model path: {MODEL_PATH}")
        # Memory optimization info
        st.markdown("---")
        st.markdown("### 💾 Memory Optimization")
        st.info("🔧 8-bit quantization (with fallback)")
        st.info("📁 Proper cache directories configured")
        st.info("⚡ Auto device mapping for efficiency")
        st.warning("⚠️ Model size: ~13GB (quantized to ~7GB)")
        st.info("🔄 Auto-fallback if quantization fails")
        # Show whether the model has already been loaded this session.
        if 'ai_tutor' in st.session_state:
            st.success("✅ Model loaded in session")
        else:
            st.info("⏳ Model not loaded yet - will load when you analyze code")
    return student_level


def _ensure_model_loaded():
    """Load the fine-tuned model into st.session_state['ai_tutor'] once.

    No-op when the model is already cached in the session. On any
    unrecoverable failure the app is halted via st.stop() after the
    error details are rendered.
    """
    if 'ai_tutor' in st.session_state:
        return
    with st.spinner("🚀 Loading fine-tuned model (this may take 5-10 minutes on HF Spaces)..."):
        try:
            st.info("🌐 Using public model - no authentication required")
            st.info(f"🔍 Attempting to load model from: {MODEL_PATH}")
            st.info("🔧 Loading with memory optimization for HF Spaces...")
            st.info("💾 Using 8-bit quantization to reduce memory usage")
            st.info("📁 Using proper cache directories for permissions")

            ai_tutor = ProgrammingEducationAI(MODEL_PATH)
            st.success("✅ Model class instantiated successfully")

            # Prefer 8-bit quantization; fall back to full precision when
            # bitsandbytes is missing or incompatible with the runtime.
            try:
                ai_tutor.load_model(load_in_8bit=True, device_map="auto")
                st.success("✅ Model loaded with 8-bit quantization!")
            except ImportError as e:
                if "bitsandbytes" in str(e):
                    st.warning("⚠️ 8-bit quantization failed, trying without it...")
                    ai_tutor.load_model(load_in_8bit=False, device_map="auto")
                    st.success("✅ Model loaded without 8-bit quantization!")
                else:
                    # Bare raise preserves the original traceback
                    # (`raise e` would reset its origin to this line).
                    raise

            st.session_state['ai_tutor'] = ai_tutor
            st.success("✅ Fine-tuned model loaded successfully!")
        except PermissionError as e:
            st.error("❌ CACHE PERMISSION ERROR: Model download blocked")
            st.error(f"🔍 Error Type: {type(e).__name__}")
            st.error(f"🔍 Error Message: {str(e)}")
            st.error("🔍 This usually means:")
            st.error("   1. Another user is downloading the same model (wait 5-10 minutes)")
            st.error("   2. A previous download was interrupted (cache needs clearing)")
            st.error("🔧 Solutions:")
            st.error("   • Wait 5-10 minutes and try again")
            st.error("   • Restart your HF Space to clear cache")
            st.error("   • The model will download automatically on retry")
            st.error("💡 This is temporary - the model will load once cache is cleared")
            st.stop()
        except Exception as e:
            st.error("❌ CRITICAL ERROR: Failed to load fine-tuned model")
            st.error(f"🔍 Error Type: {type(e).__name__}")
            st.error(f"🔍 Error Message: {str(e)}")
            st.error("🔍 Full error details:")
            st.code(str(e), language="text")
            st.error("💡 This is a production app - the model MUST load successfully")
            st.error("💡 Check the error above and fix the model loading issue")
            st.stop()  # Stop the app completely


def _generate_feedback(code_input, student_level):
    """Run the fine-tuned model on the submitted code.

    Args:
        code_input: raw source code pasted by the student.
        student_level: "beginner" | "intermediate" | "advanced".

    Returns:
        ComprehensiveFeedback produced by the model. Halts the app on
        generation failure after rendering the error.
    """
    try:
        feedback = st.session_state['ai_tutor'].generate_comprehensive_feedback(
            code_input, student_level)
        st.success("✅ Feedback generated using fine-tuned model!")
        return feedback
    except Exception as e:
        st.error("❌ CRITICAL ERROR: Failed to generate feedback")
        st.error(f"🔍 Error Type: {type(e).__name__}")
        st.error(f"🔍 Error Message: {str(e)}")
        st.error("🔍 Full error details:")
        st.code(str(e), language="text")
        st.error("💡 Check the error above to fix the feedback generation issue")
        st.stop()


def _render_feedback(feedback):
    """Render one tab per section of the structured feedback object."""
    tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs([
        "✅ Strengths", "❌ Weaknesses", "🚨 Issues", "📈 Improvements",
        "🎓 Learning", "❓ Questions", "🔧 Code Fix"
    ])
    with tab1:
        st.subheader("✅ Code Strengths")
        for strength in feedback.strengths:
            st.markdown(f"• {strength}")
    with tab2:
        st.subheader("❌ Areas for Improvement")
        for weakness in feedback.weaknesses:
            st.markdown(f"• {weakness}")
    with tab3:
        st.subheader("🚨 Issues to Address")
        for issue in feedback.issues:
            st.markdown(f"• {issue}")
    with tab4:
        st.subheader("📈 Step-by-Step Improvements")
        for i, step in enumerate(feedback.step_by_step_improvement, 1):
            st.markdown(f"**Step {i}:** {step}")
    with tab5:
        st.subheader("🎓 Key Learning Points")
        for point in feedback.learning_points:
            st.markdown(f"• {point}")
    with tab6:
        st.subheader("❓ Comprehension Questions")
        st.markdown(f"**Question:** {feedback.comprehension_question}")
        st.markdown(f"**Answer:** {feedback.comprehension_answer}")
        st.markdown(f"**Explanation:** {feedback.explanation}")
    with tab7:
        st.subheader("🔧 Improved Code")
        st.code(feedback.improved_code, language="python")
        st.markdown("**What Changed:**")
        st.info(feedback.fix_explanation)


def main():
    """App entry point: render UI, load model lazily, analyze code."""
    st.title("🤖 AI Programming Tutor - Production")
    st.markdown("### Fine-tuned CodeLlama-7B for Programming Education")

    student_level = _render_sidebar()

    # Main content
    st.markdown("---")
    code_input = st.text_area(
        "📝 Enter your code here:",
        height=200,
        placeholder="def hello_world():\n    print('Hello, World!')\n    return 'success'",
        help="Paste your Python code here for analysis"
    )

    if st.button("🚀 Analyze Code", type="primary"):
        if not code_input.strip():
            st.warning("⚠️ Please enter some code to analyze")
            return
        with st.spinner("🤖 Analyzing your code..."):
            try:
                _ensure_model_loaded()
                feedback = _generate_feedback(code_input, student_level)
                _render_feedback(feedback)
                st.success("✅ Analysis complete! Review each tab for comprehensive feedback.")
            except Exception as e:
                st.error("❌ CRITICAL ERROR: Unexpected error during analysis")
                st.error(f"🔍 Error Type: {type(e).__name__}")
                st.error(f"🔍 Error Message: {str(e)}")
                st.error("🔍 Full error details:")
                st.code(str(e), language="text")
                st.error("💡 This is a production app - all errors must be fixed")
                st.stop()


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        st.error(f"❌ CRITICAL APPLICATION ERROR: {e}")
        st.error("💡 This is a production app - please fix all errors")
        st.stop()