# AstonProject / src / streamlit_app.py
# (Hugging Face Spaces page residue, kept for provenance:
#  FaroukTomori - "Update src/streamlit_app.py" - commit 99818db, verified)
"""
AI Programming Tutor - Production Version with Fine-tuned Model Only
No demo fallbacks - shows exact errors for debugging
Version: 3.0 - Production Ready, No Demo Mode
"""
import streamlit as st
import os
import tempfile
# Configure page
st.set_page_config(
page_title="AI Programming Tutor",
page_icon="πŸ€–",
layout="wide"
)
# Set up proper cache directories for HF Spaces
def setup_cache_directories():
    """Point the Hugging Face cache environment variables at writable
    locations under the system temp directory.

    Returns:
        bool: True when every cache directory was configured and created,
        False if anything went wrong (the error is reported in the UI).
    """
    try:
        # All caches live under one root in the temp dir, which is writable
        # on HF Spaces even when the default home directory is not.
        hf_root = os.path.join(tempfile.gettempdir(), 'huggingface')
        env_to_path = {
            'HF_HOME': hf_root,
            'TRANSFORMERS_CACHE': os.path.join(hf_root, 'transformers'),
            'HF_DATASETS_CACHE': os.path.join(hf_root, 'datasets'),
        }
        for var_name, target_dir in env_to_path.items():
            os.environ[var_name] = target_dir
            os.makedirs(target_dir, exist_ok=True)
            st.info(f"πŸ“ Cache directory set: {var_name} = {target_dir}")
    except Exception as e:
        st.error(f"❌ Failed to set up cache directories: {e}")
        return False
    return True
# Set up cache directories
# Must happen before `fine` (and thus transformers) is imported, so the HF
# libraries pick up the redirected cache paths from the environment.
setup_cache_directories()

# Try to import the fine-tuned model components
# `fine` is the project-local module providing the model wrapper and the
# structured feedback dataclass used throughout main().
try:
    from fine import ProgrammingEducationAI, ComprehensiveFeedback
    MODEL_AVAILABLE = True
except Exception as e:
    # Production policy: the app is useless without the model components,
    # so surface the import error in the UI and halt rendering entirely.
    MODEL_AVAILABLE = False
    st.error(f"❌ CRITICAL ERROR: Cannot import fine-tuned model components")
    st.error(f"πŸ” Import Error: {e}")
    st.error("πŸ’‘ This is a production app - the fine-tuned model MUST be available")
    st.stop()

# Note: Using public model - no HF_TOKEN required
HF_TOKEN = None  # Set to None for public model
def _render_sidebar():
    """Draw the settings sidebar and return the selected student level."""
    with st.sidebar:
        st.header("βš™οΈ Settings")
        student_level = st.selectbox(
            "Student Level:",
            ["beginner", "intermediate", "advanced"],
            help="Adjusts feedback complexity"
        )
        st.markdown("---")
        st.markdown("### πŸ“š About")
        st.markdown("""
        This AI tutor provides structured feedback on programming code:
        - **Strengths**: What you did well
        - **Weaknesses**: Areas for improvement
        - **Issues**: Problems to fix
        - **Improvements**: Step-by-step guidance
        - **Learning Points**: Key concepts to understand
        - **Questions**: Test your comprehension
        - **Code Fix**: Improved version
        """)
        # Static status panel: the import of `fine` already succeeded at
        # module load, so the model components are known to be available.
        st.success("βœ… Fine-tuned model available")
        st.success("🌐 Using public model - no authentication required")
        st.info("πŸ“ Model path: FaroukTomori/codellama-7b-programming-education")
        # Memory optimization info
        st.markdown("---")
        st.markdown("### πŸ’Ύ Memory Optimization")
        st.info("πŸ”§ 8-bit quantization (with fallback)")
        st.info("πŸ“ Proper cache directories configured")
        st.info("⚑ Auto device mapping for efficiency")
        st.warning("⚠️ Model size: ~13GB (quantized to ~7GB)")
        st.info("πŸ”„ Auto-fallback if quantization fails")
        # The model loads lazily on first analysis and is cached in session
        # state; show which of the two states the session is currently in.
        if 'ai_tutor' in st.session_state:
            st.success("βœ… Model loaded in session")
        else:
            st.info("⏳ Model not loaded yet - will load when you analyze code")
    return student_level


def _load_model_into_session():
    """Load the fine-tuned model and cache it in st.session_state['ai_tutor'].

    On failure this reports detailed diagnostics in the UI and calls
    st.stop(), so the function only returns after a successful load.
    """
    with st.spinner("πŸš€ Loading fine-tuned model (this may take 5-10 minutes on HF Spaces)..."):
        try:
            # Public model on the Hugging Face Model Hub - no token needed.
            model_path = "FaroukTomori/codellama-7b-programming-education"
            st.info("🌐 Using public model - no authentication required")
            st.info(f"πŸ” Attempting to load model from: {model_path}")
            # Memory optimization settings for HF Spaces
            st.info("πŸ”§ Loading with memory optimization for HF Spaces...")
            st.info("πŸ’Ύ Using 8-bit quantization to reduce memory usage")
            st.info("πŸ“ Using proper cache directories for permissions")
            ai_tutor = ProgrammingEducationAI(model_path)
            st.success("βœ… Model class instantiated successfully")
            # Prefer 8-bit quantization; fall back to full precision when
            # bitsandbytes is missing from the Space environment.
            try:
                ai_tutor.load_model(load_in_8bit=True, device_map="auto")
                st.success("βœ… Model loaded with 8-bit quantization!")
            except ImportError as e:
                if "bitsandbytes" in str(e):
                    st.warning("⚠️ 8-bit quantization failed, trying without it...")
                    ai_tutor.load_model(load_in_8bit=False, device_map="auto")
                    st.success("βœ… Model loaded without 8-bit quantization!")
                else:
                    raise  # unrelated import failure - handled by outer except
            st.session_state['ai_tutor'] = ai_tutor
            st.success("βœ… Fine-tuned model loaded successfully!")
        except PermissionError as e:
            # A concurrent or interrupted download can leave the shared HF
            # cache locked; explain the recovery options instead of crashing.
            st.error("❌ CACHE PERMISSION ERROR: Model download blocked")
            st.error(f"πŸ” Error Type: {type(e).__name__}")
            st.error(f"πŸ” Error Message: {str(e)}")
            st.error("πŸ” This usually means:")
            st.error(" 1. Another user is downloading the same model (wait 5-10 minutes)")
            st.error(" 2. A previous download was interrupted (cache needs clearing)")
            st.error("πŸ”§ Solutions:")
            st.error(" β€’ Wait 5-10 minutes and try again")
            st.error(" β€’ Restart your HF Space to clear cache")
            st.error(" β€’ The model will download automatically on retry")
            st.error("πŸ’‘ This is temporary - the model will load once cache is cleared")
            st.stop()
        except Exception as e:
            st.error("❌ CRITICAL ERROR: Failed to load fine-tuned model")
            st.error(f"πŸ” Error Type: {type(e).__name__}")
            st.error(f"πŸ” Error Message: {str(e)}")
            st.error("πŸ” Full error details:")
            st.code(str(e), language="text")
            st.error("πŸ’‘ This is a production app - the model MUST load successfully")
            st.error("πŸ’‘ Check the error above and fix the model loading issue")
            st.stop()  # Stop the app completely


def _generate_feedback(code_input, student_level):
    """Run the cached fine-tuned model on the code and return its feedback.

    Reports diagnostics and calls st.stop() if generation fails, so a
    feedback object is always returned on the success path.
    """
    try:
        feedback = st.session_state['ai_tutor'].generate_comprehensive_feedback(
            code_input, student_level)
        st.success("βœ… Feedback generated using fine-tuned model!")
        return feedback
    except Exception as e:
        st.error("❌ CRITICAL ERROR: Failed to generate feedback")
        st.error(f"πŸ” Error Type: {type(e).__name__}")
        st.error(f"πŸ” Error Message: {str(e)}")
        st.error("πŸ” Full error details:")
        st.code(str(e), language="text")
        st.error("πŸ’‘ Check the error above to fix the feedback generation issue")
        st.stop()


def _render_feedback_tabs(feedback):
    """Display each section of the structured feedback in its own tab."""
    tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs([
        "βœ… Strengths", "❌ Weaknesses", "🚨 Issues",
        "πŸ“ˆ Improvements", "πŸŽ“ Learning", "❓ Questions", "πŸ”§ Code Fix"
    ])
    with tab1:
        st.subheader("βœ… Code Strengths")
        for strength in feedback.strengths:
            st.markdown(f"β€’ {strength}")
    with tab2:
        st.subheader("❌ Areas for Improvement")
        for weakness in feedback.weaknesses:
            st.markdown(f"β€’ {weakness}")
    with tab3:
        st.subheader("🚨 Issues to Address")
        for issue in feedback.issues:
            st.markdown(f"β€’ {issue}")
    with tab4:
        st.subheader("πŸ“ˆ Step-by-Step Improvements")
        for i, step in enumerate(feedback.step_by_step_improvement, 1):
            st.markdown(f"**Step {i}:** {step}")
    with tab5:
        st.subheader("πŸŽ“ Key Learning Points")
        for point in feedback.learning_points:
            st.markdown(f"β€’ {point}")
    with tab6:
        st.subheader("❓ Comprehension Questions")
        st.markdown(f"**Question:** {feedback.comprehension_question}")
        st.markdown(f"**Answer:** {feedback.comprehension_answer}")
        st.markdown(f"**Explanation:** {feedback.explanation}")
    with tab7:
        st.subheader("πŸ”§ Improved Code")
        st.code(feedback.improved_code, language="python")
        st.markdown("**What Changed:**")
        st.info(feedback.fix_explanation)


def main():
    """Entry point: render the tutor UI and run code analysis on demand."""
    st.title("πŸ€– AI Programming Tutor - Production")
    st.markdown("### Fine-tuned CodeLlama-7B for Programming Education")

    student_level = _render_sidebar()

    # Main content
    st.markdown("---")
    code_input = st.text_area(
        "πŸ“ Enter your code here:",
        height=200,
        placeholder="def hello_world():\n print('Hello, World!')\n return 'success'",
        help="Paste your Python code here for analysis"
    )

    if st.button("πŸš€ Analyze Code", type="primary"):
        if not code_input.strip():
            st.warning("⚠️ Please enter some code to analyze")
            return
        with st.spinner("πŸ€– Analyzing your code..."):
            try:
                # Load the model on first use; later runs reuse the cached
                # instance stored in session state.
                if 'ai_tutor' not in st.session_state:
                    _load_model_into_session()
                feedback = _generate_feedback(code_input, student_level)
                _render_feedback_tabs(feedback)
                st.success("βœ… Analysis complete! Review each tab for comprehensive feedback.")
            except Exception as e:
                st.error("❌ CRITICAL ERROR: Unexpected error during analysis")
                st.error(f"πŸ” Error Type: {type(e).__name__}")
                st.error(f"πŸ” Error Message: {str(e)}")
                st.error("πŸ” Full error details:")
                st.code(str(e), language="text")
                st.error("πŸ’‘ This is a production app - all errors must be fixed")
                st.stop()
# Script entry point: run the UI and surface any uncaught failure in-app.
if __name__ == "__main__":
    try:
        main()
    except Exception as app_error:
        st.error(f"❌ CRITICAL APPLICATION ERROR: {app_error}")
        st.error("πŸ’‘ This is a production app - please fix all errors")
        st.stop()