Spaces:
Sleeping
Sleeping
File size: 12,169 Bytes
d855178 6edbc4c d855178 aebfa1c d855178 0837167 aebfa1c d855178 aebfa1c 0837167 d855178 6edbc4c d855178 6edbc4c d855178 6edbc4c d855178 6edbc4c 0837167 99818db 0837167 99818db 0837167 6edbc4c d855178 6edbc4c d855178 6edbc4c d855178 6edbc4c 0837167 6edbc4c d855178 6edbc4c 99818db 6edbc4c e334ddf d855178 6edbc4c d855178 6edbc4c d855178 6edbc4c d855178 6edbc4c aebfa1c d855178 6edbc4c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 |
"""
AI Programming Tutor - Production Version with Fine-tuned Model Only
No demo fallbacks - shows exact errors for debugging
Version: 3.0 - Production Ready, No Demo Mode
"""
import streamlit as st
import os
import tempfile
# Configure the Streamlit page (must be the first Streamlit call in the script).
# NOTE(review): the original emoji literals were mojibake (UTF-8 bytes decoded
# as Latin-1); restored to the most plausible intended emoji.
st.set_page_config(
    page_title="AI Programming Tutor",
    page_icon="🤖",
    layout="wide",
)
# Set up proper cache directories for HF Spaces
def setup_cache_directories() -> bool:
    """Point Hugging Face cache env vars at writable temp directories.

    HF Spaces containers often cannot write to the default ``~/.cache``
    location, so redirect ``HF_HOME``, ``TRANSFORMERS_CACHE`` and
    ``HF_DATASETS_CACHE`` into the system temp directory and create the
    folders up front.

    Returns:
        bool: True if every directory was exported and created; False if
        any step failed (the error is surfaced in the Streamlit UI).
    """
    try:
        # Single writable root shared by all three caches.
        hf_root = os.path.join(tempfile.gettempdir(), 'huggingface')
        # NOTE(review): TRANSFORMERS_CACHE is deprecated in recent
        # transformers releases in favor of HF_HOME, but it is harmless to
        # set and still honored by older versions.
        cache_dirs = {
            'HF_HOME': hf_root,
            'TRANSFORMERS_CACHE': os.path.join(hf_root, 'transformers'),
            'HF_DATASETS_CACHE': os.path.join(hf_root, 'datasets'),
        }
        for env_var, cache_path in cache_dirs.items():
            os.environ[env_var] = cache_path
            os.makedirs(cache_path, exist_ok=True)
            st.info(f"📁 Cache directory set: {env_var} = {cache_path}")
        return True
    except Exception as e:
        st.error(f"❌ Failed to set up cache directories: {e}")
        return False
# Run at import time so the cache env vars are in place before any model
# download is attempted.
setup_cache_directories()
# Try to import the fine-tuned model components. No demo fallback by
# design: if the project module is missing, show the exact error and halt.
try:
    from fine import ProgrammingEducationAI, ComprehensiveFeedback
    MODEL_AVAILABLE = True
except Exception as e:
    MODEL_AVAILABLE = False
    # Fixed: original used an f-string with no placeholders here.
    st.error("❌ CRITICAL ERROR: Cannot import fine-tuned model components")
    st.error(f"🔍 Import Error: {e}")
    st.error("💡 This is a production app - the fine-tuned model MUST be available")
    st.stop()
# Note: Using public model - no HF_TOKEN required. Set a token here (or read
# it from the environment) only if the model repo becomes gated/private.
HF_TOKEN = None # Set to None for public model
# NOTE(review): the original emojis in these UI strings arrived as mojibake
# (UTF-8 decoded as Latin-1) — some even split string literals across lines,
# which is a syntax error as scraped. They are restored below to plausible
# intended emojis; confirm against the deployed app if exact glyphs matter.

def _render_sidebar() -> str:
    """Render the settings/status sidebar and return the chosen student level."""
    with st.sidebar:
        st.header("⚙️ Settings")
        student_level = st.selectbox(
            "Student Level:",
            ["beginner", "intermediate", "advanced"],
            help="Adjusts feedback complexity",
        )
        st.markdown("---")
        st.markdown("### 📖 About")
        # Kept flush-left: indented lines inside st.markdown render as a
        # markdown code block.
        st.markdown("""
This AI tutor provides structured feedback on programming code:
- **Strengths**: What you did well
- **Weaknesses**: Areas for improvement
- **Issues**: Problems to fix
- **Improvements**: Step-by-step guidance
- **Learning Points**: Key concepts to understand
- **Questions**: Test your comprehension
- **Code Fix**: Improved version
""")
        # Model status.
        st.success("✅ Fine-tuned model available")
        st.success("🔓 Using public model - no authentication required")
        # Fixed: original used an f-string with no placeholders here.
        st.info("📁 Model path: FaroukTomori/codellama-7b-programming-education")
        # Memory optimization info.
        st.markdown("---")
        st.markdown("### 💾 Memory Optimization")
        st.info("🔧 8-bit quantization (with fallback)")
        st.info("📁 Proper cache directories configured")
        st.info("⚡ Auto device mapping for efficiency")
        st.warning("⚠️ Model size: ~13GB (quantized to ~7GB)")
        st.info("🔄 Auto-fallback if quantization fails")
        # Show whether the model is already cached in this session.
        if 'ai_tutor' in st.session_state:
            st.success("✅ Model loaded in session")
        else:
            st.info("⏳ Model not loaded yet - will load when you analyze code")
    return student_level


def _load_tutor() -> None:
    """Load the fine-tuned model into ``st.session_state['ai_tutor']``.

    Tries 8-bit quantization first and falls back to full precision if
    bitsandbytes is unavailable. On permission or loading failure the app is
    halted via ``st.stop()`` — there is deliberately no demo fallback.
    """
    model_path = "FaroukTomori/codellama-7b-programming-education"
    with st.spinner("🔄 Loading fine-tuned model (this may take 5-10 minutes on HF Spaces)..."):
        try:
            st.info("🔓 Using public model - no authentication required")
            st.info(f"📁 Attempting to load model from: {model_path}")
            st.info("🔧 Loading with memory optimization for HF Spaces...")
            st.info("💾 Using 8-bit quantization to reduce memory usage")
            st.info("📁 Using proper cache directories for permissions")
            ai_tutor = ProgrammingEducationAI(model_path)
            st.success("✅ Model class instantiated successfully")
            # Load model with memory optimization (with fallback).
            try:
                ai_tutor.load_model(load_in_8bit=True, device_map="auto")
                st.success("✅ Model loaded with 8-bit quantization!")
            except ImportError as e:
                if "bitsandbytes" in str(e):
                    st.warning("⚠️ 8-bit quantization failed, trying without it...")
                    ai_tutor.load_model(load_in_8bit=False, device_map="auto")
                    st.success("✅ Model loaded without 8-bit quantization!")
                else:
                    # Fixed: bare raise preserves the original traceback
                    # (original used `raise e`).
                    raise
            st.session_state['ai_tutor'] = ai_tutor
            st.success("✅ Fine-tuned model loaded successfully!")
        except PermissionError as e:
            st.error("❌ CACHE PERMISSION ERROR: Model download blocked")
            st.error(f"🔍 Error Type: {type(e).__name__}")
            st.error(f"📝 Error Message: {e}")
            st.error("🔍 This usually means:")
            st.error("   1. Another user is downloading the same model (wait 5-10 minutes)")
            st.error("   2. A previous download was interrupted (cache needs clearing)")
            st.error("🔧 Solutions:")
            st.error("   • Wait 5-10 minutes and try again")
            st.error("   • Restart your HF Space to clear cache")
            st.error("   • The model will download automatically on retry")
            st.error("💡 This is temporary - the model will load once cache is cleared")
            st.stop()
        except Exception as e:
            st.error("❌ CRITICAL ERROR: Failed to load fine-tuned model")
            st.error(f"🔍 Error Type: {type(e).__name__}")
            st.error(f"📝 Error Message: {e}")
            st.error("🔍 Full error details:")
            st.code(str(e), language="text")
            st.error("💡 This is a production app - the model MUST load successfully")
            st.error("💡 Check the error above and fix the model loading issue")
            st.stop()  # Stop the app completely


def _render_feedback(feedback) -> None:
    """Render a ComprehensiveFeedback object across seven themed tabs."""
    tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs([
        "✅ Strengths", "❌ Weaknesses", "🚨 Issues",
        "📈 Improvements", "📚 Learning", "❓ Questions", "🔧 Code Fix",
    ])
    with tab1:
        st.subheader("✅ Code Strengths")
        for strength in feedback.strengths:
            st.markdown(f"• {strength}")
    with tab2:
        st.subheader("❌ Areas for Improvement")
        for weakness in feedback.weaknesses:
            st.markdown(f"• {weakness}")
    with tab3:
        st.subheader("🚨 Issues to Address")
        for issue in feedback.issues:
            st.markdown(f"• {issue}")
    with tab4:
        st.subheader("📈 Step-by-Step Improvements")
        for i, step in enumerate(feedback.step_by_step_improvement, 1):
            st.markdown(f"**Step {i}:** {step}")
    with tab5:
        st.subheader("📚 Key Learning Points")
        for point in feedback.learning_points:
            st.markdown(f"• {point}")
    with tab6:
        st.subheader("❓ Comprehension Questions")
        st.markdown(f"**Question:** {feedback.comprehension_question}")
        st.markdown(f"**Answer:** {feedback.comprehension_answer}")
        st.markdown(f"**Explanation:** {feedback.explanation}")
    with tab7:
        st.subheader("🔧 Improved Code")
        st.code(feedback.improved_code, language="python")
        st.markdown("**What Changed:**")
        st.info(feedback.fix_explanation)


def main():
    """Entry point: render the tutor UI and run code analysis on demand."""
    st.title("🤖 AI Programming Tutor - Production")
    st.markdown("### Fine-tuned CodeLlama-7B for Programming Education")
    student_level = _render_sidebar()

    # Main content.
    st.markdown("---")
    code_input = st.text_area(
        "📝 Enter your code here:",
        height=200,
        placeholder="def hello_world():\n print('Hello, World!')\n return 'success'",
        help="Paste your Python code here for analysis",
    )

    if st.button("🚀 Analyze Code", type="primary"):
        if not code_input.strip():
            st.warning("⚠️ Please enter some code to analyze")
            return
        with st.spinner("🤖 Analyzing your code..."):
            try:
                # Lazy-load the model once per session.
                if 'ai_tutor' not in st.session_state:
                    _load_tutor()
                # Use fine-tuned model to generate feedback.
                try:
                    feedback = st.session_state['ai_tutor'].generate_comprehensive_feedback(
                        code_input, student_level)
                    st.success("✅ Feedback generated using fine-tuned model!")
                except Exception as e:
                    st.error("❌ CRITICAL ERROR: Failed to generate feedback")
                    st.error(f"🔍 Error Type: {type(e).__name__}")
                    st.error(f"📝 Error Message: {e}")
                    st.error("🔍 Full error details:")
                    st.code(str(e), language="text")
                    st.error("💡 Check the error above to fix the feedback generation issue")
                    st.stop()
                _render_feedback(feedback)
                st.success("✅ Analysis complete! Review each tab for comprehensive feedback.")
            except Exception as e:
                st.error("❌ CRITICAL ERROR: Unexpected error during analysis")
                st.error(f"🔍 Error Type: {type(e).__name__}")
                st.error(f"📝 Error Message: {e}")
                st.error("🔍 Full error details:")
                st.code(str(e), language="text")
                st.error("💡 This is a production app - all errors must be fixed")
                st.stop()
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Last-resort guard: surface any uncaught error in the UI and halt.
        st.error(f"❌ CRITICAL APPLICATION ERROR: {e}")
        st.error("💡 This is a production app - please fix all errors")
        st.stop()
|