File size: 12,169 Bytes
d855178
6edbc4c
 
 
d855178
 
aebfa1c
d855178
0837167
aebfa1c
d855178
 
 
 
 
 
aebfa1c
0837167
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d855178
 
 
 
 
 
6edbc4c
 
 
 
d855178
 
 
 
 
 
6edbc4c
 
d855178
6edbc4c
d855178
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6edbc4c
 
 
 
0837167
 
 
99818db
0837167
 
 
99818db
0837167
6edbc4c
 
 
d855178
6edbc4c
d855178
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6edbc4c
 
 
d855178
6edbc4c
 
 
 
 
 
 
 
0837167
 
 
 
 
 
 
 
6edbc4c
d855178
6edbc4c
 
99818db
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6edbc4c
 
 
e334ddf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d855178
6edbc4c
 
 
 
d855178
 
6edbc4c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d855178
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6edbc4c
 
 
d855178
 
6edbc4c
 
aebfa1c
 
d855178
 
 
 
6edbc4c
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
"""
AI Programming Tutor - Production Version with Fine-tuned Model Only
No demo fallbacks - shows exact errors for debugging
Version: 3.0 - Production Ready, No Demo Mode
"""

import streamlit as st
import os
import tempfile

# Configure page layout and browser-tab metadata.
# NOTE: st.set_page_config must be the very first Streamlit call in the
# script, so it runs at import time before any st.info/st.error below.
st.set_page_config(
    page_title="AI Programming Tutor",
    page_icon="πŸ€–",
    layout="wide"
)

# Set up proper cache directories for HF Spaces


def setup_cache_directories():
    """Redirect Hugging Face cache env vars to writable temp-dir paths.

    HF Spaces containers frequently deny writes to the default cache
    locations, so HF_HOME, TRANSFORMERS_CACHE and HF_DATASETS_CACHE are
    all pointed at directories under the system temp dir, which are
    created on the spot if absent.

    Returns:
        bool: True when every directory was configured, False on failure
        (the error is surfaced in the UI rather than raised).
    """
    try:
        hf_root = os.path.join(tempfile.gettempdir(), 'huggingface')
        targets = {
            'HF_HOME': hf_root,
            'TRANSFORMERS_CACHE': os.path.join(hf_root, 'transformers'),
            'HF_DATASETS_CACHE': os.path.join(hf_root, 'datasets'),
        }

        for name, path in targets.items():
            os.environ[name] = path
            os.makedirs(path, exist_ok=True)
            st.info(f"πŸ“ Cache directory set: {name} = {path}")

        return True
    except Exception as e:
        st.error(f"❌ Failed to set up cache directories: {e}")
        return False


# Set up cache directories (must happen before any Hugging Face download
# is attempted further down, so the env vars are already in place).
setup_cache_directories()

# Try to import the fine-tuned model components. This is a hard
# requirement: the app deliberately has no demo fallback, so an import
# failure halts the script immediately with full diagnostics.
try:
    from fine import ProgrammingEducationAI, ComprehensiveFeedback
    MODEL_AVAILABLE = True
except Exception as e:
    MODEL_AVAILABLE = False
    st.error(f"❌ CRITICAL ERROR: Cannot import fine-tuned model components")
    st.error(f"πŸ” Import Error: {e}")
    st.error("πŸ’‘ This is a production app - the fine-tuned model MUST be available")
    st.stop()

# Note: Using public model - no HF_TOKEN required
HF_TOKEN = None  # Set to None for public model


def main():
    """Render the AI Programming Tutor page and run the analyze workflow.

    Flow: sidebar settings -> code input -> on button press, lazily load
    the fine-tuned model into session state, generate feedback, and show
    it in tabs. All failures stop the app with diagnostics (production
    policy: no demo fallback).
    """
    st.title("πŸ€– AI Programming Tutor - Production")
    st.markdown("### Fine-tuned CodeLlama-7B for Programming Education")

    student_level = _render_sidebar()

    # Main content
    st.markdown("---")

    code_input = st.text_area(
        "πŸ“ Enter your code here:",
        height=200,
        placeholder="def hello_world():\n    print('Hello, World!')\n    return 'success'",
        help="Paste your Python code here for analysis"
    )

    if st.button("πŸš€ Analyze Code", type="primary"):
        if not code_input.strip():
            st.warning("⚠️ Please enter some code to analyze")
            return

        with st.spinner("πŸ€– Analyzing your code..."):
            try:
                # Load the model once per session; reruns reuse it.
                if 'ai_tutor' not in st.session_state:
                    _load_model_into_session()

                feedback = _generate_feedback(code_input, student_level)
                _render_feedback_tabs(feedback)

                st.success(
                    "βœ… Analysis complete! Review each tab for comprehensive feedback.")
            except Exception as e:
                _show_exception(
                    "❌ CRITICAL ERROR: Unexpected error during analysis", e)
                st.error("πŸ’‘ This is a production app - all errors must be fixed")
                st.stop()


def _render_sidebar():
    """Render the settings/about sidebar; return the chosen student level."""
    with st.sidebar:
        st.header("βš™οΈ Settings")

        student_level = st.selectbox(
            "Student Level:",
            ["beginner", "intermediate", "advanced"],
            help="Adjusts feedback complexity"
        )

        st.markdown("---")
        st.markdown("### πŸ“š About")
        st.markdown("""
        This AI tutor provides structured feedback on programming code:

        - **Strengths**: What you did well
        - **Weaknesses**: Areas for improvement
        - **Issues**: Problems to fix
        - **Improvements**: Step-by-step guidance
        - **Learning Points**: Key concepts to understand
        - **Questions**: Test your comprehension
        - **Code Fix**: Improved version
        """)

        # Show model status (import already succeeded or the app stopped).
        st.success("βœ… Fine-tuned model available")
        st.success("🌐 Using public model - no authentication required")
        st.info("πŸ“ Model path: FaroukTomori/codellama-7b-programming-education")

        # Memory optimization info
        st.markdown("---")
        st.markdown("### πŸ’Ύ Memory Optimization")
        st.info("πŸ”§ 8-bit quantization (with fallback)")
        st.info("πŸ“ Proper cache directories configured")
        st.info("⚑ Auto device mapping for efficiency")
        st.warning("⚠️ Model size: ~13GB (quantized to ~7GB)")
        st.info("πŸ”„ Auto-fallback if quantization fails")

        # Show whether the model has already been loaded this session.
        if 'ai_tutor' in st.session_state:
            st.success("βœ… Model loaded in session")
        else:
            st.info("⏳ Model not loaded yet - will load when you analyze code")

    return student_level


def _show_exception(title, exc):
    """Display an exception with type, message and full details in the UI."""
    st.error(title)
    st.error(f"πŸ” Error Type: {type(exc).__name__}")
    st.error(f"πŸ” Error Message: {str(exc)}")
    st.error("πŸ” Full error details:")
    st.code(str(exc), language="text")


def _load_model_into_session():
    """Load the fine-tuned model and cache it in st.session_state['ai_tutor'].

    Tries 8-bit quantization first and falls back to full precision when
    bitsandbytes is unavailable. Any other failure stops the app with
    diagnostics (no demo fallback in production).
    """
    with st.spinner("πŸš€ Loading fine-tuned model (this may take 5-10 minutes on HF Spaces)..."):
        try:
            # Use Hugging Face Model Hub (public repo, no token needed).
            model_path = "FaroukTomori/codellama-7b-programming-education"

            st.info("🌐 Using public model - no authentication required")
            st.info(f"πŸ” Attempting to load model from: {model_path}")

            # Memory optimization settings for HF Spaces
            st.info("πŸ”§ Loading with memory optimization for HF Spaces...")
            st.info("πŸ’Ύ Using 8-bit quantization to reduce memory usage")
            st.info("πŸ“ Using proper cache directories for permissions")

            ai_tutor = ProgrammingEducationAI(model_path)
            st.success("βœ… Model class instantiated successfully")

            # Load with 8-bit quantization; fall back to full precision
            # only when the failure is a missing bitsandbytes install.
            try:
                ai_tutor.load_model(load_in_8bit=True, device_map="auto")
                st.success("βœ… Model loaded with 8-bit quantization!")
            except ImportError as e:
                if "bitsandbytes" in str(e):
                    st.warning(
                        "⚠️ 8-bit quantization failed, trying without it...")
                    ai_tutor.load_model(load_in_8bit=False, device_map="auto")
                    st.success("βœ… Model loaded without 8-bit quantization!")
                else:
                    # Bare raise preserves the original traceback.
                    raise

            st.session_state['ai_tutor'] = ai_tutor
            st.success("βœ… Fine-tuned model loaded successfully!")
        except PermissionError as e:
            # Typically a contended or half-written HF cache on Spaces.
            st.error("❌ CACHE PERMISSION ERROR: Model download blocked")
            st.error(f"πŸ” Error Type: {type(e).__name__}")
            st.error(f"πŸ” Error Message: {str(e)}")
            st.error("πŸ” This usually means:")
            st.error(
                "   1. Another user is downloading the same model (wait 5-10 minutes)")
            st.error(
                "   2. A previous download was interrupted (cache needs clearing)")
            st.error("πŸ”§ Solutions:")
            st.error("   β€’ Wait 5-10 minutes and try again")
            st.error("   β€’ Restart your HF Space to clear cache")
            st.error("   β€’ The model will download automatically on retry")
            st.error(
                "πŸ’‘ This is temporary - the model will load once cache is cleared")
            st.stop()
        except Exception as e:
            _show_exception(
                "❌ CRITICAL ERROR: Failed to load fine-tuned model", e)
            st.error(
                "πŸ’‘ This is a production app - the model MUST load successfully")
            st.error(
                "πŸ’‘ Check the error above and fix the model loading issue")
            st.stop()  # Stop the app completely


def _generate_feedback(code_input, student_level):
    """Run the fine-tuned model on the code; return a ComprehensiveFeedback.

    Stops the app with diagnostics if generation fails.
    """
    try:
        feedback = st.session_state['ai_tutor'].generate_comprehensive_feedback(
            code_input, student_level)
        st.success("βœ… Feedback generated using fine-tuned model!")
        return feedback
    except Exception as e:
        _show_exception("❌ CRITICAL ERROR: Failed to generate feedback", e)
        st.error(
            "πŸ’‘ Check the error above to fix the feedback generation issue")
        st.stop()


def _render_feedback_tabs(feedback):
    """Display the structured feedback across seven themed tabs."""
    tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs([
        "βœ… Strengths", "❌ Weaknesses", "🚨 Issues",
        "πŸ“ˆ Improvements", "πŸŽ“ Learning", "❓ Questions", "πŸ”§ Code Fix"
    ])

    with tab1:
        st.subheader("βœ… Code Strengths")
        for strength in feedback.strengths:
            st.markdown(f"β€’ {strength}")

    with tab2:
        st.subheader("❌ Areas for Improvement")
        for weakness in feedback.weaknesses:
            st.markdown(f"β€’ {weakness}")

    with tab3:
        st.subheader("🚨 Issues to Address")
        for issue in feedback.issues:
            st.markdown(f"β€’ {issue}")

    with tab4:
        st.subheader("πŸ“ˆ Step-by-Step Improvements")
        for i, step in enumerate(feedback.step_by_step_improvement, 1):
            st.markdown(f"**Step {i}:** {step}")

    with tab5:
        st.subheader("πŸŽ“ Key Learning Points")
        for point in feedback.learning_points:
            st.markdown(f"β€’ {point}")

    with tab6:
        st.subheader("❓ Comprehension Questions")
        st.markdown(f"**Question:** {feedback.comprehension_question}")
        st.markdown(f"**Answer:** {feedback.comprehension_answer}")
        st.markdown(f"**Explanation:** {feedback.explanation}")

    with tab7:
        st.subheader("πŸ”§ Improved Code")
        st.code(feedback.improved_code, language="python")
        st.markdown("**What Changed:**")
        st.info(feedback.fix_explanation)


# Script entry point. Any exception that escapes main() is surfaced in
# the page itself before halting (production policy: never fail silently).
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        st.error(f"❌ CRITICAL APPLICATION ERROR: {e}")
        st.error("πŸ’‘ This is a production app - please fix all errors")
        st.stop()