File size: 25,995 Bytes
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
faeb908
 
a9228cc
 
 
 
 
 
 
 
f8dc8be
 
a9228cc
f8dc8be
c802a06
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
 
34d428f
a9228cc
 
faeb908
 
a9228cc
 
 
 
 
 
 
 
 
 
faeb908
a9228cc
 
faeb908
a9228cc
 
 
faeb908
a9228cc
 
 
 
 
 
 
 
 
faeb908
a9228cc
 
 
faeb908
a9228cc
faeb908
a9228cc
 
c802a06
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c802a06
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f8dc8be
 
 
 
a9228cc
f8dc8be
 
 
a9228cc
f8dc8be
 
 
 
 
a9228cc
f8dc8be
 
 
a9228cc
f8dc8be
 
 
 
a9228cc
f8dc8be
 
 
a9228cc
f8dc8be
 
a9228cc
 
f8dc8be
 
a9228cc
f8dc8be
a9228cc
f8dc8be
 
a9228cc
f8dc8be
 
a9228cc
 
 
f8dc8be
 
a9228cc
f8dc8be
a9228cc
c802a06
a9228cc
c802a06
f8dc8be
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f8dc8be
c802a06
 
 
a9228cc
c802a06
 
f8dc8be
 
 
 
 
c802a06
 
 
f8dc8be
 
 
c802a06
a9228cc
f8dc8be
 
c802a06
f8dc8be
 
 
c802a06
 
 
f8dc8be
c802a06
 
 
 
 
 
a9228cc
c802a06
 
 
 
f8dc8be
 
c802a06
 
a9228cc
c802a06
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f8dc8be
faeb908
 
 
 
 
 
 
 
35ef221
faeb908
c802a06
 
 
 
faeb908
 
 
 
c802a06
 
 
faeb908
 
c802a06
 
faeb908
 
 
 
 
 
 
 
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c802a06
f8dc8be
a9228cc
 
f8dc8be
 
a9228cc
 
f8dc8be
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f8dc8be
a9228cc
 
 
 
f8dc8be
 
 
 
 
a9228cc
 
 
 
 
 
f8dc8be
 
a9228cc
f8dc8be
a9228cc
 
 
 
 
f8dc8be
a9228cc
f8dc8be
 
a9228cc
 
 
 
 
 
f8dc8be
a9228cc
f8dc8be
a9228cc
f8dc8be
 
a9228cc
 
 
 
f8dc8be
a9228cc
f8dc8be
a9228cc
 
 
f8dc8be
a9228cc
f8dc8be
a9228cc
 
 
f8dc8be
a9228cc
f8dc8be
a9228cc
 
 
 
 
 
f8dc8be
 
 
 
 
a9228cc
faeb908
288b0d7
bbe3cbe
 
b3cc9df
a9228cc
 
 
faeb908
 
a9228cc
40a1f82
a9228cc
 
288b0d7
3246bc0
a9228cc
efb7cc8
 
 
 
a9228cc
 
f8dc8be
a9228cc
f8dc8be
 
 
efb7cc8
 
 
 
 
f8dc8be
 
 
 
 
3246bc0
a9228cc
c802a06
a9228cc
c802a06
 
 
 
 
 
faeb908
a9228cc
f8dc8be
 
 
 
 
 
a9228cc
f8dc8be
a9228cc
c802a06
 
 
 
 
 
a9228cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c802a06
 
a9228cc
c802a06
 
 
 
 
f8dc8be
c802a06
 
 
f8dc8be
c802a06
 
 
a9228cc
c802a06
 
faeb908
 
 
c802a06
faeb908
 
 
 
 
a9228cc
f8dc8be
a9228cc
f8dc8be
 
 
 
 
c802a06
f8dc8be
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
# road2success_app.py
"""
Road2Success Gradio app (fixed resume parsing + improved AI mentor).
Optional: set OPENAI_API_KEY env var to use OpenAI Chat completions for the mentor (recommended).
Install optional dependencies for best results:
    pip install gradio requests beautifulsoup4 transformers torch PyPDF2 pdfminer.six openai
If you don't install heavy libs, the app will fallback gracefully and show messages.
"""

import os
import json
import datetime
import requests
from bs4 import BeautifulSoup

import gradio as gr

# Optional ML / API libs (wrapped in try..except to avoid crashes)
try:
    from transformers import pipeline
except Exception:
    pipeline = None

try:
    import PyPDF2
except Exception:
    PyPDF2 = None

# pdfminer fallback
try:
    from pdfminer.high_level import extract_text as pdfminer_extract_text
except Exception:
    pdfminer_extract_text = None

# Optional OpenAI path -- recommended for accurate mentor answers
OPENAI_KEY = os.environ.get("OPENAI_API_KEY", None)
if OPENAI_KEY:
    try:
        import openai
        openai.api_key = OPENAI_KEY
    except Exception:
        openai = None

# Leaderboard path
# JSON persistence for quiz scores; created eagerly at import so the quiz
# tab's read path (show_leaderboard) never hits a missing file.
LEADERBOARD_FILE = "road2success_leaderboard.json"
if not os.path.exists(LEADERBOARD_FILE):
    with open(LEADERBOARD_FILE, "w") as f:
        json.dump({"scores": []}, f, indent=2)

# ------------------ AI MENTOR ------------------
# Helper: HF generator (lazy init)
# Module-level cache for the HuggingFace text-generation pipeline;
# populated on first use by get_hf_generator().
_hf_gen = None
def get_hf_generator(model_name="distilgpt2"):
    """Lazily build and cache a HuggingFace text-generation pipeline.

    Returns the cached generator, or None when transformers is not
    installed or the model fails to load.
    """
    global _hf_gen
    if _hf_gen is not None:
        return _hf_gen
    if pipeline is None:
        return None
    try:
        # CPU-only (device=-1); built once, then reused across calls.
        _hf_gen = pipeline("text-generation", model=model_name, device=-1)
    except Exception as exc:
        print("HF generator load failed:", exc)
        _hf_gen = None
    return _hf_gen

def ai_mentor_query(prompt, short_answer=True, eli5=False, max_new_tokens=120):
    """
    Return a concise mentor answer for *prompt*.

    Priority path:
      1) OpenAI ChatCompletion (if key present and openai installed)
      2) HuggingFace local generator (if transformers available)
      3) Fallback helpful message

    Parameters:
        prompt: question text from the user.
        short_answer: ask the model to reply in at most 3 sentences.
        eli5: ask the model for an extra-simple (ELI5) explanation.
        max_new_tokens: generation budget for the local HF fallback path.
    """
    if not prompt or not str(prompt).strip():
        return "Ask a clear question about study plans, hackathons, or projects."

    # Build instruction modifiers appended after the user's question.
    modifiers = []
    if short_answer:
        modifiers.append("Give a concise answer in at most 3 sentences.")
    if eli5:
        modifiers.append("Explain simply, like you're explaining to someone new (ELI5).")
    modifier_text = " ".join(modifiers)

    # 1) OpenAI ChatCompletion path (best quality) ------------------------------------------------
    if OPENAI_KEY and 'openai' in globals() and openai is not None:
        try:
            system_prompt = (
                "You are an expert mentor for students and early-career engineers. "
                "Be helpful, concise, and provide actionable steps when relevant."
            )
            user_prompt = f"{prompt.strip()}\n\n{modifier_text}".strip()
            # BUG FIX: the old model selector chose between two model names based
            # on whether openai.ChatCompletion exists — meaningless, since the
            # call below uses ChatCompletion either way. Use one model name.
            resp = openai.ChatCompletion.create(
                model="gpt-4o-mini",
                messages=[{"role": "system", "content": system_prompt},
                          {"role": "user", "content": user_prompt}],
                temperature=0.2,
                max_tokens=300,
            )
            # Older openai versions return a dict-like response; newer objects
            # are stringified as a last resort.
            if isinstance(resp, dict):
                ans = resp.get("choices", [{}])[0].get("message", {}).get("content", "")
            else:
                ans = str(resp)
            return ans.strip()
        except Exception as e:
            print("OpenAI path failed:", e)
            # fall through to HF path

    # 2) HF local generator fallback ------------------------------------------------------------
    gen = get_hf_generator()
    if gen is not None:
        try:
            # Brief instruction + "Answer:" cue helps small causal LMs.
            hf_prompt = f"{prompt.strip()}\n\n{modifier_text}\nAnswer:"
            # return_full_text=False avoids echoing the prompt on most
            # transformers versions; the trim below covers the rest.
            out = gen(hf_prompt, max_new_tokens=max_new_tokens, do_sample=False,
                      num_return_sequences=1, return_full_text=False)
            # pipeline may return a list of dicts or other shapes
            if isinstance(out, list) and out:
                text = out[0].get("generated_text") or out[0].get("text") or str(out[0])
            else:
                text = str(out)
            text = text.replace("\n", " ").strip()
            # If the generator repeated the prompt, trim the repeated portion.
            if hf_prompt in text:
                text = text.split(hf_prompt, 1)[-1].strip()
            # Keep the answer short — take first 900 chars.
            return text[:900]
        except Exception as e:
            print("HF generation failed:", e)

    # 3) Final graceful fallback ----------------------------------------------------------------
    return ("AI model unavailable locally. To get accurate mentor answers enable the OpenAI path "
            "(set OPENAI_API_KEY env var) or install transformers and a text-generation model.")


# ------------------ ASR / TTS (optional) ------------------
# Lazily-initialized singleton pipelines, populated by get_asr()/get_tts().
_asr = None
_tts = None
def get_asr():
    """Lazily load and cache the Whisper-tiny speech-to-text pipeline (or None)."""
    global _asr
    if _asr is not None or pipeline is None:
        return _asr
    try:
        # CPU-only; loaded once on first voice query.
        _asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=-1)
    except Exception as exc:
        print("ASR load failed:", exc)
        _asr = None
    return _asr

def get_tts():
    """Lazily load and cache the MMS text-to-speech pipeline (or None)."""
    global _tts
    if _tts is not None or pipeline is None:
        return _tts
    try:
        # CPU-only; loaded once on first spoken answer.
        _tts = pipeline("text-to-speech", model="facebook/mms-tts-eng", device=-1)
    except Exception as exc:
        print("TTS load failed:", exc)
        _tts = None
    return _tts

def voice_mentor(audio_path):
    """Voice pipeline: transcribe the recording, ask the mentor, speak the reply.

    Returns (transcript, answer_text, audio) where audio is a
    (sampling_rate, waveform) pair or None when TTS is unavailable/fails.
    """
    if not audio_path:
        return "No audio received.", "Please record a question.", None

    asr = get_asr()
    if asr is None:
        transcript = "Speech recognition model not available. Type the question instead."
    else:
        try:
            transcript = asr(audio_path).get("text", "").strip() or "Could not transcribe audio."
        except Exception as exc:
            print("ASR error:", exc)
            transcript = "Could not transcribe (ASR error)."

    # Even a failed transcription still gets a (generic) mentor answer.
    answer = ai_mentor_query(transcript if transcript else "Help me with my studies.")

    tts = get_tts()
    if tts is None:
        return transcript, answer, None
    try:
        spoken = tts(answer)
        return transcript, answer, (spoken.get("sampling_rate"), spoken.get("audio"))
    except Exception as exc:
        print("TTS error:", exc)
        return transcript, answer, None

# ------------------ ROADMAP & TRACKS (unchanged) ------------------
# NOTE(review): this placeholder is dead code — it is immediately shadowed by
# the populated ROADMAPS literal defined further below. Safe to remove.
ROADMAPS = {
    # (same as your original ROADMAPS; trimmed here for brevity in message)
}

# Study tips keyed by track name; its keys also populate the "Preferred Track"
# dropdown, and render_roadmap shows the matching tips list.
TRACK_TIPS = {
    "C": [
        "Master pointers, arrays, and memory management.",
        "Focus on writing small CLI utilities to build confidence."
    ],
    "C++": [
        "Practice STL (vectors, maps, sets) regularly.",
        "Implement OOP concepts with small projects (Bank app, Library management)."
    ],
    "Java": [
        "Learn OOP pillars clearly (inheritance, polymorphism, abstraction, encapsulation).",
        "Build small console or Spring Boot projects."
    ],
    "Python": [
        "Practice basics + modules like requests and pandas.",
        "Try automation scripts and simple data analysis notebooks."
    ],
    "Frontend Developer": [
        "Focus on HTML, CSS, JavaScript and one framework (React).",
        "Clone 2–3 real websites (UI clones) to learn layouts."
    ],
    "Full Stack Developer": [
        "Learn one frontend framework (React) + one backend (Node/Django).",
        "Build 2–3 full apps: auth, CRUD, deployment."
    ]
}

# Curated (title, url) video links per track. Only "Python" is populated;
# other tracks simply have no video entries.
TRACK_VIDEOS = {
    "Python": [
        ("Python for Beginners - freeCodeCamp", "https://www.youtube.com/watch?v=rfscVS0vtbw"),
        ("Python DSA Playlist", "https://www.youtube.com/watch?v=pkYVOmU3MgA")
    ]
}

# Example minimal ROADMAPS entries used by UI (so UI won't crash)
# Schema per level: objective (str), subjects/projects (list[str]),
# resources/videos/coding_questions (lists of (title, url) tuples).
# Consumed by render_roadmap and generate_quick_plan.
ROADMAPS = {
    "B.Tech 1st Year": {
        "objective": "Strengthen programming, DSA basics, and system thinking.",
        "subjects": ["Python / C / C++", "Arrays, Linked Lists, Recursion", "Discrete Math"],
        "projects": ["Student Management System", "Simple Game"],
        "resources": [("Coursera - Python", "https://www.coursera.org/")],
        "videos": [],
        "coding_questions": [("Reverse Linked List", "https://leetcode.com/problems/reverse-linked-list/")]
    },
    "B.Tech 2nd Year": {
        "objective": "Web basics, DBMS, OOP, intermediate DSA.",
        "subjects": ["Stacks/Queues/Trees/Graphs", "SQL/MySQL", "HTML/CSS/JS"],
        "projects": ["Online Quiz System", "Portfolio Website"],
        "resources": [],
        "videos": [],
        "coding_questions": []
    },
    "B.Tech 3rd Year": {
        "objective": "Specialize: DS/Backend/Cloud and prepare for internships.",
        "subjects": ["Pandas/NumPy/ML basics", "Node/React/Django", "AWS basics"],
        "projects": ["Recommendation System", "E-commerce backend"],
        "resources": [],
        "videos": [],
        "coding_questions": []
    },
    "B.Tech Final Year": {
        "objective": "Capstone project, system design, placements, and open-source contributions.",
        "subjects": ["Advanced algorithms", "System design basics", "Cloud architecture"],
        "projects": ["AI Chatbot capstone", "Inventory SaaS app"],
        "resources": [],
        "videos": [],
        "coding_questions": []
    }
}

def render_roadmap(level, track=None):
    """Render one roadmap level as an HTML card.

    Shows objective, optional track tips, subject/project lists, and —
    when present — resources, videos, and coding-practice links.
    Returns a fallback message for an unknown *level*.
    """
    info = ROADMAPS.get(level)
    if not info:
        return "<b>No roadmap available.</b>"

    def items_html(seq, render_item):
        # Concatenate one <li> per item.
        return "".join(render_item(item) for item in seq)

    def link_li(pair):
        return f"<li><a href='{pair[1]}' target='_blank'>{pair[0]}</a></li>"

    parts = [
        "<div style='padding:12px; border-radius:8px;'>",
        f"<h2 style='color:#00aaff'>{level} Roadmap</h2>",
        f"<p><b>Objective:</b> {info.get('objective','-')}</p>",
    ]
    if track:
        parts.append(f"<p><b>Selected Track:</b> {track}</p>")
        tips = TRACK_TIPS.get(track, [])
        if tips:
            parts.append("<b>Track tips:</b><ul>" + items_html(tips, lambda t: f"<li>{t}</li>") + "</ul>")
    # Subjects/Projects lists always appear (even if empty).
    parts.append("<b>Subjects:</b><ul>" + items_html(info.get('subjects', []), lambda s: f"<li>{s}</li>") + "</ul>")
    parts.append("<b>Projects:</b><ul>" + items_html(info.get('projects', []), lambda p: f"<li>{p}</li>") + "</ul>")
    # Link sections are omitted when empty.
    for heading, key in (("Resources", "resources"),
                         ("Videos", "videos"),
                         ("Coding Practice", "coding_questions")):
        entries = info.get(key)
        if entries:
            parts.append(f"<b>{heading}:</b><ul>" + items_html(entries, link_li) + "</ul>")
    parts.append("</div>")
    return "".join(parts)

def generate_quick_plan(level, track, days=30):
    """Produce a week-by-week HTML study plan cycling through the level's subjects."""
    base = ROADMAPS.get(level)
    if not base:
        return "No roadmap available."
    subjects = base.get("subjects", [])
    if not subjects:
        return "No subjects found for this level."
    # At least one week, even for very short day counts.
    n_weeks = max(1, int(days) // 7)
    lines = [
        f"Week {idx+1}: Focus on — {subjects[idx % len(subjects)]}. Practice exercises + 3 problems."
        for idx in range(n_weeks)
    ]
    if track:
        lines.append(f"<br><b>Track Focus ({track}):</b> Every week, build at least one mini-feature or mini-project using {track}.")
    return "<br>".join(lines)

# ------------------ INTERNSHIPS & HACKATHONS (unchanged) ------------------
def fetch_internships_remotive(query="intern"):
    """Query the Remotive remote-jobs API and format up to 8 results as text.

    Returns a human-readable listing, or an error/empty message string.
    """
    try:
        resp = requests.get(f"https://remotive.com/api/remote-jobs?search={query}&limit=20", timeout=6)
        resp.raise_for_status()
        jobs = resp.json().get("jobs", [])
        listing = "".join(
            f"• {job.get('title','')} at {job.get('company_name','')}\n{job.get('url','#')}\n\n"
            for job in jobs[:8]
        )
        return listing if listing else "No internships found for this query."
    except Exception as e:
        return f"Internship fetch error: {e}"

def fetch_hackathons_devpost(keyword=""):
    """Scrape the Devpost hackathon listing and filter titles by *keyword*.

    Returns a bulleted text list, or an error/empty message string.
    """
    try:
        page = requests.get("https://devpost.com/hackathons", timeout=6)
        page.raise_for_status()
        soup = BeautifulSoup(page.text, "html.parser")
        # Devpost markup drifts over time; try a few likely selectors.
        headings = soup.select("h3.title") or soup.select("h5.title") or soup.find_all("h3")
        wanted = keyword.lower()
        matches = []
        for heading in headings[:12]:
            title = heading.get_text(strip=True)
            if not keyword or wanted in title.lower():
                matches.append("• " + title)
        return "\n".join(matches) if matches else "No hackathons found."
    except Exception as e:
        return f"Hackathon fetch error: {e}"

# ------------------ QUIZ & LEADERBOARD ------------------
# Each entry: (question, [choices], correct_answer).
SAMPLE_QUIZ = [
    ("What is the time complexity of binary search?", ["O(1)","O(n)","O(log n)","O(n log n)"], "O(log n)"),
    ("Which Python library is used for machine learning?", ["NumPy","scikit-learn","Pandas","Matplotlib"], "scikit-learn"),
    ("Which structure follows FIFO?", ["Stack","Queue","Graph","Tree"], "Queue")
]

def grade_and_record(name, a1, a2, a3):
    """Grade the 3-question quiz, persist the score, and return a summary.

    Parameters:
        name: player name (falls back to "Anonymous").
        a1, a2, a3: selected answers for the three SAMPLE_QUIZ questions.

    The leaderboard file is kept sorted best-first (ties broken by earlier
    timestamp) and truncated to 40 entries.
    """
    answers = [a1, a2, a3]
    correct = sum(1 for i, a in enumerate(answers) if a == SAMPLE_QUIZ[i][2])
    score = int(correct)
    try:
        with open(LEADERBOARD_FILE, "r") as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        # BUG FIX: was a bare `except:` that also swallowed KeyboardInterrupt
        # and masked programming errors; catch only file/parse failures.
        data = {"scores": []}
    entry = {"name": name or "Anonymous", "score": score, "time": datetime.datetime.now().isoformat()}
    data["scores"].append(entry)
    data["scores"] = sorted(data["scores"], key=lambda x: (-x["score"], x["time"]))[:40]
    with open(LEADERBOARD_FILE, "w") as f:
        json.dump(data, f, indent=2)
    return f"Score: {score}/{len(SAMPLE_QUIZ)} — recorded!"

def show_leaderboard():
    """Return the top-10 leaderboard as a Markdown string."""
    try:
        with open(LEADERBOARD_FILE, "r") as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        # Robustness fix: the file is normally created at import time, but
        # don't crash the UI if it was deleted or corrupted in the meantime.
        # Mirrors the error handling in grade_and_record.
        data = {"scores": []}
    rows = data.get("scores", [])[:10]
    if not rows:
        return "No scores yet."
    md = "### Leaderboard (Top 10)\n\n"
    for i, r in enumerate(rows, start=1):
        # Show only the date part of the ISO timestamp.
        md += f"{i}. **{r['name']}** — {r['score']}/3  ({r['time'].split('T')[0]})\n\n"
    return md

# ------------------ RESUME PARSING & ANALYSIS (fixed) ------------------
def _resolve_uploaded_path(file_obj):
    """
    Gradio returns different shapes depending on version:
      - an object with .name (path)
      - a dict with 'name' containing path
      - a string path (rare)
    Return actual filesystem path or None.
    """
    if not file_obj:
        return None
    # direct path string
    if isinstance(file_obj, str) and os.path.exists(file_obj):
        return file_obj
    # object with .name attribute
    if hasattr(file_obj, "name") and isinstance(file_obj.name, str) and os.path.exists(file_obj.name):
        return file_obj.name
    # dict-style (older gradio)
    if isinstance(file_obj, dict):
        # try tmp_path or name
        for k in ("name", "tmp_path", "tmpfile", "file"):
            p = file_obj.get(k)
            if isinstance(p, str) and os.path.exists(p):
                return p
    return None

def extract_text_from_file(file_obj):
    """Extract plain text from an uploaded resume (.txt, .pdf, or other).

    Returns "" whenever the upload path can't be resolved or no extractor
    succeeds; failures are logged to stdout rather than raised.
    """
    path = _resolve_uploaded_path(file_obj)
    if not path:
        return ""

    def read_as_text(p):
        with open(p, "r", encoding="utf-8", errors="ignore") as fh:
            return fh.read()

    try:
        if path.lower().endswith(".pdf"):
            # First choice: PyPDF2, concatenating text page by page.
            if PyPDF2 is not None:
                try:
                    with open(path, "rb") as fh:
                        reader = PyPDF2.PdfReader(fh)
                        return "".join((page.extract_text() or "") for page in reader.pages)
                except Exception as exc:
                    print("PyPDF2 read failed:", exc)
            # Second choice: pdfminer.
            if pdfminer_extract_text is not None:
                try:
                    return pdfminer_extract_text(path)
                except Exception as exc:
                    print("pdfminer read failed:", exc)
            # No working PDF extractor.
            return ""
        # .txt and unknown extensions: best-effort text read.
        return read_as_text(path)
    except Exception as exc:
        print("Resume read error:", exc)
        return ""

def analyze_resume(file_obj):
    """Analyze an uploaded resume and return Markdown feedback.

    Robustly reads the uploaded file, then applies simple heuristics
    (length, section headings, tech keywords, academics) to produce an
    actionable checklist.
    """
    if not file_obj:
        return "Upload a resume file (PDF or TXT) to analyze."

    text = extract_text_from_file(file_obj) or ""
    if not text.strip():
        tips = [
            "Could not extract text from this file. If it's a scanned PDF, OCR is required. Install PyPDF2 or pdfminer.six.",
            "Try uploading a machine-generated PDF (export from Word) or a plain .txt version of your resume."
        ]
        return "### Resume Analysis\n\n" + "\n".join(f"- {t}" for t in tips)

    lowered = text.lower()
    notes = []

    # Length heuristic (word count).
    word_count = len(text.split())
    if word_count < 200:
        notes.append("- Resume is short (<200 words). Add more details: projects, technical bullets, and metrics.")
    elif word_count > 2500:
        notes.append("- Resume is long (>2500 words). Target 1–2 pages; prioritize relevant points for the role.")

    # Expected section headings.
    notes.extend(
        f"- Add a clear **{section.capitalize()}** section with a heading."
        for section in ("education", "projects", "experience", "skills")
        if section not in lowered
    )

    # Technical-skill detection.
    tech_keywords = ("c++", "python", "java", "javascript", "react", "node", "sql", "django", "aws", "tensorflow", "pytorch")
    if any(kw in lowered for kw in tech_keywords):
        notes.append("- Good: technical skills detected. Group them in a 'Skills' section and order by relevance.")
    else:
        notes.append("- Add a **Skills** list (languages, frameworks, tools). Be specific (e.g., Python, React, PostgreSQL).")

    # Projects.
    if "project" not in lowered and "projects" not in lowered:
        notes.append("- Add at least 2–3 **Projects** with bullets: tech used, your role, and measurable impact.")
    else:
        notes.append("- For each project, include: Tech stack, your role, 1 measurable outcome (time saved/users/revenue).")

    # Academics.
    if any(kw in lowered for kw in ("cgpa", "gpa", "%", "percentage")):
        notes.append("- Academic performance listed. Keep it concise and visible under Education.")
    else:
        notes.append("- If your CGPA/percentage is good, include it under Education (e.g., CGPA: 8.2/10).")

    # General suggestions, always appended.
    notes += [
        "- Use action verbs: Built, Designed, Implemented, Optimized.",
        "- Tailor the resume's top section to the role (SDE / Frontend / Fullstack). Put relevant skills/projects first.",
        "- Remove irrelevant details; focus on impact."
    ]

    return "### Resume Analysis & Recommendations\n\n" + "\n".join(notes)

# ------------------ UI / CSS (kept simple) ------------------
# Light theme (blue accent on near-white cards); injected via gr.Blocks(css=...).
GLASS_CSS = """
:root {
  --accent: #0077cc;
  --text-main: #012b50;
}
body { font-family: Inter, sans-serif; }
h1 { color: var(--accent); }
.card { background: #f7fcff; padding: 16px; border-radius: 12px; }
"""

# ------------------ Build Gradio UI ------------------
# Single-page dashboard: six tabs, each wired to helpers defined above.
with gr.Blocks(css=GLASS_CSS, title="Road2Success — Dashboard") as app:
    gr.Markdown("<h1 style='text-align:center'>🚀 Road2Success — Dashboard (fixed)</h1>")
    gr.Markdown("<p style='text-align:center'>Fixes: Resume parsing + AI Mentor (OpenAI optional for best accuracy).</p>")

    with gr.Tabs():
        # Roadmap Tab
        with gr.TabItem("📚 Roadmap"):
            with gr.Row():
                col1, col2 = gr.Column(scale=1), gr.Column(scale=2)
                with col1:
                    btech_levels = list(ROADMAPS.keys())
                    level = gr.Dropdown(choices=btech_levels, value=btech_levels[0], label="Select year")
                    track_dropdown = gr.Dropdown(
                        choices=list(TRACK_TIPS.keys()),
                        value="Python",
                        label="Preferred Track"
                    )
                    quick_days = gr.Radio(choices=["30","60","90"], value="30", label="Days plan")
                    show_btn = gr.Button("Show Roadmap & Plan")
                with col2:
                    roadmap_html = gr.HTML()
                    plan_html = gr.HTML()
            # One click fills both outputs: roadmap card + week-by-week plan.
            show_btn.click(
                lambda l, t, d: (render_roadmap(l, t), generate_quick_plan(l, t, int(d))),
                inputs=[level, track_dropdown, quick_days],
                outputs=[roadmap_html, plan_html]
            )

        # AI Mentor Tab
        with gr.TabItem("🤖 AI Mentor"):
            gr.Markdown("### Text Mentor (OpenAI recommended for best quality)")
            prompt = gr.Textbox(label="Ask mentor", lines=3)
            short_toggle = gr.Checkbox(label="Short answer", value=True)
            eli5_toggle = gr.Checkbox(label="Explain simply (ELI5)", value=False)
            ask_btn = gr.Button("Ask Mentor")
            mentor_out = gr.Textbox(lines=6, label="Answer")
            ask_btn.click(ai_mentor_query, inputs=[prompt, short_toggle, eli5_toggle], outputs=mentor_out)

            gr.Markdown("### 🎙️ Voice AI Agent (optional)")
            with gr.Row():
                # type="filepath" matches what voice_mentor expects (a path).
                voice_in = gr.Audio(sources=["microphone"], type="filepath", label="Ask by voice")
            voice_btn = gr.Button("Ask via Voice")
            voice_transcript = gr.Textbox(label="Heard Question (Transcript)")
            voice_answer_text = gr.Textbox(label="Answer (Text)", lines=4)
            voice_answer_audio = gr.Audio(label="Answer (Audio)")
            voice_btn.click(voice_mentor, inputs=voice_in, outputs=[voice_transcript, voice_answer_text, voice_answer_audio])

        # Hackathon Prep Tab
        with gr.TabItem("🏆 Hackathon Prep"):
            idea_in = gr.Textbox(label="One-line idea")
            team_in = gr.Textbox(label="Team name")
            impact_in = gr.Textbox(label="Impact")
            pitch_btn = gr.Button("Generate Pitch")
            pitch_out = gr.Markdown()
            # Defined inline: only this tab's button uses it.
            def generate_pitch_pro(idea, team, impact_one_line):
                """Fill a fixed 60-second pitch template from the three inputs."""
                if not idea:
                    return "Please provide a one-line idea."
                team = team or "Team Road2Success"
                impact = impact_one_line or "Solves user pain / high impact"
                pitch = (
                    f"🔹 {team} — 60s Pitch\n\n"
                    f"**Idea:** {idea}\n\n"
                    f"**Problem:** {impact}\n\n"
                    f"**Solution (MVP):** One-sentence summary of the product.\n\n"
                    f"**Tech Stack:** Frontend (React/Gradio), Backend (FastAPI/Flask), ML (HuggingFace), DB (Firebase/Postgres)\n\n"
                    f"**Demo Flow:** Landing → Core feature → Impact screen\n\n"
                    f"**Why it wins:** Novel + demo-ready + measurable impact\n\n"
                )
                return pitch
            pitch_btn.click(generate_pitch_pro, inputs=[idea_in, team_in, impact_in], outputs=pitch_out)

        # Internships & Hackathons Tab
        with gr.TabItem("💼 Internships & Hackathons"):
            with gr.Row():
                with gr.Column():
                    intern_query = gr.Textbox(label="Internship search", value="intern")
                    intern_btn = gr.Button("Fetch Internships")
                    intern_out = gr.Textbox(lines=8, label="Internships")
                with gr.Column():
                    hack_kw = gr.Textbox(label="Hackathon keyword", value="")
                    hack_btn = gr.Button("Fetch Hackathons")
                    hack_out = gr.Textbox(lines=8, label="Hackathons")
            intern_btn.click(fetch_internships_remotive, inputs=intern_query, outputs=intern_out)
            hack_btn.click(fetch_hackathons_devpost, inputs=hack_kw, outputs=hack_out)

        # Quiz & Leaderboard Tab
        with gr.TabItem("🧠 Quiz & Leaderboard"):
            name_in = gr.Textbox(label="Your name")
            q1 = gr.Radio(SAMPLE_QUIZ[0][1], label=SAMPLE_QUIZ[0][0])
            q2 = gr.Radio(SAMPLE_QUIZ[1][1], label=SAMPLE_QUIZ[1][0])
            q3 = gr.Radio(SAMPLE_QUIZ[2][1], label=SAMPLE_QUIZ[2][0])
            submit_quiz = gr.Button("Submit Quiz")
            result_box = gr.Textbox(label="Result")
            leaderboard_md = gr.Markdown(show_leaderboard())
            # Two handlers on one button: record the score, then refresh the board.
            submit_quiz.click(fn=grade_and_record, inputs=[name_in, q1, q2, q3], outputs=result_box)
            submit_quiz.click(fn=lambda: gr.update(value=show_leaderboard()), outputs=leaderboard_md)

        # Resume Analyzer Tab
        with gr.TabItem("📄 Resume Analyzer"):
            gr.Markdown("Upload your resume (PDF or TXT) to get suggestions.")
            resume_file = gr.File(label="Upload resume (PDF or TXT)")
            analyze_btn = gr.Button("Analyze Resume")
            resume_feedback = gr.Markdown()
            analyze_btn.click(analyze_resume, inputs=resume_file, outputs=resume_feedback)

# Launch the Gradio server when run as a script.
if __name__ == "__main__":
    app.launch()