# road2success_app.py
"""
Road2Success Gradio app (fixed resume parsing + improved AI mentor).

Optional: set the OPENAI_API_KEY env var to use OpenAI Chat completions for the
mentor (recommended). Install optional dependencies for best results:

    pip install gradio requests beautifulsoup4 transformers torch PyPDF2 pdfminer.six openai

If you don't install the heavy libs, the app will fallback gracefully and show messages.
"""
import os
import json
import datetime

import requests
from bs4 import BeautifulSoup
import gradio as gr

# Optional ML / API libs (wrapped in try..except to avoid crashes)
try:
    from transformers import pipeline
except Exception:
    pipeline = None

try:
    import PyPDF2
except Exception:
    PyPDF2 = None

# pdfminer fallback for PDFs that PyPDF2 cannot read
try:
    from pdfminer.high_level import extract_text as pdfminer_extract_text
except Exception:
    pdfminer_extract_text = None

# Optional OpenAI path -- recommended for accurate mentor answers.
# FIX: bind `openai` unconditionally so later availability checks are a simple
# `openai is not None` instead of probing globals().
openai = None
OPENAI_KEY = os.environ.get("OPENAI_API_KEY", None)
if OPENAI_KEY:
    try:
        import openai
        openai.api_key = OPENAI_KEY
    except Exception:
        openai = None

# Leaderboard persistence: create the JSON store on first run.
LEADERBOARD_FILE = "road2success_leaderboard.json"
if not os.path.exists(LEADERBOARD_FILE):
    with open(LEADERBOARD_FILE, "w") as f:
        json.dump({"scores": []}, f, indent=2)

# ------------------ AI MENTOR ------------------

# Lazily-created HuggingFace text-generation pipeline (None until first use).
_hf_gen = None


def get_hf_generator(model_name="distilgpt2"):
    """Return a cached HF text-generation pipeline, or None if unavailable.

    NOTE: `model_name` only matters on the first successful call; afterwards
    the cached pipeline is returned regardless of the argument.
    """
    global _hf_gen
    if _hf_gen is None:
        if pipeline is None:
            return None
        try:
            # text-generation pipeline; device=-1 forces CPU (works without GPU)
            _hf_gen = pipeline("text-generation", model=model_name, device=-1)
        except Exception as e:
            print("HF generator load failed:", e)
            _hf_gen = None
    return _hf_gen


def ai_mentor_query(prompt, short_answer=True, eli5=False, max_new_tokens=120):
    """Return a concise mentor answer for *prompt*.

    Priority path:
      1) OpenAI ChatCompletion (if key present and openai installed)
      2) HuggingFace local generator (if transformers available)
      3) Fallback helpful message
    """
    if not prompt or not str(prompt).strip():
        return "Ask a clear question about study plans, hackathons, or projects."

    # Build answer-style modifiers appended to the user prompt.
    modifiers = []
    if short_answer:
        modifiers.append("Give a concise answer in at most 3 sentences.")
    if eli5:
        modifiers.append("Explain simply, like you're explaining to someone new (ELI5).")
    modifier_text = " ".join(modifiers)

    # 1) OpenAI ChatCompletion path (best quality) ---------------------------
    if OPENAI_KEY and openai is not None:
        try:
            system_prompt = (
                "You are an expert mentor for students and early-career engineers. "
                "Be helpful, concise, and provide actionable steps when relevant."
            )
            user_prompt = f"{prompt.strip()}\n\n{modifier_text}".strip()
            # BUG FIX: the old code selected "gpt-4o" precisely when
            # openai.ChatCompletion was *missing*, in which case the call below
            # could never succeed anyway. Use one model consistently.
            resp = openai.ChatCompletion.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
                temperature=0.2,
                max_tokens=300,
            )
            # compatible with different openai versions
            if isinstance(resp, dict):
                # older style: plain dict response
                ans = resp.get("choices", [{}])[0].get("message", {}).get("content", "")
            else:
                ans = str(resp)
            return ans.strip()
        except Exception as e:
            print("OpenAI path failed:", e)
            # fall through to HF path

    # 2) HF local generator fallback ------------------------------------------
    gen = get_hf_generator()
    if gen is not None:
        try:
            # Construct prompt with a brief instruction
            hf_prompt = f"{prompt.strip()}\n\n{modifier_text}\nAnswer:"
            # return_full_text=False avoids the prompt being echoed back
            # (on transformers versions that support it)
            out = gen(
                hf_prompt,
                max_new_tokens=max_new_tokens,
                do_sample=False,
                num_return_sequences=1,
                return_full_text=False,
            )
            # pipeline may return list of dicts or other shapes
            if isinstance(out, list) and out:
                text = out[0].get("generated_text") or out[0].get("text") or str(out[0])
            else:
                text = str(out)
            text = text.replace("\n", " ").strip()
            # If the generator repeated the prompt, attempt to trim repeated portion
            if hf_prompt in text:
                text = text.split(hf_prompt, 1)[-1].strip()
            # Keep answer short — take first 900 chars
            return text[:900]
        except Exception as e:
            print("HF generation failed:", e)

    # 3) Final graceful fallback ----------------------------------------------
    return (
        "AI model unavailable locally. To get accurate mentor answers enable the OpenAI path "
        "(set OPENAI_API_KEY env var) or install transformers and a text-generation model."
    )


# ------------------ ASR / TTS (optional) ------------------

# Lazily-created speech pipelines (None until first use / on failure).
_asr = None
_tts = None


def get_asr():
    """Return a cached speech-recognition pipeline, or None if unavailable."""
    global _asr
    if _asr is None and pipeline is not None:
        try:
            _asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device=-1)
        except Exception as e:
            print("ASR load failed:", e)
            _asr = None
    return _asr


def get_tts():
    """Return a cached text-to-speech pipeline, or None if unavailable."""
    global _tts
    if _tts is None and pipeline is not None:
        try:
            _tts = pipeline("text-to-speech", model="facebook/mms-tts-eng", device=-1)
        except Exception as e:
            print("TTS load failed:", e)
            _tts = None
    return _tts


def voice_mentor(audio_path):
    """Transcribe a spoken question, answer it, and optionally speak the answer.

    Returns a 3-tuple: (transcript, answer_text, audio_or_None) where the audio
    element is a (sampling_rate, waveform) pair suitable for gr.Audio.
    """
    if not audio_path:
        return "No audio received.", "Please record a question.", None
    asr = get_asr()
    if asr is None:
        transcript = "Speech recognition model not available. Type the question instead."
    else:
        try:
            trans = asr(audio_path)
            transcript = trans.get("text", "").strip() or "Could not transcribe audio."
        except Exception as e:
            print("ASR error:", e)
            transcript = "Could not transcribe (ASR error)."
    answer = ai_mentor_query(transcript if transcript else "Help me with my studies.")
    tts = get_tts()
    if tts is None:
        return transcript, answer, None
    try:
        tts_out = tts(answer)
        sr = tts_out.get("sampling_rate")
        audio = tts_out.get("audio")
        return transcript, answer, (sr, audio)
    except Exception as e:
        print("TTS error:", e)
        return transcript, answer, None


# ------------------ ROADMAP & TRACKS ------------------
# NOTE: the old placeholder `ROADMAPS = {}` here was dead code — it was
# immediately shadowed by the real ROADMAPS definition below, so it is removed.

TRACK_TIPS = {
    "C": [
        "Master pointers, arrays, and memory management.",
        "Focus on writing small CLI utilities to build confidence."
    ],
    "C++": [
        "Practice STL (vectors, maps, sets) regularly.",
        "Implement OOP concepts with small projects (Bank app, Library management)."
    ],
    "Java": [
        "Learn OOP pillars clearly (inheritance, polymorphism, abstraction, encapsulation).",
        "Build small console or Spring Boot projects."
    ],
    "Python": [
        "Practice basics + modules like requests and pandas.",
        "Try automation scripts and simple data analysis notebooks."
    ],
    "Frontend Developer": [
        "Focus on HTML, CSS, JavaScript and one framework (React).",
        "Clone 2–3 real websites (UI clones) to learn layouts."
    ],
    "Full Stack Developer": [
        "Learn one frontend framework (React) + one backend (Node/Django).",
        "Build 2–3 full apps: auth, CRUD, deployment."
    ]
}
TRACK_VIDEOS = {
    "Python": [
        ("Python for Beginners - freeCodeCamp", "https://www.youtube.com/watch?v=rfscVS0vtbw"),
        ("Python DSA Playlist", "https://www.youtube.com/watch?v=pkYVOmU3MgA")
    ]
}

# Minimal ROADMAPS entries used by the UI (so UI won't crash)
ROADMAPS = {
    "B.Tech 1st Year": {
        "objective": "Strengthen programming, DSA basics, and system thinking.",
        "subjects": ["Python / C / C++", "Arrays, Linked Lists, Recursion", "Discrete Math"],
        "projects": ["Student Management System", "Simple Game"],
        "resources": [("Coursera - Python", "https://www.coursera.org/")],
        "videos": [],
        "coding_questions": [("Reverse Linked List", "https://leetcode.com/problems/reverse-linked-list/")]
    },
    "B.Tech 2nd Year": {
        "objective": "Web basics, DBMS, OOP, intermediate DSA.",
        "subjects": ["Stacks/Queues/Trees/Graphs", "SQL/MySQL", "HTML/CSS/JS"],
        "projects": ["Online Quiz System", "Portfolio Website"],
        "resources": [],
        "videos": [],
        "coding_questions": []
    },
    "B.Tech 3rd Year": {
        "objective": "Specialize: DS/Backend/Cloud and prepare for internships.",
        "subjects": ["Pandas/NumPy/ML basics", "Node/React/Django", "AWS basics"],
        "projects": ["Recommendation System", "E-commerce backend"],
        "resources": [],
        "videos": [],
        "coding_questions": []
    },
    "B.Tech Final Year": {
        "objective": "Capstone project, system design, placements, and open-source contributions.",
        "subjects": ["Advanced algorithms", "System design basics", "Cloud architecture"],
        "projects": ["AI Chatbot capstone", "Inventory SaaS app"],
        "resources": [],
        "videos": [],
        "coding_questions": []
    }
}


def render_roadmap(level, track=None):
    """Return an HTML summary of the roadmap for *level*.

    When *track* is given, track-specific tips (TRACK_TIPS) and videos
    (TRACK_VIDEOS) are included.

    NOTE(review): the original HTML body was garbled in the source; this is a
    faithful reconstruction from its surviving fragments ("Objective:",
    "Selected Track:", "Track tips:").
    """
    info = ROADMAPS.get(level, None)
    if not info:
        return "No roadmap available."
    html = f"<h3>{level}</h3>"
    html += f"<p><b>Objective:</b> {info.get('objective', '-')}</p>"
    if track:
        html += f"<p><b>Selected Track:</b> {track}</p>"
        tips = TRACK_TIPS.get(track, [])
        if tips:
            html += "<p><b>Track tips:</b></p><ul>"
            html += "".join(f"<li>{t}</li>" for t in tips)
            html += "</ul>"
        for title, url in TRACK_VIDEOS.get(track, []):
            html += f'<p>🎬 <a href="{url}" target="_blank">{title}</a></p>'
    html += "<p><b>Subjects:</b> " + ", ".join(info.get("subjects", [])) + "</p>"
    html += "<p><b>Projects:</b> " + ", ".join(info.get("projects", [])) + "</p>"
    for title, url in info.get("resources", []):
        html += f'<p>📚 <a href="{url}" target="_blank">{title}</a></p>'
    for q, url in info.get("coding_questions", []):
        html += f'<p>🧩 <a href="{url}" target="_blank">{q}</a></p>'
    return html


def generate_quick_plan(level, track, days=30):
    """Return an HTML study plan that splits *days* into three phases.

    NOTE(review): the original definition was lost in the garbled region; this
    reconstruction matches the UI contract (called with level, track, int days
    and rendered into a gr.HTML output).
    """
    info = ROADMAPS.get(level, {})
    subjects = info.get("subjects", [])
    projects = info.get("projects", [])
    # Three roughly equal phases; the last phase absorbs the remainder.
    p1 = max(1, days // 3)
    p2 = max(1, days // 3)
    html = f"<h3>{days}-Day Plan — {level} ({track})</h3><ul>"
    html += f"<li><b>Days 1–{p1}:</b> Fundamentals — {', '.join(subjects[:2]) or 'core subjects'}</li>"
    html += f"<li><b>Days {p1 + 1}–{p1 + p2}:</b> Practice — remaining subjects + daily coding questions</li>"
    html += f"<li><b>Days {p1 + p2 + 1}–{days}:</b> Build — {', '.join(projects) or 'a capstone project'} and revise</li>"
    html += "</ul>"
    return html


def fetch_internships_remotive(query="intern"):
    """Fetch internship listings from the public Remotive API (best effort).

    Returns a newline-separated plain-text summary for the gr.Textbox output.
    """
    try:
        r = requests.get(
            "https://remotive.com/api/remote-jobs",
            params={"search": query or "intern"},
            timeout=10,
        )
        r.raise_for_status()
        jobs = r.json().get("jobs", [])[:10]
        if not jobs:
            return "No internships found for that query."
        return "\n".join(
            f"{j.get('title', '?')} — {j.get('company_name', '?')} — {j.get('url', '')}"
            for j in jobs
        )
    except Exception as e:
        return f"Could not fetch internships: {e}"


def fetch_hackathons_devpost(keyword=""):
    """Scrape upcoming hackathon titles from Devpost (best effort).

    Devpost's markup changes over time, so failures degrade to a message
    rather than raising.
    """
    try:
        params = {"search": keyword} if keyword else None
        r = requests.get("https://devpost.com/hackathons", params=params, timeout=10)
        r.raise_for_status()
        soup = BeautifulSoup(r.text, "html.parser")
        titles = [t.get_text(strip=True) for t in soup.select("h3") if t.get_text(strip=True)][:10]
        if not titles:
            return "No hackathons found (Devpost layout may have changed)."
        return "\n".join(titles)
    except Exception as e:
        return f"Could not fetch hackathons: {e}"


# ------------------ QUIZ & LEADERBOARD ------------------
# Each entry: (question, options, correct_option). The UI builds one gr.Radio
# per entry from [1] (options) and [0] (question).
SAMPLE_QUIZ = [
    ("What is the time complexity of binary search?", ["O(n)", "O(log n)", "O(n log n)"], "O(log n)"),
    ("Which data structure uses FIFO order?", ["Stack", "Queue", "Tree"], "Queue"),
    ("Which keyword defines a function in Python?", ["func", "def", "lambda"], "def"),
]


def grade_and_record(name, a1, a2, a3):
    """Grade the 3-question quiz, persist the score, and return a summary line."""
    answers = [a1, a2, a3]
    score = sum(1 for given, (_q, _opts, correct) in zip(answers, SAMPLE_QUIZ) if given == correct)
    entry = {
        "name": (name or "Anonymous").strip() or "Anonymous",
        "score": score,
        "when": datetime.datetime.now().isoformat(timespec="seconds"),
    }
    try:
        with open(LEADERBOARD_FILE) as f:
            data = json.load(f)
    except Exception:
        data = {"scores": []}
    data.setdefault("scores", []).append(entry)
    try:
        with open(LEADERBOARD_FILE, "w") as f:
            json.dump(data, f, indent=2)
    except Exception as e:
        # Best-effort persistence: the quiz result is still returned.
        print("Leaderboard save failed:", e)
    return f"{entry['name']}: {score}/{len(SAMPLE_QUIZ)} correct."


def show_leaderboard(top_n=10):
    """Return a markdown leaderboard of the best *top_n* scores (highest first)."""
    try:
        with open(LEADERBOARD_FILE) as f:
            scores = json.load(f).get("scores", [])
    except Exception:
        scores = []
    if not scores:
        return "No scores yet — be the first!"
    rows = sorted(scores, key=lambda s: s.get("score", 0), reverse=True)[:top_n]
    lines = ["### 🏅 Leaderboard", "", "| Name | Score |", "|---|---|"]
    lines += [f"| {s.get('name', '?')} | {s.get('score', 0)} |" for s in rows]
    return "\n".join(lines)


# ------------------ RESUME ANALYZER ------------------

def _extract_resume_text(path):
    """Extract text from a PDF (PyPDF2, then pdfminer) or a plain-text file.

    Returns "" when nothing could be extracted.
    """
    if path.lower().endswith(".pdf"):
        if PyPDF2 is not None:
            try:
                with open(path, "rb") as f:
                    reader = PyPDF2.PdfReader(f)
                    text = "\n".join((page.extract_text() or "") for page in reader.pages)
                if text.strip():
                    return text
            except Exception as e:
                print("PyPDF2 extraction failed:", e)
        if pdfminer_extract_text is not None:
            try:
                return pdfminer_extract_text(path) or ""
            except Exception as e:
                print("pdfminer extraction failed:", e)
        return ""
    try:
        with open(path, encoding="utf-8", errors="ignore") as f:
            return f.read()
    except Exception:
        return ""


def analyze_resume(file):
    """Return markdown feedback for an uploaded resume (PDF or TXT).

    Simple keyword-based checks: presence of projects, GitHub/LinkedIn links,
    experience, and a skills section.
    """
    if file is None:
        return "Please upload a PDF or TXT resume."
    # gr.File hands back an object with a .name path (or a plain path string).
    path = str(getattr(file, "name", file))
    text = _extract_resume_text(path)
    if not text.strip():
        return "Could not read the resume. Try a text-based PDF or a TXT file."
    lowered = text.lower()
    checks = {
        "project": "Add a Projects section with 2-3 concrete projects.",
        "github": "Link your GitHub profile.",
        "linkedin": "Link your LinkedIn profile.",
        "intern": "Mention internships or practical experience.",
        "skill": "Add a clear Skills section.",
    }
    missing = [tip for kw, tip in checks.items() if kw not in lowered]
    feedback = ["## Resume feedback", f"- Extracted ~{len(text.split())} words."]
    if missing:
        feedback.append("### Suggestions")
        feedback += [f"- {tip}" for tip in missing]
    else:
        feedback.append("Looks solid — covers projects, links, experience and skills. 👍")
    return "\n".join(feedback)
") with gr.Tabs(): # Roadmap Tab with gr.TabItem("📚 Roadmap"): with gr.Row(): col1, col2 = gr.Column(scale=1), gr.Column(scale=2) with col1: btech_levels = list(ROADMAPS.keys()) level = gr.Dropdown(choices=btech_levels, value=btech_levels[0], label="Select year") track_dropdown = gr.Dropdown( choices=list(TRACK_TIPS.keys()), value="Python", label="Preferred Track" ) quick_days = gr.Radio(choices=["30","60","90"], value="30", label="Days plan") show_btn = gr.Button("Show Roadmap & Plan") with col2: roadmap_html = gr.HTML() plan_html = gr.HTML() show_btn.click( lambda l, t, d: (render_roadmap(l, t), generate_quick_plan(l, t, int(d))), inputs=[level, track_dropdown, quick_days], outputs=[roadmap_html, plan_html] ) # AI Mentor Tab with gr.TabItem("🤖 AI Mentor"): gr.Markdown("### Text Mentor (OpenAI recommended for best quality)") prompt = gr.Textbox(label="Ask mentor", lines=3) short_toggle = gr.Checkbox(label="Short answer", value=True) eli5_toggle = gr.Checkbox(label="Explain simply (ELI5)", value=False) ask_btn = gr.Button("Ask Mentor") mentor_out = gr.Textbox(lines=6, label="Answer") ask_btn.click(ai_mentor_query, inputs=[prompt, short_toggle, eli5_toggle], outputs=mentor_out) gr.Markdown("### 🎙️ Voice AI Agent (optional)") with gr.Row(): voice_in = gr.Audio(sources=["microphone"], type="filepath", label="Ask by voice") voice_btn = gr.Button("Ask via Voice") voice_transcript = gr.Textbox(label="Heard Question (Transcript)") voice_answer_text = gr.Textbox(label="Answer (Text)", lines=4) voice_answer_audio = gr.Audio(label="Answer (Audio)") voice_btn.click(voice_mentor, inputs=voice_in, outputs=[voice_transcript, voice_answer_text, voice_answer_audio]) # Hackathon Prep Tab with gr.TabItem("🏆 Hackathon Prep"): idea_in = gr.Textbox(label="One-line idea") team_in = gr.Textbox(label="Team name") impact_in = gr.Textbox(label="Impact") pitch_btn = gr.Button("Generate Pitch") pitch_out = gr.Markdown() def generate_pitch_pro(idea, team, impact_one_line): if not idea: return 
"Please provide a one-line idea." team = team or "Team Road2Success" impact = impact_one_line or "Solves user pain / high impact" pitch = ( f"🔹 {team} — 60s Pitch\n\n" f"**Idea:** {idea}\n\n" f"**Problem:** {impact}\n\n" f"**Solution (MVP):** One-sentence summary of the product.\n\n" f"**Tech Stack:** Frontend (React/Gradio), Backend (FastAPI/Flask), ML (HuggingFace), DB (Firebase/Postgres)\n\n" f"**Demo Flow:** Landing → Core feature → Impact screen\n\n" f"**Why it wins:** Novel + demo-ready + measurable impact\n\n" ) return pitch pitch_btn.click(generate_pitch_pro, inputs=[idea_in, team_in, impact_in], outputs=pitch_out) # Internships & Hackathons Tab with gr.TabItem("💼 Internships & Hackathons"): with gr.Row(): with gr.Column(): intern_query = gr.Textbox(label="Internship search", value="intern") intern_btn = gr.Button("Fetch Internships") intern_out = gr.Textbox(lines=8, label="Internships") with gr.Column(): hack_kw = gr.Textbox(label="Hackathon keyword", value="") hack_btn = gr.Button("Fetch Hackathons") hack_out = gr.Textbox(lines=8, label="Hackathons") intern_btn.click(fetch_internships_remotive, inputs=intern_query, outputs=intern_out) hack_btn.click(fetch_hackathons_devpost, inputs=hack_kw, outputs=hack_out) # Quiz & Leaderboard Tab with gr.TabItem("🧠 Quiz & Leaderboard"): name_in = gr.Textbox(label="Your name") q1 = gr.Radio(SAMPLE_QUIZ[0][1], label=SAMPLE_QUIZ[0][0]) q2 = gr.Radio(SAMPLE_QUIZ[1][1], label=SAMPLE_QUIZ[1][0]) q3 = gr.Radio(SAMPLE_QUIZ[2][1], label=SAMPLE_QUIZ[2][0]) submit_quiz = gr.Button("Submit Quiz") result_box = gr.Textbox(label="Result") leaderboard_md = gr.Markdown(show_leaderboard()) submit_quiz.click(fn=grade_and_record, inputs=[name_in, q1, q2, q3], outputs=result_box) submit_quiz.click(fn=lambda: gr.update(value=show_leaderboard()), outputs=leaderboard_md) # Resume Analyzer Tab with gr.TabItem("📄 Resume Analyzer"): gr.Markdown("Upload your resume (PDF or TXT) to get suggestions.") resume_file = gr.File(label="Upload resume (PDF 
or TXT)") analyze_btn = gr.Button("Analyze Resume") resume_feedback = gr.Markdown() analyze_btn.click(analyze_resume, inputs=resume_file, outputs=resume_feedback) if __name__ == "__main__": app.launch()