# NOTE: the three lines that originally preceded this file ("Spaces:" and two
# "Runtime error" status lines) were Hugging Face Spaces page artifacts, not
# Python source; they are preserved here only as this comment.
# road2success_app.py
"""
Road2Success Gradio app (fixed resume parsing + improved AI mentor).

Optional: set OPENAI_API_KEY env var to use OpenAI Chat completions for the
mentor (recommended).

Install optional dependencies for best results:
    pip install gradio requests beautifulsoup4 transformers torch PyPDF2 pdfminer.six openai

If you don't install heavy libs, the app will fallback gracefully and show messages.
"""
import os
import json
import datetime

import requests
from bs4 import BeautifulSoup
import gradio as gr

# Optional ML / API libs (wrapped in try..except to avoid crashes).
try:
    from transformers import pipeline
except Exception:
    pipeline = None

try:
    import PyPDF2
except Exception:
    PyPDF2 = None

# pdfminer fallback used for PDF text extraction when PyPDF2 is missing or fails.
try:
    from pdfminer.high_level import extract_text as pdfminer_extract_text
except Exception:
    pdfminer_extract_text = None

# Optional OpenAI path -- recommended for accurate mentor answers.
# FIX: always bind `openai` so downstream code can simply test
# `openai is not None` instead of the fragile `'openai' in globals()` check
# (previously the name was undefined whenever OPENAI_API_KEY was unset).
openai = None
OPENAI_KEY = os.environ.get("OPENAI_API_KEY", None)
if OPENAI_KEY:
    try:
        import openai
        openai.api_key = OPENAI_KEY
    except Exception:
        openai = None

# Leaderboard persistence: create the JSON store on first run.
LEADERBOARD_FILE = "road2success_leaderboard.json"
if not os.path.exists(LEADERBOARD_FILE):
    with open(LEADERBOARD_FILE, "w") as f:
        json.dump({"scores": []}, f, indent=2)
# ------------------ AI MENTOR ------------------
# Module-level cache for the lazily-built HuggingFace text-generation pipeline.
_hf_gen = None


def get_hf_generator(model_name="distilgpt2"):
    """Return a cached HF text-generation pipeline, or None if unavailable.

    Built once on first successful call and reused afterwards. Returns None
    when `transformers` is not installed or the model fails to load.
    """
    global _hf_gen
    # Fast path: already loaded, or transformers is missing entirely.
    if _hf_gen is not None:
        return _hf_gen
    if pipeline is None:
        return None
    try:
        # device=-1 forces CPU inference so the app works without a GPU.
        _hf_gen = pipeline("text-generation", model=model_name, device=-1)
    except Exception as exc:
        print("HF generator load failed:", exc)
        _hf_gen = None
    return _hf_gen
def ai_mentor_query(prompt, short_answer=True, eli5=False, max_new_tokens=120):
    """Answer a mentoring question, preferring OpenAI, then a local HF model.

    Priority:
      1) OpenAI ChatCompletion (if key present and openai importable)
      2) HuggingFace local generator (if transformers available)
      3) A helpful fallback message

    Args:
        prompt: the user's question.
        short_answer: ask for a reply of at most 3 sentences.
        eli5: ask for a very simple explanation.
        max_new_tokens: generation budget for the local HF fallback.

    Returns:
        Plain-text answer, or guidance when no model backend is available.
    """
    if not prompt or not str(prompt).strip():
        return "Ask a clear question about study plans, hackathons, or projects."

    # Style modifiers appended after the question to steer the model.
    style_bits = []
    if short_answer:
        style_bits.append("Give a concise answer in at most 3 sentences.")
    if eli5:
        style_bits.append("Explain simply, like you're explaining to someone new (ELI5).")
    modifier_text = " ".join(style_bits)

    # 1) OpenAI ChatCompletion path (best quality) --------------------------------
    if OPENAI_KEY and globals().get("openai") is not None:
        try:
            system_prompt = (
                "You are an expert mentor for students and early-career engineers. "
                "Be helpful, concise, and provide actionable steps when relevant."
            )
            user_prompt = f"{prompt.strip()}\n\n{modifier_text}".strip()
            resp = openai.ChatCompletion.create(
                model="gpt-4o-mini" if getattr(openai, "ChatCompletion", None) else "gpt-4o",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
                temperature=0.2,
                max_tokens=300,
            )
            # Tolerate both dict-style (older SDK) and object-style responses.
            if isinstance(resp, dict):
                answer = resp.get("choices", [{}])[0].get("message", {}).get("content", "")
            else:
                answer = str(resp)
            return answer.strip()
        except Exception as exc:
            print("OpenAI path failed:", exc)
            # Fall through to the HF path.

    # 2) Local HuggingFace generator fallback -------------------------------------
    generator = get_hf_generator()
    if generator is not None:
        try:
            hf_prompt = f"{prompt.strip()}\n\n{modifier_text}\nAnswer:"
            # return_full_text=False keeps some transformers versions from
            # echoing the prompt back in the output.
            out = generator(
                hf_prompt,
                max_new_tokens=max_new_tokens,
                do_sample=False,
                num_return_sequences=1,
                return_full_text=False,
            )
            # The pipeline may return a list of dicts or other shapes.
            if isinstance(out, list) and out:
                text = out[0].get("generated_text") or out[0].get("text") or str(out[0])
            else:
                text = str(out)
            text = text.replace("\n", " ").strip()
            # Trim an echoed prompt if the model repeated it anyway.
            if hf_prompt in text:
                text = text.split(hf_prompt, 1)[-1].strip()
            # Keep the answer short — cap at 900 characters.
            return text[:900]
        except Exception as exc:
            print("HF generation failed:", exc)

    # 3) Final graceful fallback ---------------------------------------------------
    return ("AI model unavailable locally. To get accurate mentor answers enable the OpenAI path "
            "(set OPENAI_API_KEY env var) or install transformers and a text-generation model.")
# ------------------ ASR / TTS (optional) ------------------
# Module-level caches for the speech pipelines, loaded lazily on first use.
_asr = None
_tts = None


def get_asr():
    """Return a cached Whisper ASR pipeline, or None if unavailable."""
    global _asr
    # Load only once, and only when transformers is installed.
    if _asr is None and pipeline is not None:
        try:
            # whisper-tiny on CPU keeps startup cost and memory low.
            _asr = pipeline(
                "automatic-speech-recognition", model="openai/whisper-tiny", device=-1
            )
        except Exception as exc:
            print("ASR load failed:", exc)
            _asr = None
    return _asr
def get_tts():
    """Return a cached MMS text-to-speech pipeline, or None if unavailable."""
    global _tts
    # Load only once, and only when transformers is installed.
    if _tts is None and pipeline is not None:
        try:
            _tts = pipeline("text-to-speech", model="facebook/mms-tts-eng", device=-1)
        except Exception as exc:
            print("TTS load failed:", exc)
            _tts = None
    return _tts
def voice_mentor(audio_path):
    """Transcribe a spoken question, answer it, and optionally speak the answer.

    Returns a (transcript, answer_text, answer_audio) triple where
    answer_audio is (sampling_rate, samples) or None when TTS is unavailable.
    """
    if not audio_path:
        return "No audio received.", "Please record a question.", None

    recognizer = get_asr()
    if recognizer is None:
        transcript = "Speech recognition model not available. Type the question instead."
    else:
        try:
            result = recognizer(audio_path)
            transcript = result.get("text", "").strip() or "Could not transcribe audio."
        except Exception as exc:
            print("ASR error:", exc)
            transcript = "Could not transcribe (ASR error)."

    # Fall back to a generic question if transcription produced nothing.
    answer = ai_mentor_query(transcript if transcript else "Help me with my studies.")

    speaker = get_tts()
    if speaker is None:
        return transcript, answer, None
    try:
        spoken = speaker(answer)
        return transcript, answer, (spoken.get("sampling_rate"), spoken.get("audio"))
    except Exception as exc:
        print("TTS error:", exc)
        return transcript, answer, None
# ------------------ ROADMAP & TRACKS (unchanged) ------------------
# FIX: removed a dead `ROADMAPS = {}` placeholder that used to live here — it
# was unconditionally overwritten by the real ROADMAPS literal defined below,
# so it could never be observed.

# Per-track study tips shown alongside the roadmap when a track is selected.
TRACK_TIPS = {
    "C": [
        "Master pointers, arrays, and memory management.",
        "Focus on writing small CLI utilities to build confidence."
    ],
    "C++": [
        "Practice STL (vectors, maps, sets) regularly.",
        "Implement OOP concepts with small projects (Bank app, Library management)."
    ],
    "Java": [
        "Learn OOP pillars clearly (inheritance, polymorphism, abstraction, encapsulation).",
        "Build small console or Spring Boot projects."
    ],
    "Python": [
        "Practice basics + modules like requests and pandas.",
        "Try automation scripts and simple data analysis notebooks."
    ],
    "Frontend Developer": [
        "Focus on HTML, CSS, JavaScript and one framework (React).",
        "Clone 2–3 real websites (UI clones) to learn layouts."
    ],
    "Full Stack Developer": [
        "Learn one frontend framework (React) + one backend (Node/Django).",
        "Build 2–3 full apps: auth, CRUD, deployment."
    ]
}

# Curated video links per track; (title, url) pairs. Only Python has entries so far.
TRACK_VIDEOS = {
    "Python": [
        ("Python for Beginners - freeCodeCamp", "https://www.youtube.com/watch?v=rfscVS0vtbw"),
        ("Python DSA Playlist", "https://www.youtube.com/watch?v=pkYVOmU3MgA")
    ]
}
# Example minimal ROADMAPS entries used by UI (so UI won't crash)
def _roadmap_entry(objective, subjects, projects, resources=None, videos=None, coding_questions=None):
    """Assemble one roadmap record with a consistent key set."""
    return {
        "objective": objective,
        "subjects": subjects,
        "projects": projects,
        "resources": [] if resources is None else resources,
        "videos": [] if videos is None else videos,
        "coding_questions": [] if coding_questions is None else coding_questions,
    }


# Year-by-year study roadmaps keyed by the labels shown in the UI dropdown.
ROADMAPS = {
    "B.Tech 1st Year": _roadmap_entry(
        "Strengthen programming, DSA basics, and system thinking.",
        ["Python / C / C++", "Arrays, Linked Lists, Recursion", "Discrete Math"],
        ["Student Management System", "Simple Game"],
        resources=[("Coursera - Python", "https://www.coursera.org/")],
        coding_questions=[("Reverse Linked List", "https://leetcode.com/problems/reverse-linked-list/")],
    ),
    "B.Tech 2nd Year": _roadmap_entry(
        "Web basics, DBMS, OOP, intermediate DSA.",
        ["Stacks/Queues/Trees/Graphs", "SQL/MySQL", "HTML/CSS/JS"],
        ["Online Quiz System", "Portfolio Website"],
    ),
    "B.Tech 3rd Year": _roadmap_entry(
        "Specialize: DS/Backend/Cloud and prepare for internships.",
        ["Pandas/NumPy/ML basics", "Node/React/Django", "AWS basics"],
        ["Recommendation System", "E-commerce backend"],
    ),
    "B.Tech Final Year": _roadmap_entry(
        "Capstone project, system design, placements, and open-source contributions.",
        ["Advanced algorithms", "System design basics", "Cloud architecture"],
        ["AI Chatbot capstone", "Inventory SaaS app"],
    ),
}
def render_roadmap(level, track=None):
    """Render the roadmap for `level` (plus optional per-`track` tips) as HTML."""
    info = ROADMAPS.get(level)
    if not info:
        return "<b>No roadmap available.</b>"

    def bullets(items):
        # Plain-text list items.
        return "".join(f"<li>{item}</li>" for item in items)

    def links(pairs):
        # (label, url) pairs rendered as new-tab links.
        return "".join(
            f"<li><a href='{url}' target='_blank'>{label}</a></li>" for label, url in pairs
        )

    parts = ["<div style='padding:12px; border-radius:8px;'>"]
    parts.append(f"<h2 style='color:#00aaff'>{level} Roadmap</h2>")
    parts.append(f"<p><b>Objective:</b> {info.get('objective','-')}</p>")
    if track:
        parts.append(f"<p><b>Selected Track:</b> {track}</p>")
        tips = TRACK_TIPS.get(track, [])
        if tips:
            parts.append("<b>Track tips:</b><ul>" + bullets(tips) + "</ul>")
    parts.append("<b>Subjects:</b><ul>" + bullets(info.get('subjects', [])) + "</ul>")
    parts.append("<b>Projects:</b><ul>" + bullets(info.get('projects', [])) + "</ul>")
    if info.get("resources"):
        parts.append("<b>Resources:</b><ul>" + links(info.get('resources', [])) + "</ul>")
    if info.get("videos"):
        parts.append("<b>Videos:</b><ul>" + links(info.get('videos', [])) + "</ul>")
    if info.get("coding_questions"):
        parts.append("<b>Coding Practice:</b><ul>" + links(info.get('coding_questions', [])) + "</ul>")
    parts.append("</div>")
    return "".join(parts)
def generate_quick_plan(level, track, days=30):
    """Build a simple week-by-week study plan (HTML lines) for level/track."""
    roadmap = ROADMAPS.get(level)
    if not roadmap:
        return "No roadmap available."
    subjects = roadmap.get("subjects", [])
    if not subjects:
        return "No subjects found for this level."
    # One focus subject per week, cycling through the subject list.
    n_weeks = max(1, int(days) // 7)
    lines = [
        f"Week {week + 1}: Focus on — {subjects[week % len(subjects)]}. Practice exercises + 3 problems."
        for week in range(n_weeks)
    ]
    if track:
        lines.append(
            f"<br><b>Track Focus ({track}):</b> Every week, build at least one mini-feature or mini-project using {track}."
        )
    return "<br>".join(lines)
# ------------------ INTERNSHIPS & HACKATHONS (unchanged) ------------------
def fetch_internships_remotive(query="intern"):
    """Fetch up to 8 remote internships matching `query` from the Remotive API."""
    try:
        resp = requests.get(
            f"https://remotive.com/api/remote-jobs?search={query}&limit=20", timeout=6
        )
        resp.raise_for_status()
        jobs = resp.json().get("jobs", [])
        lines = [
            f"• {job.get('title','')} at {job.get('company_name','')}\n{job.get('url','#')}\n\n"
            for job in jobs[:8]
        ]
        listing = "".join(lines)
        return listing if listing else "No internships found for this query."
    except Exception as exc:
        # Network/JSON failures are reported inline rather than raised.
        return f"Internship fetch error: {exc}"
def fetch_hackathons_devpost(keyword=""):
    """Scrape devpost.com's hackathon listing; keep titles containing `keyword`."""
    try:
        resp = requests.get("https://devpost.com/hackathons", timeout=6)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "html.parser")
        # Devpost markup shifts between releases; try several title selectors.
        headings = soup.select("h3.title") or soup.select("h5.title") or soup.find_all("h3")
        needle = keyword.lower() if keyword else ""
        matches = []
        for heading in headings[:12]:
            title = heading.get_text(strip=True)
            if not keyword or needle in title.lower():
                matches.append("• " + title)
        return "\n".join(matches) if matches else "No hackathons found."
    except Exception as exc:
        # Network/parse failures are reported inline rather than raised.
        return f"Hackathon fetch error: {exc}"
# ------------------ QUIZ & LEADERBOARD ------------------
# Each quiz entry is a (question, option_list, correct_answer) triple.
SAMPLE_QUIZ = [
    (
        "What is the time complexity of binary search?",
        ["O(1)", "O(n)", "O(log n)", "O(n log n)"],
        "O(log n)",
    ),
    (
        "Which Python library is used for machine learning?",
        ["NumPy", "scikit-learn", "Pandas", "Matplotlib"],
        "scikit-learn",
    ),
    (
        "Which structure follows FIFO?",
        ["Stack", "Queue", "Graph", "Tree"],
        "Queue",
    ),
]
def grade_and_record(name, a1, a2, a3):
    """Grade three quiz answers, append the result to the leaderboard file.

    The leaderboard is capped at the top 40 entries, sorted by score
    (descending) then submission time (ascending). Returns a summary string.
    """
    answers = [a1, a2, a3]
    score = sum(1 for i, answer in enumerate(answers) if answer == SAMPLE_QUIZ[i][2])
    # FIX: catch the specific failure modes (missing or corrupt file) instead
    # of a bare `except:` that also swallowed KeyboardInterrupt/SystemExit.
    try:
        with open(LEADERBOARD_FILE, "r") as f:
            data = json.load(f)
    except (OSError, ValueError):  # ValueError covers json.JSONDecodeError
        data = {"scores": []}
    entry = {
        "name": name or "Anonymous",
        "score": score,
        "time": datetime.datetime.now().isoformat(),
    }
    data["scores"].append(entry)
    data["scores"] = sorted(data["scores"], key=lambda x: (-x["score"], x["time"]))[:40]
    with open(LEADERBOARD_FILE, "w") as f:
        json.dump(data, f, indent=2)
    return f"Score: {score}/{len(SAMPLE_QUIZ)} — recorded!"
def show_leaderboard():
    """Return the top-10 leaderboard as a Markdown string."""
    # FIX: don't crash if the leaderboard file was deleted or corrupted after
    # startup (it is only guaranteed to exist at module import time).
    try:
        with open(LEADERBOARD_FILE, "r") as f:
            data = json.load(f)
    except (OSError, ValueError):  # ValueError covers json.JSONDecodeError
        data = {"scores": []}
    rows = data.get("scores", [])[:10]
    if not rows:
        return "No scores yet."
    md = "### Leaderboard (Top 10)\n\n"
    for rank, row in enumerate(rows, start=1):
        # Date portion only (the stored time is an ISO timestamp).
        md += f"{rank}. **{row['name']}** — {row['score']}/3 ({row['time'].split('T')[0]})\n\n"
    return md
| # ------------------ RESUME PARSING & ANALYSIS (fixed) ------------------ | |
| def _resolve_uploaded_path(file_obj): | |
| """ | |
| Gradio returns different shapes depending on version: | |
| - an object with .name (path) | |
| - a dict with 'name' containing path | |
| - a string path (rare) | |
| Return actual filesystem path or None. | |
| """ | |
| if not file_obj: | |
| return None | |
| # direct path string | |
| if isinstance(file_obj, str) and os.path.exists(file_obj): | |
| return file_obj | |
| # object with .name attribute | |
| if hasattr(file_obj, "name") and isinstance(file_obj.name, str) and os.path.exists(file_obj.name): | |
| return file_obj.name | |
| # dict-style (older gradio) | |
| if isinstance(file_obj, dict): | |
| # try tmp_path or name | |
| for k in ("name", "tmp_path", "tmpfile", "file"): | |
| p = file_obj.get(k) | |
| if isinstance(p, str) and os.path.exists(p): | |
| return p | |
| return None | |
def extract_text_from_file(file_obj):
    """Extract plain text from an uploaded .txt or .pdf resume; '' on failure."""
    path = _resolve_uploaded_path(file_obj)
    if not path:
        return ""
    try:
        if path.lower().endswith(".pdf"):
            # Preferred PDF reader: PyPDF2.
            if PyPDF2 is not None:
                try:
                    with open(path, "rb") as f:
                        reader = PyPDF2.PdfReader(f)
                        # extract_text() may return None for image-only pages.
                        return "".join((page.extract_text() or "") for page in reader.pages)
                except Exception as exc:
                    print("PyPDF2 read failed:", exc)
            # Fallback PDF reader: pdfminer.six.
            if pdfminer_extract_text is not None:
                try:
                    return pdfminer_extract_text(path)
                except Exception as exc:
                    print("pdfminer read failed:", exc)
            # Both PDF readers unavailable or failed.
            return ""
        # .txt and any unknown extension: best-effort text read.
        with open(path, "r", encoding="utf-8", errors="ignore") as f:
            return f.read()
    except Exception as exc:
        print("Resume read error:", exc)
        return ""
def analyze_resume(file_obj):
    """Analyze an uploaded resume (PDF/TXT) and return Markdown feedback.

    Robustly reads the upload and produces an actionable checklist covering
    length, section structure, skills, projects, and academics.
    """
    if not file_obj:
        return "Upload a resume file (PDF or TXT) to analyze."

    text = extract_text_from_file(file_obj) or ""
    if not text.strip():
        tips = [
            "Could not extract text from this file. If it's a scanned PDF, OCR is required. Install PyPDF2 or pdfminer.six.",
            "Try uploading a machine-generated PDF (export from Word) or a plain .txt version of your resume."
        ]
        return "### Resume Analysis\n\n" + "\n".join(f"- {t}" for t in tips)

    tl = text.lower()
    feedback = []

    # Length heuristics (word count).
    word_count = len(text.split())
    if word_count < 200:
        feedback.append("- Resume is short (<200 words). Add more details: projects, technical bullets, and metrics.")
    elif word_count > 2500:
        feedback.append("- Resume is long (>2500 words). Target 1–2 pages; prioritize relevant points for the role.")

    # Section presence (simple substring heuristic on the lowercased text).
    for section in ["education", "projects", "experience", "skills"]:
        if section not in tl:
            feedback.append(f"- Add a clear **{section.capitalize()}** section with a heading.")

    # Technology detection.
    tech_keywords = ["c++", "python", "java", "javascript", "react", "node", "sql", "django", "aws", "tensorflow", "pytorch"]
    if any(k in tl for k in tech_keywords):
        feedback.append("- Good: technical skills detected. Group them in a 'Skills' section and order by relevance.")
    else:
        feedback.append("- Add a **Skills** list (languages, frameworks, tools). Be specific (e.g., Python, React, PostgreSQL).")

    # Projects.
    if "project" not in tl and "projects" not in tl:
        feedback.append("- Add at least 2–3 **Projects** with bullets: tech used, your role, and measurable impact.")
    else:
        feedback.append("- For each project, include: Tech stack, your role, 1 measurable outcome (time saved/users/revenue).")

    # Academics.
    if any(k in tl for k in ["cgpa", "gpa", "%", "percentage"]):
        feedback.append("- Academic performance listed. Keep it concise and visible under Education.")
    else:
        feedback.append("- If your CGPA/percentage is good, include it under Education (e.g., CGPA: 8.2/10).")

    # General suggestions, always appended.
    feedback.extend([
        "- Use action verbs: Built, Designed, Implemented, Optimized.",
        "- Tailor the resume's top section to the role (SDE / Frontend / Fullstack). Put relevant skills/projects first.",
        "- Remove irrelevant details; focus on impact."
    ])

    return "### Resume Analysis & Recommendations\n\n" + "\n".join(feedback)
# ------------------ UI / CSS (kept simple) ------------------
# Custom CSS injected into the Gradio app via gr.Blocks(css=GLASS_CSS).
# This is runtime-significant text (the selectors and CSS variables style the
# rendered page), so the literal content must not be altered.
GLASS_CSS = """
:root {
--accent: #0077cc;
--text-main: #012b50;
}
body { font-family: Inter, sans-serif; }
h1 { color: var(--accent); }
.card { background: #f7fcff; padding: 16px; border-radius: 12px; }
"""
# ------------------ Build Gradio UI ------------------
# Declarative UI: components created inside the Blocks context are rendered in
# order; .click() wires buttons to the handler functions defined above.
with gr.Blocks(css=GLASS_CSS, title="Road2Success — Dashboard") as app:
    gr.Markdown("<h1 style='text-align:center'>🚀 Road2Success — Dashboard (fixed)</h1>")
    gr.Markdown("<p style='text-align:center'>Fixes: Resume parsing + AI Mentor (OpenAI optional for best accuracy).</p>")
    with gr.Tabs():
        # Roadmap Tab
        with gr.TabItem("📚 Roadmap"):
            with gr.Row():
                # Create both columns up front, then re-enter each `with colN:`
                # to populate its children.
                col1, col2 = gr.Column(scale=1), gr.Column(scale=2)
            with col1:
                btech_levels = list(ROADMAPS.keys())
                level = gr.Dropdown(choices=btech_levels, value=btech_levels[0], label="Select year")
                track_dropdown = gr.Dropdown(
                    choices=list(TRACK_TIPS.keys()),
                    value="Python",
                    label="Preferred Track"
                )
                quick_days = gr.Radio(choices=["30","60","90"], value="30", label="Days plan")
                show_btn = gr.Button("Show Roadmap & Plan")
            with col2:
                roadmap_html = gr.HTML()
                plan_html = gr.HTML()
            # One click renders both the roadmap HTML and the quick plan.
            show_btn.click(
                lambda l, t, d: (render_roadmap(l, t), generate_quick_plan(l, t, int(d))),
                inputs=[level, track_dropdown, quick_days],
                outputs=[roadmap_html, plan_html]
            )
        # AI Mentor Tab
        with gr.TabItem("🤖 AI Mentor"):
            gr.Markdown("### Text Mentor (OpenAI recommended for best quality)")
            prompt = gr.Textbox(label="Ask mentor", lines=3)
            short_toggle = gr.Checkbox(label="Short answer", value=True)
            eli5_toggle = gr.Checkbox(label="Explain simply (ELI5)", value=False)
            ask_btn = gr.Button("Ask Mentor")
            mentor_out = gr.Textbox(lines=6, label="Answer")
            ask_btn.click(ai_mentor_query, inputs=[prompt, short_toggle, eli5_toggle], outputs=mentor_out)
            gr.Markdown("### 🎙️ Voice AI Agent (optional)")
            with gr.Row():
                voice_in = gr.Audio(sources=["microphone"], type="filepath", label="Ask by voice")
                voice_btn = gr.Button("Ask via Voice")
            voice_transcript = gr.Textbox(label="Heard Question (Transcript)")
            voice_answer_text = gr.Textbox(label="Answer (Text)", lines=4)
            voice_answer_audio = gr.Audio(label="Answer (Audio)")
            voice_btn.click(voice_mentor, inputs=voice_in, outputs=[voice_transcript, voice_answer_text, voice_answer_audio])
        # Hackathon Prep Tab
        with gr.TabItem("🏆 Hackathon Prep"):
            idea_in = gr.Textbox(label="One-line idea")
            team_in = gr.Textbox(label="Team name")
            impact_in = gr.Textbox(label="Impact")
            pitch_btn = gr.Button("Generate Pitch")
            pitch_out = gr.Markdown()
            def generate_pitch_pro(idea, team, impact_one_line):
                """Format a 60-second hackathon pitch template from the three inputs."""
                if not idea:
                    return "Please provide a one-line idea."
                team = team or "Team Road2Success"
                impact = impact_one_line or "Solves user pain / high impact"
                pitch = (
                    f"🔹 {team} — 60s Pitch\n\n"
                    f"**Idea:** {idea}\n\n"
                    f"**Problem:** {impact}\n\n"
                    f"**Solution (MVP):** One-sentence summary of the product.\n\n"
                    f"**Tech Stack:** Frontend (React/Gradio), Backend (FastAPI/Flask), ML (HuggingFace), DB (Firebase/Postgres)\n\n"
                    f"**Demo Flow:** Landing → Core feature → Impact screen\n\n"
                    f"**Why it wins:** Novel + demo-ready + measurable impact\n\n"
                )
                return pitch
            pitch_btn.click(generate_pitch_pro, inputs=[idea_in, team_in, impact_in], outputs=pitch_out)
        # Internships & Hackathons Tab
        with gr.TabItem("💼 Internships & Hackathons"):
            with gr.Row():
                with gr.Column():
                    intern_query = gr.Textbox(label="Internship search", value="intern")
                    intern_btn = gr.Button("Fetch Internships")
                    intern_out = gr.Textbox(lines=8, label="Internships")
                with gr.Column():
                    hack_kw = gr.Textbox(label="Hackathon keyword", value="")
                    hack_btn = gr.Button("Fetch Hackathons")
                    hack_out = gr.Textbox(lines=8, label="Hackathons")
            intern_btn.click(fetch_internships_remotive, inputs=intern_query, outputs=intern_out)
            hack_btn.click(fetch_hackathons_devpost, inputs=hack_kw, outputs=hack_out)
        # Quiz & Leaderboard Tab
        with gr.TabItem("🧠 Quiz & Leaderboard"):
            name_in = gr.Textbox(label="Your name")
            q1 = gr.Radio(SAMPLE_QUIZ[0][1], label=SAMPLE_QUIZ[0][0])
            q2 = gr.Radio(SAMPLE_QUIZ[1][1], label=SAMPLE_QUIZ[1][0])
            q3 = gr.Radio(SAMPLE_QUIZ[2][1], label=SAMPLE_QUIZ[2][0])
            submit_quiz = gr.Button("Submit Quiz")
            result_box = gr.Textbox(label="Result")
            leaderboard_md = gr.Markdown(show_leaderboard())
            # Two listeners on one button: record the score, then refresh the board.
            submit_quiz.click(fn=grade_and_record, inputs=[name_in, q1, q2, q3], outputs=result_box)
            submit_quiz.click(fn=lambda: gr.update(value=show_leaderboard()), outputs=leaderboard_md)
        # Resume Analyzer Tab
        with gr.TabItem("📄 Resume Analyzer"):
            gr.Markdown("Upload your resume (PDF or TXT) to get suggestions.")
            resume_file = gr.File(label="Upload resume (PDF or TXT)")
            analyze_btn = gr.Button("Analyze Resume")
            resume_feedback = gr.Markdown()
            analyze_btn.click(analyze_resume, inputs=resume_file, outputs=resume_feedback)

if __name__ == "__main__":
    app.launch()