# modules/llm_handler.py
import os
import config
import re
from groq import Groq
from modules.web_search import search_for_example_answers
from modules.pm_frameworks import get_framework_suggestion, get_relevant_metrics
# Initialize the Groq client with proper error handling
def get_groq_client():
    api_key = config.GROQ_API_KEY or os.environ.get("GROQ_API_KEY")
    if not api_key:
        raise ValueError("GROQ_API_KEY is required. Please set it in .env file or environment variables.")
    return Groq(api_key=api_key)
try:
    client = get_groq_client()
    MODEL = config.GROQ_MODEL
    print(f"✅ Groq API connected successfully with model: {MODEL}")
except Exception as e:
    print(f"❌ Failed to initialize Groq client: {e}")
    client = None
    MODEL = "llama3-8b-8192"
def generate_coaching_question(coaching_type, document_text, question_number):
"""Generate a personalized, interview-style product management coaching question based on resume."""
if not client:
return "I need to ask you a product management scenario, but there's an API connection issue."
    # Base scenarios for each coaching type
    base_scenarios = {
        'Product Strategy & Vision': [
            "developing product vision for a struggling mobile application",
            "evaluating a strategic pivot from B2C to B2B",
            "presenting product strategy to secure funding",
            "responding to competitive threats strategically",
            "integrating acquired startup products"
        ],
        'Market Research & Analysis': [
            "conducting competitive analysis after competitor launch",
            "expanding into new geographic markets",
            "resolving conflicting user research feedback",
            "researching with limited budget constraints",
            "validating demand for new product category"
        ],
        'User Experience & Design Thinking': [
            "redesigning high drop-off onboarding flow",
            "investigating declining Net Promoter Score",
            "balancing UX goals with technical constraints",
            "prioritizing UX improvements across segments",
            "leading design sprint for new experience"
        ],
        'Product Roadmap Planning': [
            "prioritizing features with limited engineering capacity",
            "handling sales pressure vs roadmap alignment",
            "communicating roadmap changes after setbacks",
            "re-prioritizing due to security vulnerabilities",
            "adjusting roadmap for aggressive growth targets"
        ],
        'Metrics & Analytics': [
            "investigating sudden drop in engagement metrics",
            "defining success metrics for new premium tier",
            "designing A/B test for conversion optimization",
            "selecting North Star metric for product team",
            "analyzing low-adoption, high-engagement features"
        ],
        'Stakeholder Management': [
            "navigating conflicting priorities across teams",
            "managing stakeholder disagreement and escalation",
            "building alignment across multiple teams",
            "resolving timeline conflicts with engineering",
            "handling customer churn threats and demands"
        ],
        'Product Launch Strategy': [
            "handling critical bugs close to launch",
            "designing go-to-market for enterprise expansion",
            "deciding on launch with mixed early metrics",
            "adjusting strategy due to competitor timing",
            "managing feature impact on core metrics"
        ],
        'Competitive Analysis': [
            "responding to well-funded direct competitor",
            "analyzing acquisition threat in adjacent space",
            "capitalizing on competitor retention struggles",
            "competing against superior marketing with inferior product",
            "defending against tech giant market entry"
        ],
        'Feature Prioritization': [
            "prioritizing with limited resources and high impact opportunities",
            "balancing customer demands vs product vision",
            "evaluating build vs buy vs partner decisions",
            "weighing revenue features vs foundational improvements",
            "deciding on resource allocation for new capabilities"
        ],
        'Customer Development': [
            "investigating low adoption of launched features",
            "structuring customer interviews for validation",
            "managing custom feature requests from major customers",
            "addressing user confusion while maintaining power features",
            "analyzing high new-user churn vs strong retention"
        ],
        'Resume & Application Strategy': [
            "positioning experience for fintech PM role transition",
            "demonstrating quantified impact in interviews",
            "addressing PM framework gaps during applications",
            "showcasing PM skills from non-PM background",
            "differentiating against FAANG-experienced candidates"
        ]
    }
    scenarios = base_scenarios.get(coaching_type, [
        "solving complex product management challenge",
        "making strategic product decision under pressure",
        "applying PM frameworks to real-world scenario"
    ])
    # Select base scenario
    scenario_index = (question_number - 1) % len(scenarios)
    base_scenario = scenarios[scenario_index]

    # Create personalized prompt using resume context
    if document_text and document_text.strip():
        prompt = f"""You are an expert product management interviewer. Generate ONE realistic, challenging PM interview question that:
1. Focuses on: {base_scenario}
2. Is tailored to this candidate's background: {document_text[:1000]}
3. Coaching area: {coaching_type}
Create a specific scenario with:
- Realistic context (company size, industry, metrics)
- Clear constraints and timeline
- Multiple stakeholders involved
- Quantifiable elements (users, revenue, etc.)
Make it feel like a real PM interview question that considers their background. Keep it focused and actionable.
Example format: "You're the PM at [specific company type]. [Specific situation with numbers]. [Key challenge]. How do you approach this?"
Generate just the question scenario, no additional text:"""
    else:
        prompt = f"""You are an expert product management interviewer. Generate ONE realistic, challenging PM interview question about {base_scenario} in the {coaching_type} area.
Create a specific scenario with:
- Realistic context (company size, industry, metrics)
- Clear constraints and timeline
- Multiple stakeholders involved
- Quantifiable elements (users, revenue, etc.)
Example format: "You're the PM at [specific company type]. [Specific situation with numbers]. [Key challenge]. How do you approach this?"
Generate just the question scenario, no additional text:"""
    try:
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=MODEL,
            temperature=0.7,
            max_tokens=200
        )
        generated_question = chat_completion.choices[0].message.content.strip()

        # Add context note if resume was provided
        if document_text and document_text.strip():
            context_note = "\n\nNote: Consider your background and experience when answering."
            return generated_question + context_note
        return generated_question
    except Exception as e:
        print(f"❌ Error generating personalized question: {e}")
        # Fallback to basic scenario
        fallback_questions = {
            'Product Strategy & Vision': "You're the PM for a mobile app with declining user engagement. How would you develop a new product vision to turn this around?",
            'Market Research & Analysis': "A competitor just launched a feature similar to yours. How do you conduct competitive analysis and determine your response?",
            'Resume & Application Strategy': "You're applying for a Senior PM role but lack direct PM experience. How do you position your background effectively?"
        }
        return fallback_questions.get(coaching_type, f"Walk me through how you'd approach {base_scenario} as a product manager.")
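
# Usage sketch for generate_coaching_question (illustrative values only):
# each coaching type has five base scenarios, and question_number rotates
# through them via (question_number - 1) % len(scenarios), so question 6
# wraps back to the first scenario.
#
#     q = generate_coaching_question("Metrics & Analytics", resume_text, 3)
#     # question_number=3 -> scenario index 2, "designing A/B test for
#     # conversion optimization"; resume_text here is a placeholder string.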
def evaluate_response(question, response, coaching_type):
"""Advanced product management coaching evaluation with comprehensive feedback."""
if not client:
return "Unable to provide feedback - API connection issue. Please check your Groq API key."
# Get relevant frameworks for this coaching type
suggested_frameworks = get_framework_suggestion('general', coaching_type)
framework_names = [fw['name'] for fw in suggested_frameworks[:2]] # Top 2 frameworks
# Get relevant metrics
relevant_metrics = get_relevant_metrics(coaching_type)[:3] # Top 3 metrics
    # Advanced evaluation prompt with scoring criteria - requesting clean formatting
    prompt = f"""
You are a world-class product management coach and former VP of Product at top tech companies.
Evaluate this product manager's response with the depth and insight of an experienced mentor.
SCENARIO: {question}
PM'S RESPONSE: {response}
COACHING AREA: {coaching_type}
Please provide a professional coaching evaluation using this EXACT structure (no markdown formatting, no ** or # symbols):
EVALUATION SCORES (Rate 1-10 for each):
• Strategic Thinking: [X]/10 - How well they approached the big picture
• Problem Analysis: [X]/10 - Depth of problem understanding
• Framework Application: [X]/10 - Use of PM methodologies
• Stakeholder Awareness: [X]/10 - Consideration of different perspectives
• Execution Focus: [X]/10 - Practicality and actionability
• Communication: [X]/10 - Clarity and structure of response
STRENGTHS:
[List 2-3 specific things they did well]
AREAS FOR GROWTH:
[List 2-3 specific areas for improvement]
FRAMEWORK RECOMMENDATIONS:
Consider applying: {', '.join(framework_names) if framework_names else 'RICE prioritization, Jobs-to-be-Done'}
KEY METRICS TO TRACK:
Focus on: {', '.join(relevant_metrics) if relevant_metrics else 'user engagement, business impact'}
ACTIONABLE NEXT STEPS:
[1-2 concrete actions they can take]
EXPERT INSIGHT:
[One advanced tip that a seasoned PM would know]
IMPORTANT: Use plain text formatting only - NO markdown symbols like ** or # or _. Keep it professional and readable.
"""
    try:
        chat_completion = client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are an expert product management coach with 15+ years of experience. Provide detailed, actionable feedback using clean, professional formatting without markdown symbols."},
                {"role": "user", "content": prompt}
            ],
            model=MODEL,
            temperature=0.7,  # Slightly creative but consistent
            max_tokens=1000  # Allow for comprehensive feedback
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        # Fallback response with framework suggestions
        framework_text = f" Consider applying the {framework_names[0]} framework." if framework_names else ""
        metric_text = f" Focus on metrics like {relevant_metrics[0]}." if relevant_metrics else ""
        return f"""
FEEDBACK: Great thinking on this scenario!{framework_text}{metric_text}
STRENGTHS: You showed good product thinking and approached the problem systematically.
AREAS FOR GROWTH: Consider exploring more PM frameworks and being more specific about metrics and success criteria.
NEXT STEPS: Practice more scenarios in {coaching_type} and study relevant PM frameworks.
Note: Full AI evaluation unavailable - {str(e)}
"""
def generate_coaching_feedback(coaching_log, coaching_type, name):
"""Generate overall coaching feedback based on the entire session."""
responses_summary = "\n".join([
f"Scenario: {item['question'][:100]}...\nResponse: {item['response'][:200]}...\n"
for item in coaching_log
])
prompt = f"""
You are an expert product management coach providing a comprehensive development summary for {name} who just completed a coaching session on {coaching_type}.
SESSION SUMMARY:
{responses_summary}
Please provide a comprehensive coaching summary that includes:
1. **Overall Performance**: Key strengths demonstrated
2. **Growth Areas**: 3 specific areas for continued development
3. **Recommended Learning**: Books, frameworks, or practices to explore
4. **Action Plan**: Concrete next steps for skill development
5. **Encouragement**: Motivational closing focused on their PM journey
Make it personal, actionable, and encouraging for a product manager's growth.
"""
try:
chat_completion = client.chat.completions.create(
messages=[{"role": "user", "content": prompt}],
model=MODEL,
)
return chat_completion.choices[0].message.content
except Exception as e:
return f"Excellent work in this {coaching_type} coaching session, {name}! Continue practicing these scenarios and exploring product management frameworks to enhance your skills."
# Legacy function for backward compatibility
def evaluate_answer(question, answer):
"""Legacy function - redirects to evaluate_response for coaching context."""
return evaluate_response(question, answer, "General Product Management")
def parse_scores_from_evaluation(evaluation_text: str) -> dict:
"""Extract numerical scores from the advanced evaluation text."""
scores = {
'Strategic Thinking': 0,
'Problem Analysis': 0,
'Framework Application': 0,
'Stakeholder Awareness': 0,
'Execution Focus': 0,
'Communication': 0
}
# Updated regex patterns for the new scoring format
patterns = {
'Strategic Thinking': r'Strategic Thinking:\s*(\d+)/10',
'Problem Analysis': r'Problem Analysis:\s*(\d+)/10',
'Framework Application': r'Framework Application:\s*(\d+)/10',
'Stakeholder Awareness': r'Stakeholder Awareness:\s*(\d+)/10',
'Execution Focus': r'Execution Focus:\s*(\d+)/10',
'Communication': r'Communication:\s*(\d+)/10'
}
for category, pattern in patterns.items():
match = re.search(pattern, evaluation_text, re.IGNORECASE)
if match:
try:
scores[category] = int(match.group(1))
except (ValueError, IndexError):
scores[category] = 7 # Default score if parsing fails
return scores
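
# Minimal parsing sketch (the evaluation text is a hypothetical excerpt in the
# plain-text format requested by evaluate_response):
#
#     sample = "Strategic Thinking: 8/10 - ...\nCommunication: 7/10 - ..."
#     parse_scores_from_evaluation(sample)
#     # -> Strategic Thinking 8 and Communication 7; the other four categories
#     # stay at 0. Note the patterns expect bare digits before "/10", so a
#     # bracketed score like "[8]/10" would not match.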
def get_overall_score(scores_dict: dict) -> float:
"""Calculate overall score from individual category scores."""
if not scores_dict or not any(scores_dict.values()):
return 7.0 # Default score
total_score = sum(scores_dict.values())
max_possible = len(scores_dict) * 10
return round((total_score / max_possible) * 10, 1)
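
# Worked example for get_overall_score (illustrative numbers): category scores
# of 8, 7, 9, 6, 8 and 7 sum to 45 out of a possible 60, so the overall score
# is round((45 / 60) * 10, 1) == 7.5, which get_score_interpretation below
# maps to the "Good" band.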
def get_score_interpretation(overall_score: float) -> str:
"""Provide interpretation of the overall score."""
if overall_score >= 9.0:
return "π Exceptional - You demonstrated expert-level product management thinking!"
elif overall_score >= 8.0:
return "π Excellent - Strong PM skills with minor areas for refinement"
elif overall_score >= 7.0:
return "β
Good - Solid foundation with clear growth opportunities"
elif overall_score >= 6.0:
return "π Developing - Good start, focus on applying more frameworks"
elif overall_score >= 5.0:
return "πͺ Building - Keep practicing, you're on the right track"
else:
return "π― Learning - Focus on fundamentals and PM best practices"
def parse_legacy_interview_scores(evaluation_text: str) -> dict:
    """Extract numerical scores from the legacy interview-style evaluation format (kept for backward compatibility)."""
    scores = {
        'Factual Accuracy': 0,
        'Relevance & Directness': 0,
        'Structure & Clarity': 0
    }
    pattern = r"(Factual Accuracy|Relevance & Directness|Structure & Clarity):\s*\[?(\d{1,2})\]?\/10"
    matches = re.findall(pattern, evaluation_text, re.IGNORECASE)
    for match in matches:
        category_name, score_value = match[0].strip(), int(match[1])
        if category_name in scores:
            scores[category_name] = score_value
    print(f"📊 Parsed scores: {scores}")
    return scores
def generate_holistic_feedback(full_interview_log):
prompt = f"""
You are a senior interview coach. Based on the entire Q&A log, provide a high-level "Overall Performance Summary" and an "Actionable Improvement Plan".
**FULL INTERVIEW LOG:** --- {full_interview_log} ---
"""
try:
chat_completion = client.chat.completions.create(
messages=[{"role": "user", "content": prompt}],
model=MODEL,
)
return chat_completion.choices[0].message.content
except Exception as e:
return "Could not generate holistic feedback due to an error." |