# Product-ai / modules/llm_handler.py
# Author: Parimal Kalpande - "this is new update" (commit 9753f0a)
# modules/llm_handler.py
import os
import config
import re
from groq import Groq
from modules.web_search import search_for_example_answers
from modules.pm_frameworks import get_framework_suggestion, get_relevant_metrics
# Initialize the Groq client with proper error handling
def get_groq_client():
    """Build and return an authenticated Groq API client.

    The API key is resolved from ``config.GROQ_API_KEY`` first, falling back
    to the ``GROQ_API_KEY`` environment variable.

    Raises:
        ValueError: if no API key is available from either source.
    """
    key = config.GROQ_API_KEY or os.environ.get("GROQ_API_KEY")
    if key:
        return Groq(api_key=key)
    raise ValueError("GROQ_API_KEY is required. Please set it in .env file or environment variables.")
# Module-level initialization: connect to Groq once at import time. On any
# failure the module stays importable with `client = None`; every public
# function checks `client` (or catches the resulting error) before use.
try:
    client = get_groq_client()
    MODEL = config.GROQ_MODEL
    print(f"βœ… Groq API connected successfully with model: {MODEL}")
except Exception as err:
    print(f"❌ Failed to initialize Groq client: {err}")
    client = None
    # Hard-coded fallback model name so downstream code still has a value.
    MODEL = "llama3-8b-8192"
def generate_coaching_question(coaching_type: str, document_text: str, question_number: int) -> str:
    """Generate a personalized, interview-style product management coaching question based on resume.

    Args:
        coaching_type: Coaching area name; must match a key of ``base_scenarios``
            below, otherwise a generic scenario pool is used.
        document_text: Raw resume text used to personalize the prompt; may be
            empty or None, in which case a non-personalized prompt is built.
        question_number: 1-based question counter; used to cycle through the
            five scenarios of the selected area deterministically.

    Returns:
        A single interview-question string (with a short advisory note appended
        when a resume was supplied), or a canned fallback question if the LLM
        call fails.
    """
    # Without a working client nothing dynamic can be generated.
    if not client:
        return "I need to ask you a product management scenario, but there's an API connection issue."
    # Base scenarios for each coaching type (5 per area, cycled by question_number).
    base_scenarios = {
        'Product Strategy & Vision': [
            "developing product vision for a struggling mobile application",
            "evaluating a strategic pivot from B2C to B2B",
            "presenting product strategy to secure funding",
            "responding to competitive threats strategically",
            "integrating acquired startup products"
        ],
        'Market Research & Analysis': [
            "conducting competitive analysis after competitor launch",
            "expanding into new geographic markets",
            "resolving conflicting user research feedback",
            "researching with limited budget constraints",
            "validating demand for new product category"
        ],
        'User Experience & Design Thinking': [
            "redesigning high drop-off onboarding flow",
            "investigating declining Net Promoter Score",
            "balancing UX goals with technical constraints",
            "prioritizing UX improvements across segments",
            "leading design sprint for new experience"
        ],
        'Product Roadmap Planning': [
            "prioritizing features with limited engineering capacity",
            "handling sales pressure vs roadmap alignment",
            "communicating roadmap changes after setbacks",
            "re-prioritizing due to security vulnerabilities",
            "adjusting roadmap for aggressive growth targets"
        ],
        'Metrics & Analytics': [
            "investigating sudden drop in engagement metrics",
            "defining success metrics for new premium tier",
            "designing A/B test for conversion optimization",
            "selecting North Star metric for product team",
            "analyzing low-adoption, high-engagement features"
        ],
        'Stakeholder Management': [
            "navigating conflicting priorities across teams",
            "managing stakeholder disagreement and escalation",
            "building alignment across multiple teams",
            "resolving timeline conflicts with engineering",
            "handling customer churn threats and demands"
        ],
        'Product Launch Strategy': [
            "handling critical bugs close to launch",
            "designing go-to-market for enterprise expansion",
            "deciding on launch with mixed early metrics",
            "adjusting strategy due to competitor timing",
            "managing feature impact on core metrics"
        ],
        'Competitive Analysis': [
            "responding to well-funded direct competitor",
            "analyzing acquisition threat in adjacent space",
            "capitalizing on competitor retention struggles",
            "competing against superior marketing with inferior product",
            "defending against tech giant market entry"
        ],
        'Feature Prioritization': [
            "prioritizing with limited resources and high impact opportunities",
            "balancing customer demands vs product vision",
            "evaluating build vs buy vs partner decisions",
            "weighing revenue features vs foundational improvements",
            "deciding on resource allocation for new capabilities"
        ],
        'Customer Development': [
            "investigating low adoption of launched features",
            "structuring customer interviews for validation",
            "managing custom feature requests from major customers",
            "addressing user confusion while maintaining power features",
            "analyzing high new-user churn vs strong retention"
        ],
        'Resume & Application Strategy': [
            "positioning experience for fintech PM role transition",
            "demonstrating quantified impact in interviews",
            "addressing PM framework gaps during applications",
            "showcasing PM skills from non-PM background",
            "differentiating against FAANG-experienced candidates"
        ]
    }
    # Unknown coaching types fall back to a generic scenario pool.
    scenarios = base_scenarios.get(coaching_type, [
        "solving complex product management challenge",
        "making strategic product decision under pressure",
        "applying PM frameworks to real-world scenario"
    ])
    # Select base scenario: cycle through the pool so consecutive questions differ.
    scenario_index = (question_number - 1) % len(scenarios)
    base_scenario = scenarios[scenario_index]
    # Create personalized prompt using resume context; only the first 1000
    # characters of the resume are sent to keep the prompt small.
    if document_text and document_text.strip():
        prompt = f"""You are an expert product management interviewer. Generate ONE realistic, challenging PM interview question that:
1. Focuses on: {base_scenario}
2. Is tailored to this candidate's background: {document_text[:1000]}
3. Coaching area: {coaching_type}
Create a specific scenario with:
- Realistic context (company size, industry, metrics)
- Clear constraints and timeline
- Multiple stakeholders involved
- Quantifiable elements (users, revenue, etc.)
Make it feel like a real PM interview question that considers their background. Keep it focused and actionable.
Example format: "You're the PM at [specific company type]. [Specific situation with numbers]. [Key challenge]. How do you approach this?"
Generate just the question scenario, no additional text:"""
    else:
        # No resume available: generic (but still scenario-specific) prompt.
        prompt = f"""You are an expert product management interviewer. Generate ONE realistic, challenging PM interview question about {base_scenario} in the {coaching_type} area.
Create a specific scenario with:
- Realistic context (company size, industry, metrics)
- Clear constraints and timeline
- Multiple stakeholders involved
- Quantifiable elements (users, revenue, etc.)
Example format: "You're the PM at [specific company type]. [Specific situation with numbers]. [Key challenge]. How do you approach this?"
Generate just the question scenario, no additional text:"""
    try:
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=MODEL,
            temperature=0.7,  # some variety between questions
            max_tokens=200  # a question scenario should stay short
        )
        generated_question = chat_completion.choices[0].message.content.strip()
        # Add context note if resume was provided
        if document_text and document_text.strip():
            context_note = "\n\nNote: Consider your background and experience when answering."
            return generated_question + context_note
        return generated_question
    except Exception as e:
        print(f"❌ Error generating personalized question: {e}")
        # Fallback to basic scenario: canned questions for a few areas, plus a
        # generic template built from the selected base scenario.
        fallback_questions = {
            'Product Strategy & Vision': f"You're the PM for a mobile app with declining user engagement. How would you develop a new product vision to turn this around?",
            'Market Research & Analysis': f"A competitor just launched a feature similar to yours. How do you conduct competitive analysis and determine your response?",
            'Resume & Application Strategy': f"You're applying for a Senior PM role but lack direct PM experience. How do you position your background effectively?"
        }
        return fallback_questions.get(coaching_type, f"Walk me through how you'd approach {base_scenario} as a product manager.")
def evaluate_response(question: str, response: str, coaching_type: str) -> str:
    """Advanced product management coaching evaluation with comprehensive feedback.

    Args:
        question: The scenario/question that was posed to the PM.
        response: The PM's answer to evaluate.
        coaching_type: Coaching area used to pick frameworks and metrics.

    Returns:
        A plain-text evaluation following the structured template below
        (scores, strengths, growth areas, etc.), or a short canned fallback
        if the API is unavailable or the call fails.
    """
    if not client:
        return "Unable to provide feedback - API connection issue. Please check your Groq API key."
    # Get relevant frameworks for this coaching type.
    # NOTE(review): assumes get_framework_suggestion returns a list of dicts
    # with a 'name' key - defined in modules.pm_frameworks; verify there.
    suggested_frameworks = get_framework_suggestion('general', coaching_type)
    framework_names = [fw['name'] for fw in suggested_frameworks[:2]]  # Top 2 frameworks
    # Get relevant metrics
    relevant_metrics = get_relevant_metrics(coaching_type)[:3]  # Top 3 metrics
    # Advanced evaluation prompt with scoring criteria - requesting clean
    # formatting. The "[X]/10" lines below are what
    # parse_scores_from_evaluation later extracts scores from.
    prompt = f"""
You are a world-class product management coach and former VP of Product at top tech companies.
Evaluate this product manager's response with the depth and insight of an experienced mentor.
SCENARIO: {question}
PM'S RESPONSE: {response}
COACHING AREA: {coaching_type}
Please provide a professional coaching evaluation using this EXACT structure (no markdown formatting, no ** or # symbols):
EVALUATION SCORES (Rate 1-10 for each):
β€’ Strategic Thinking: [X]/10 - How well they approached the big picture
β€’ Problem Analysis: [X]/10 - Depth of problem understanding
β€’ Framework Application: [X]/10 - Use of PM methodologies
β€’ Stakeholder Awareness: [X]/10 - Consideration of different perspectives
β€’ Execution Focus: [X]/10 - Practicality and actionability
β€’ Communication: [X]/10 - Clarity and structure of response
STRENGTHS:
[List 2-3 specific things they did well]
AREAS FOR GROWTH:
[List 2-3 specific areas for improvement]
FRAMEWORK RECOMMENDATIONS:
Consider applying: {', '.join(framework_names) if framework_names else 'RICE prioritization, Jobs-to-be-Done'}
KEY METRICS TO TRACK:
Focus on: {', '.join(relevant_metrics) if relevant_metrics else 'user engagement, business impact'}
ACTIONABLE NEXT STEPS:
[1-2 concrete actions they can take]
EXPERT INSIGHT:
[One advanced tip that a seasoned PM would know]
IMPORTANT: Use plain text formatting only - NO markdown symbols like ** or # or _. Keep it professional and readable.
"""
    try:
        chat_completion = client.chat.completions.create(
            messages=[
                {"role": "system", "content": "You are an expert product management coach with 15+ years of experience. Provide detailed, actionable feedback using clean, professional formatting without markdown symbols."},
                {"role": "user", "content": prompt}
            ],
            model=MODEL,
            temperature=0.7,  # Slightly creative but consistent
            max_tokens=1000  # Allow for comprehensive feedback
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        # Fallback response with framework suggestions (still personalized
        # with whatever frameworks/metrics were resolved above).
        framework_text = f" Consider applying {framework_names[0]} framework" if framework_names else ""
        metric_text = f" Focus on metrics like {relevant_metrics[0]}" if relevant_metrics else ""
        return f"""
FEEDBACK: Great thinking on this scenario!{framework_text}.{metric_text}
STRENGTHS: You showed good product thinking and approached the problem systematically.
AREAS FOR GROWTH: Consider exploring more PM frameworks and being more specific about metrics and success criteria.
NEXT STEPS: Practice more scenarios in {coaching_type} and study relevant PM frameworks.
Note: Full AI evaluation unavailable - {str(e)}
"""
def generate_coaching_feedback(coaching_log, coaching_type, name):
    """Produce an end-of-session development summary for the whole coaching log.

    Args:
        coaching_log: Sequence of dicts, each with 'question' and 'response'
            keys, representing the full session.
        coaching_type: Coaching area the session covered.
        name: The coachee's name, woven into the summary.

    Returns:
        The LLM-generated summary text, or a short encouraging fallback
        message if the API call fails.
    """
    # Condense each Q&A pair into a truncated digest line for the prompt.
    digest_lines = []
    for entry in coaching_log:
        digest_lines.append(
            f"Scenario: {entry['question'][:100]}...\nResponse: {entry['response'][:200]}...\n"
        )
    session_digest = "\n".join(digest_lines)
    prompt = f"""
You are an expert product management coach providing a comprehensive development summary for {name} who just completed a coaching session on {coaching_type}.
SESSION SUMMARY:
{session_digest}
Please provide a comprehensive coaching summary that includes:
1. **Overall Performance**: Key strengths demonstrated
2. **Growth Areas**: 3 specific areas for continued development
3. **Recommended Learning**: Books, frameworks, or practices to explore
4. **Action Plan**: Concrete next steps for skill development
5. **Encouragement**: Motivational closing focused on their PM journey
Make it personal, actionable, and encouraging for a product manager's growth.
"""
    try:
        completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=MODEL,
        )
        return completion.choices[0].message.content
    except Exception:
        # Best-effort fallback so the session always ends on a positive note.
        return f"Excellent work in this {coaching_type} coaching session, {name}! Continue practicing these scenarios and exploring product management frameworks to enhance your skills."
# Legacy function for backward compatibility
def evaluate_answer(question, answer):
    """Legacy entry point kept for backward compatibility.

    Delegates to evaluate_response() with a generic coaching area so old
    callers keep working.
    """
    coaching_area = "General Product Management"
    return evaluate_response(question, answer, coaching_area)
def parse_scores_from_evaluation(evaluation_text: str) -> dict:
    """Extract the six 1-10 category scores from an evaluation transcript.

    The evaluation template asks the model to emit lines such as
    ``β€’ Strategic Thinking: [8]/10``. Models sometimes keep the literal
    square brackets and sometimes drop them, so both ``8/10`` and ``[8]/10``
    are accepted (the old patterns required the bare form and silently left
    bracketed scores at 0).

    Args:
        evaluation_text: Raw feedback text returned by evaluate_response().

    Returns:
        dict: Category name -> integer score; categories that cannot be
        found in the text remain at 0.
    """
    scores = {
        'Strategic Thinking': 0,
        'Problem Analysis': 0,
        'Framework Application': 0,
        'Stakeholder Awareness': 0,
        'Execution Focus': 0,
        'Communication': 0
    }
    for category in scores:
        # Optional [brackets] around the number and optional spaces around
        # the slash, matching "8/10", "[8]/10", "8 / 10", etc.
        pattern = rf"{re.escape(category)}:\s*\[?(\d+)\]?\s*/\s*10"
        match = re.search(pattern, evaluation_text, re.IGNORECASE)
        if match:
            try:
                scores[category] = int(match.group(1))
            except (ValueError, IndexError):
                scores[category] = 7  # Defensive default if parsing fails
    return scores
def get_overall_score(scores_dict: dict) -> float:
    """Collapse per-category scores into one overall score on a 0-10 scale.

    Args:
        scores_dict: Category name -> score (each out of 10).

    Returns:
        The average score rounded to one decimal, or 7.0 when the dict is
        empty or every category is zero (i.e. nothing was parsed).
    """
    # Nothing parsed (or all zeros): fall back to a neutral default.
    if not scores_dict or not any(scores_dict.values()):
        return 7.0
    achieved = sum(scores_dict.values())
    possible = len(scores_dict) * 10
    return round((achieved / possible) * 10, 1)
def get_score_interpretation(overall_score: float) -> str:
    """Map an overall 0-10 score onto a short human-readable verdict.

    Args:
        overall_score: Overall score as produced by get_overall_score().

    Returns:
        A one-line interpretation string for the given score band.

    Note:
        The unreachable legacy code that used to follow the final return
        (an old interview-scoring parser referencing an undefined
        ``evaluation_text`` variable) has been removed; it could never
        execute because every branch below returns.
    """
    if overall_score >= 9.0:
        return "🌟 Exceptional - You demonstrated expert-level product management thinking!"
    elif overall_score >= 8.0:
        return "πŸš€ Excellent - Strong PM skills with minor areas for refinement"
    elif overall_score >= 7.0:
        return "βœ… Good - Solid foundation with clear growth opportunities"
    elif overall_score >= 6.0:
        return "πŸ“ˆ Developing - Good start, focus on applying more frameworks"
    elif overall_score >= 5.0:
        return "πŸ’ͺ Building - Keep practicing, you're on the right track"
    else:
        return "🎯 Learning - Focus on fundamentals and PM best practices"
def generate_holistic_feedback(full_interview_log):
    """Summarize an entire interview Q&A log into high-level coaching feedback.

    Args:
        full_interview_log: The complete interview transcript (any
            string-formattable object), embedded verbatim in the prompt.

    Returns:
        The LLM-generated summary, or a fixed error message if the call fails.
    """
    prompt = f"""
You are a senior interview coach. Based on the entire Q&A log, provide a high-level "Overall Performance Summary" and an "Actionable Improvement Plan".
**FULL INTERVIEW LOG:** --- {full_interview_log} ---
"""
    try:
        completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=MODEL,
        )
        return completion.choices[0].message.content
    except Exception:
        # Keep the caller's flow intact even when the API is unreachable.
        return "Could not generate holistic feedback due to an error."