| |
| """ |
| Complete Safe Conversational Startup Journey Advisor |
| - All testing and validation features restored |
| - CSP-safe implementation |
| - Async model loading to prevent startup blocking |
| - Full conversation intelligence with proper error handling |
| """ |
|
|
| import os |
| import json |
| import secrets |
| import asyncio |
| import threading |
| from typing import Dict, List, Any, Optional |
| from datetime import datetime |
| import torch |
| from transformers import AutoTokenizer, AutoModelForCausalLM |
| from passlib.context import CryptContext |
| from dotenv import load_dotenv |
|
|
| |
| from fastapi import FastAPI, Request, HTTPException, Depends, BackgroundTasks |
| from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials |
| from fastapi.middleware.cors import CORSMiddleware |
| from fastapi.responses import HTMLResponse |
|
|
| load_dotenv() |
|
|
print("=" * 60)
print("π COMPLETE SAFE CONVERSATIONAL STARTUP ADVISOR")
print("=" * 60)


# Credentials come from the environment (.env loaded above via load_dotenv()).
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_SERVICE_KEY = os.getenv("SUPABASE_SERVICE_KEY")
HF_TOKEN = os.getenv("HF_TOKEN")

# Startup diagnostics.  NOTE(review): the status glyphs ('π§', 'β') are
# mojibake from an encoding round-trip — presumably 🔧/✅/❌ originally; the
# literals below were also split mid-string by the mangling and are rejoined
# here.  Confirm the intended glyphs against the original file.
print(f"π§ Supabase: {'β Configured' if SUPABASE_URL and SUPABASE_SERVICE_KEY else 'β Missing'}")
print(f"π§ HF Token: {'β Set' if HF_TOKEN else 'β Missing'}")
|
|
| |
# Optional Supabase backing store.  When credentials are missing or the
# connection test fails, the service runs in "fallback/demo mode" and every
# DB helper below degrades gracefully.
SUPABASE_CONNECTED = False
supabase = None

if SUPABASE_URL and SUPABASE_SERVICE_KEY:
    try:
        from supabase import create_client, Client
        supabase: Client = create_client(SUPABASE_URL, SUPABASE_SERVICE_KEY)
        # Cheap round-trip query to prove the credentials actually work
        # before we advertise the connection as usable.
        test_result = supabase.table("companies").select("count", count="exact").limit(1).execute()
        SUPABASE_CONNECTED = True
        print("β Supabase connected and tested")
    except Exception as e:
        print(f"β οΈ Supabase connection failed: {e}")
        SUPABASE_CONNECTED = False
else:
    print("β οΈ Supabase not configured - using fallback mode")
|
|
| |
# --- Model configuration and lazy-loading state -------------------------------
# The 7B model is loaded by load_model_async() in a daemon thread so the HTTP
# server can start accepting requests before the weights are ready.
MODEL_NAME = "NousResearch/Hermes-2-Pro-Mistral-7B"
device = "cuda" if torch.cuda.is_available() else "cpu"
MODEL_LOADED = False   # True once model + tokenizer are ready to serve
MODEL_LOADING = False  # guards against duplicate concurrent loads
model = None
tokenizer = None
|
|
def load_model_async():
    """Load tokenizer and model, intended to run in a background thread.

    Mutates the module globals MODEL_LOADED / MODEL_LOADING / model /
    tokenizer.  Safe to call repeatedly: it no-ops while a load is in flight
    or after one has succeeded.  Failures are logged, never raised, so the
    service keeps running on the rule-based fallback.

    NOTE(review): the MODEL_LOADING check-then-set is not lock-protected;
    fine while only one loader thread is ever started (see below), but not
    safe if called from multiple threads.
    """
    global MODEL_LOADED, MODEL_LOADING, model, tokenizer

    if MODEL_LOADING or MODEL_LOADED:
        return

    MODEL_LOADING = True
    print(f"π€ Starting async model loading: {MODEL_NAME} on {device}...")

    try:
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HF_TOKEN, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME, token=HF_TOKEN,
            # fp16 + 8-bit quantization only make sense on GPU; CPU gets fp32.
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            device_map="auto" if device == "cuda" else None, trust_remote_code=True,
            load_in_8bit=True if device == "cuda" else False
        )
        # Many causal-LM tokenizers ship without a pad token; reuse EOS.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        model.eval()
        MODEL_LOADED = True
        print("β Model loaded successfully in background!")
    except Exception as e:
        print(f"β Model loading failed: {e}")
        MODEL_LOADED = False
    finally:
        MODEL_LOADING = False
|
|
| |
# Kick off model loading in a daemon thread so the server starts serving
# immediately instead of blocking on a multi-gigabyte download.
model_thread = threading.Thread(target=load_model_async, daemon=True)
model_thread.start()


# FastAPI application instance.
# BUG FIX: `app` was referenced by the @app.on_event decorators below but was
# never created anywhere in the file, which raises NameError at import time.
app = FastAPI()


@app.on_event("startup")
async def startup_event():
    """Log readiness once the ASGI server has finished booting."""
    print("π Application startup complete")
    print("π‘ Server is ready to accept requests")
    print("π€ Model loading in background...")
|
|
@app.on_event("shutdown")
async def shutdown_event():
    """Shutdown event handler — currently log-only, no real cleanup is done.

    NOTE(review): @app.on_event is deprecated in recent FastAPI in favour of
    lifespan context managers; works today, but worth migrating.
    """
    print("π Application shutting down...")
    print("πΎ Cleaning up resources...")
|
|
| |
| |
| |
|
|
class ConversationIntelligence:
    """Complete conversation tracking with validation and testing.

    Pure keyword-heuristic analysis over a chat history — a list of
    {'user_query': str, 'model_response': str} dicts, oldest first.  No model
    calls are made, so every result is deterministic and cheap.

    BUG FIX: analyze_conversation_depth() called
    create_intelligent_summary_validated(), which in turn called
    analyze_conversation_depth() — infinite mutual recursion (RecursionError)
    for any non-empty history.  The summary now derives its inputs directly
    from the individual extractors.
    """

    def __init__(self):
        # topics_discussed / questions_asked / conversation_quality_scores are
        # accumulator state available for external inspection;
        # response_validation_logs is appended to by
        # validate_response_quality() elsewhere in this module.
        self.topics_discussed = set()
        self.questions_asked = []
        self.conversation_quality_scores = []
        self.response_validation_logs = []

    def analyze_conversation_depth(self, conversation_history: List[Dict]) -> Dict[str, Any]:
        """Deep conversation analysis with full validation.

        Returns a dict describing the user's experience level, main concern,
        topics and advice already covered, plus quality/continuity scores.
        Safe for an empty (or None-coalesced) history.
        """
        if not conversation_history:
            return {
                "has_history": False,
                "user_level": "unknown",
                "main_concern": "unknown",
                "conversation_stage": "beginning",
                "topics_covered": [],
                "last_real_need": None,
                "avoid_repeating": [],
                "context_summary": "This is our first interaction.",
                "quality_score": 1.0,
                "validation_status": "no_history"
            }

        user_responses = [ex.get('user_query', '') for ex in conversation_history]
        ai_responses = [ex.get('model_response', '') for ex in conversation_history]
        combined_user_text = ' '.join(user_responses).lower()

        user_level = self.detect_user_level_validated(combined_user_text)
        main_concern, concern_confidence = self.extract_real_concern_scored(conversation_history)
        topics_covered = self.extract_and_validate_topics(conversation_history)
        advice_given = self.extract_advice_given_comprehensive(ai_responses)
        quality_score = self.assess_conversation_quality(conversation_history)
        context_summary = self.create_intelligent_summary_validated(conversation_history)
        last_ai_question = self.extract_last_question_validated(conversation_history)

        return {
            "has_history": True,
            "user_level": user_level,
            "main_concern": main_concern,
            "concern_confidence": concern_confidence,
            "conversation_stage": self.identify_conversation_stage_detailed(conversation_history),
            "topics_covered": topics_covered,
            "last_real_need": self.extract_last_need_validated(conversation_history),
            "avoid_repeating": advice_given,
            "context_summary": context_summary,
            "last_ai_question": last_ai_question,
            "quality_score": quality_score,
            "validation_status": "analyzed",
            "conversation_flow": self.analyze_flow_detailed(conversation_history),
            "continuity_score": self.calculate_continuity_score(conversation_history)
        }

    def detect_user_level_validated(self, combined_text: str) -> str:
        """Classify founder experience from keyword hits in all user text.

        Returns the level with the most indicator matches, or "unclear" when
        nothing matches.
        """
        level_indicators = {
            "complete_beginner": ['just have an idea', 'where do i start', 'how do i even', 'never done this', 'don\'t know'],
            "early_stage": ['have a prototype', 'built something', 'some customers', 'testing', 'mvp'],
            "growth_stage": ['scaling', 'growing', 'hiring', 'series', 'funding round'],
            "experienced": ['raised', 'previous startup', 'sold', 'exit', 'portfolio']
        }

        scores = {}
        for level, indicators in level_indicators.items():
            score = sum(1 for indicator in indicators if indicator in combined_text)
            if score > 0:
                scores[level] = score

        if not scores:
            return "unclear"

        return max(scores, key=scores.get)

    def extract_real_concern_scored(self, conversation_history: List[Dict]) -> tuple[str, float]:
        """Infer the user's dominant concern from their LAST message.

        Returns (concern_label, confidence).  Confidence scales with the
        number of matched phrases, capped at 1.0; ("unknown", 0.0) for an
        empty history and ("general_guidance", 0.5) when nothing matches.
        """
        if not conversation_history:
            return "unknown", 0.0

        last_user_query = conversation_history[-1].get('user_query', '').lower()

        # Order matters: earlier entries win ties (dicts preserve insertion order).
        concern_patterns = {
            "communication_anxiety": (['how do i talk', 'how to talk', 'scared', 'nervous', 'don\'t know what to say'], 0.9),
            "customer_discovery_mechanics": (['find customers', 'find people', 'who to talk to', 'where to find'], 0.8),
            "overwhelmed_by_process": (['where do i start', 'too much', 'overwhelmed', 'don\'t know'], 0.7),
            "validation_process": (['validate', 'test idea', 'prove', 'market research'], 0.8),
            "funding_concerns": (['money', 'funding', 'investor', 'capital', 'raise'], 0.8)
        }

        for concern, (patterns, base_confidence) in concern_patterns.items():
            matches = sum(1 for pattern in patterns if pattern in last_user_query)
            if matches > 0:
                confidence = min(base_confidence * matches, 1.0)
                return concern, confidence

        return "general_guidance", 0.5

    def extract_and_validate_topics(self, conversation_history: List[Dict]) -> List[str]:
        """Collect the set of startup topics touched anywhere in the history."""
        topics = set()

        topic_keywords = {
            'customer_development': ['customer', 'user', 'interview', 'feedback'],
            'fundraising': ['funding', 'investor', 'money', 'raise', 'capital'],
            'product_development': ['product', 'feature', 'build', 'develop', 'mvp'],
            'marketing': ['marketing', 'growth', 'acquisition', 'launch'],
            'team_building': ['team', 'hire', 'hiring', 'employee'],
        }

        for exchange in conversation_history:
            text = (exchange.get('user_query', '') + ' ' + exchange.get('model_response', '')).lower()
            for topic, keywords in topic_keywords.items():
                if any(word in text for word in keywords):
                    topics.add(topic)

        return list(topics)

    def extract_advice_given_comprehensive(self, ai_responses: List[str]) -> List[str]:
        """List advice categories the AI has already given, to avoid repeats."""
        advice_given = []
        combined_text = ' '.join(ai_responses).lower()

        advice_patterns = {
            'market_research': ['market research', 'research the market', 'analyze the market'],
            'business_plan': ['business plan', 'create a plan', 'planning'],
            'social_media': ['social media', 'linkedin', 'twitter', 'facebook'],
            'landing_page': ['landing page', 'website', 'web page'],
            'customer_interviews': ['customer interviews', 'talk to customers', 'interview'],
            'networking': ['networking', 'events', 'meetups', 'conferences'],
            'mvp': ['mvp', 'minimum viable product', 'prototype']
        }

        for advice_type, patterns in advice_patterns.items():
            if any(pattern in combined_text for pattern in patterns):
                advice_given.append(advice_type)

        return advice_given

    def assess_conversation_quality(self, conversation_history: List[Dict]) -> float:
        """Score conversation quality in [0, 1] from topic consistency and
        query/response word overlap.  Empty history scores 1.0."""
        if not conversation_history:
            return 1.0

        quality_factors = []

        # Factor 1: penalize frequent jumps in per-exchange topic counts.
        topics_per_exchange = [
            len(self.extract_and_validate_topics([exchange]))
            for exchange in conversation_history
        ]

        topic_consistency = 1.0
        if len(topics_per_exchange) > 1:
            topic_jumps = sum(1 for i in range(1, len(topics_per_exchange))
                              if topics_per_exchange[i] != topics_per_exchange[i-1])
            topic_consistency = max(0.5, 1.0 - (topic_jumps * 0.2))

        quality_factors.append(topic_consistency)

        # Factor 2: shared vocabulary between a query and its own response.
        # NOTE(review): both sides index [i-1], i.e. query vs. the answer in
        # the SAME exchange (and exchange 0 is skipped) — looks intentional
        # but confirm against the original design.
        relevance_score = 1.0
        for i in range(1, len(conversation_history)):
            prev_query = conversation_history[i-1].get('user_query', '').lower()
            curr_response = conversation_history[i-1].get('model_response', '').lower()

            if len(prev_query) > 10 and len(curr_response) > 10:
                common_words = set(prev_query.split()) & set(curr_response.split())
                if len(common_words) < 2:
                    relevance_score -= 0.1

        quality_factors.append(max(0.0, relevance_score))

        return sum(quality_factors) / len(quality_factors) if quality_factors else 1.0

    def calculate_continuity_score(self, conversation_history: List[Dict]) -> float:
        """Score [0, 1] for how well consecutive exchanges connect:
        bonus when a yes/no reply follows an AI question, penalty when two
        consecutive exchanges share no topics."""
        if len(conversation_history) < 2:
            return 1.0

        continuity_score = 1.0

        for i in range(1, len(conversation_history)):
            prev_response = conversation_history[i-1].get('model_response', '').lower()
            curr_query = conversation_history[i].get('user_query', '').lower()

            # Reward a direct answer to a question we asked.
            if '?' in prev_response:
                if any(word in curr_query for word in ['yes', 'no', 'yeah', 'nope']):
                    continuity_score += 0.1

            # Penalize a hard topic break between adjacent exchanges.
            prev_topics = self.extract_and_validate_topics([conversation_history[i-1]])
            curr_topics = self.extract_and_validate_topics([conversation_history[i]])

            if prev_topics and curr_topics:
                topic_overlap = len(set(prev_topics) & set(curr_topics))
                if topic_overlap == 0:
                    continuity_score -= 0.2

        return max(0.0, min(1.0, continuity_score))

    def extract_last_question_validated(self, conversation_history: List[Dict]) -> Optional[str]:
        """Return the trailing question from the last AI reply, or None.

        Splits on '?', walks segments from the end, and trims any leading
        sentence fragments before the actual question clause.
        """
        if not conversation_history:
            return None

        last_ai_response = conversation_history[-1].get('model_response', '')

        sentences = last_ai_response.split('?')
        if len(sentences) > 1:
            for sentence in reversed(sentences[:-1]):
                question = sentence.strip()
                if len(question) > 10:
                    question_parts = question.split('.')
                    if question_parts:
                        return question_parts[-1].strip() + '?'

        return None

    def create_intelligent_summary_validated(self, conversation_history: List[Dict]) -> str:
        """Build a one-line natural-language summary of the conversation.

        Computes its inputs directly from the extractor methods rather than
        via analyze_conversation_depth() (which calls this method — the old
        round-trip recursed infinitely).
        """
        if not conversation_history:
            return "Starting fresh conversation"

        combined_user_text = ' '.join(
            ex.get('user_query', '') for ex in conversation_history
        ).lower()
        ai_responses = [ex.get('model_response', '') for ex in conversation_history]

        user_level = self.detect_user_level_validated(combined_user_text)
        main_concern, _ = self.extract_real_concern_scored(conversation_history)
        topics_covered = self.extract_and_validate_topics(conversation_history)
        advice_given = self.extract_advice_given_comprehensive(ai_responses)
        quality_score = self.assess_conversation_quality(conversation_history)

        summary_parts = [f"User is {user_level}"]

        if main_concern != 'unknown':
            summary_parts.append(f"struggling with {main_concern}")

        if topics_covered:
            summary_parts.append(f"discussed {', '.join(topics_covered)}")

        if advice_given:
            summary_parts.append(f"already covered {', '.join(advice_given)}")

        if quality_score < 0.7:
            summary_parts.append("conversation needs refocusing")

        return ". ".join(summary_parts).capitalize() + "."

    def identify_conversation_stage_detailed(self, conversation_history: List[Dict]) -> str:
        """Map the number of exchanges to a coarse conversation stage."""
        num_exchanges = len(conversation_history)

        if num_exchanges == 0:
            return "initial_contact"
        elif num_exchanges == 1:
            return "understanding_needs"
        elif num_exchanges <= 3:
            return "clarifying_and_advising"
        elif num_exchanges <= 6:
            return "deep_guidance"
        else:
            return "ongoing_mentorship"

    def extract_last_need_validated(self, conversation_history: List[Dict]) -> Optional[str]:
        """Label the user's most recent need from their last message, or None."""
        if not conversation_history:
            return None

        last_query = conversation_history[-1].get('user_query', '').lower()

        need_patterns = {
            "wants_conversation_scripts": ['how do i talk', 'what do i say', 'conversation'],
            "wants_specific_tactics": ['how to find', 'where to find', 'specific ways'],
            "admits_no_progress": ['haven\'t yet', 'not really', 'haven\'t done'],
            "seeks_validation": ['is this right', 'am i doing', 'should i'],
            "needs_encouragement": ['struggling', 'hard', 'difficult', 'stuck']
        }

        for need, patterns in need_patterns.items():
            if any(pattern in last_query for pattern in patterns):
                return need

        return None

    def analyze_flow_detailed(self, conversation_history: List[Dict]) -> str:
        """Classify what the user's latest message is doing conversationally
        (answering, clarifying, frustrated, agreeing, ...); "continuing" when
        nothing matches, "starting" for an empty history."""
        if not conversation_history:
            return "starting"

        user_query = conversation_history[-1].get('user_query', '').lower()

        flow_indicators = {
            "answering_question": ['yes', 'no', 'yeah', 'nope', 'sure', 'definitely'],
            "asking_clarification": ['what do you mean', 'can you explain', 'how do i'],
            "expressing_frustration": ['but', 'however', 'struggling', 'difficult'],
            "showing_agreement": ['exactly', 'right', 'that makes sense', 'i see'],
            "changing_topic": ['also', 'another thing', 'what about'],
            "seeking_specifics": ['specific', 'example', 'exactly how']
        }

        for flow_type, indicators in flow_indicators.items():
            if any(indicator in user_query for indicator in indicators):
                return flow_type

        return "continuing"
|
|
| |
# Shared singleton used by the prompt builder and the response validator.
conv_intel = ConversationIntelligence()
|
|
| |
| |
| |
|
|
def create_validated_prompt(user_query: str, company_context: Dict[str, Any], conversation_history: List[Dict] = None) -> str:
    """Create prompts with full validation and testing.

    Builds a ChatML-format prompt whose system instructions are routed by the
    conversation analysis: first contact, user answering our question, anxious
    beginner, overwhelmed founder, or generic continuation.
    """

    # Heuristic history analysis (safe for None/empty history).
    analysis = conv_intel.analyze_conversation_depth(conversation_history or [])

    stage = company_context.get('stage', 'startup')
    industry = company_context.get('industry', 'tech')

    # Debug trace of the routing inputs.
    print(f"π§ ANALYSIS: Level={analysis['user_level']}, Concern={analysis['main_concern']}, Quality={analysis['quality_score']:.2f}")

    if not analysis["has_history"]:
        # First interaction: no prior context to reference — establish rapport.
        system_prompt = f"""You are a startup advisor meeting a {stage} {industry} founder for the first time.

INSTRUCTIONS:
- Give specific, helpful advice about their question
- Be professional but warm and approachable
- Keep response to 2-3 sentences + one relevant follow-up question
- Establish rapport and show expertise

USER QUESTION: {user_query}"""

    else:
        concern = analysis['main_concern']
        user_level = analysis['user_level']
        last_question = analysis.get('last_ai_question')
        conversation_flow = analysis['conversation_flow']

        if conversation_flow == "answering_question" and last_question:
            # User is replying to a question we asked — force continuity.
            system_prompt = f"""CRITICAL: You are continuing a conversation. You asked: "{last_question}"

CONTEXT: They just answered your question. Their response: "{user_query}"

INSTRUCTIONS:
- ACKNOWLEDGE their specific answer to your question
- BUILD directly on what they just told you
- Give advice that relates to their answer
- Ask a natural follow-up about the SAME topic
- Show you're listening and building on the conversation

AVOID repeating: {', '.join(analysis['avoid_repeating'])}"""

        elif user_level == "complete_beginner" and concern == "communication_anxiety":
            # Nervous first-timer: emotional support plus one concrete step.
            system_prompt = f"""You are a supportive startup mentor. User is a complete beginner with anxiety about customer conversations.

CONTEXT: {analysis['context_summary']}
CURRENT QUESTION: "{user_query}"

INSTRUCTIONS:
- Be encouraging and reduce their anxiety
- Give ONE simple, concrete step
- Make it feel less scary and more achievable
- Focus on emotional support + practical guidance

AVOID repeating: {', '.join(analysis['avoid_repeating'])}"""

        elif concern == "overwhelmed_by_process":
            # Overwhelmed founder: reduce to a single next step.
            system_prompt = f"""You are a calming startup mentor. User feels overwhelmed by startup advice.

CONTEXT: {analysis['context_summary']}
CURRENT QUESTION: "{user_query}"

INSTRUCTIONS:
- Acknowledge there's too much conflicting advice
- Give them ONE single next step (not a list)
- Make it feel manageable and specific
- Reduce overwhelm, don't add to it

AVOID repeating: {', '.join(analysis['avoid_repeating'])}"""

        else:
            # Default ongoing-conversation prompt.
            system_prompt = f"""You are an experienced startup advisor in an ongoing conversation.

CONTEXT: {analysis['context_summary']}
CURRENT QUESTION: "{user_query}"
CONVERSATION QUALITY: {analysis['quality_score']:.2f}/1.0

INSTRUCTIONS:
- Continue the conversation naturally
- Reference previous context when relevant
- Give practical, actionable advice
- Ask thoughtful follow-up questions
- Maintain conversation flow and continuity

AVOID repeating: {', '.join(analysis['avoid_repeating'])}"""

    # Wrap in ChatML; generation continues after the assistant tag.
    return f"""<|im_start|>system
{system_prompt}
<|im_end|>
<|im_start|>assistant"""
|
|
def validate_response_quality(response: str, analysis: Dict[str, Any], user_query: str) -> tuple[bool, str]:
    """Run quality heuristics over a generated reply.

    Returns (is_valid, summary) and appends an audit record to the shared
    conv_intel.response_validation_logs list.
    """
    issues: List[str] = []
    lowered = response.lower()

    # Length sanity bounds.
    if len(response) < 20:
        issues.append("Response too short")
    if len(response) > 500:
        issues.append("Response too long")

    # Flag advice topics the conversation has already covered.
    for avoided_topic in analysis.get('avoid_repeating', []):
        words = avoided_topic.replace('_', ' ').split()
        if all(w in lowered for w in words):
            issues.append(f"Repeated avoided topic: {avoided_topic}")

    # Continuity heuristic: an angel-investor question should keep the
    # reply on that topic.
    if analysis.get('has_history') and analysis.get('last_ai_question'):
        prior_question = analysis['last_ai_question'].lower()
        if 'angel' in prior_question and 'angel' not in lowered and 'investor' not in lowered:
            issues.append("Lost topic continuity")

    # Beginners should not receive recycled generic advice.
    if analysis.get('user_level') == 'complete_beginner':
        for phrase in ['market research', 'business plan', 'social media strategy']:
            if phrase in lowered and phrase.replace(' ', '_') in analysis.get('avoid_repeating', []):
                issues.append(f"Generic advice: {phrase}")

    is_valid = len(issues) == 0
    summary = "; ".join(issues) if issues else "Response validated successfully"

    # Persist an audit record for later inspection.
    conv_intel.response_validation_logs.append({
        "timestamp": datetime.now().isoformat(),
        "is_valid": is_valid,
        "issues": issues,
        "response_length": len(response),
        "user_level": analysis.get('user_level'),
        "main_concern": analysis.get('main_concern')
    })

    return is_valid, summary
|
|
def generate_enhanced_fallback(user_query: str, company_context: Dict[str, Any], analysis: Dict[str, Any]) -> str:
    """Rule-based reply used when the LLM is unavailable or its output
    failed validation.  Routes on the pre-computed conversation analysis."""

    level = analysis.get('user_level', 'unknown')
    concern = analysis.get('main_concern', 'unknown')
    flow = analysis.get('conversation_flow', 'continuing')
    prior_question = analysis.get('last_ai_question')

    print(f"π FALLBACK: Level={level}, Concern={concern}, Flow={flow}")

    query_lower = user_query.lower()

    # Case 1: the user is replying to a question we asked — acknowledge it.
    if flow == "answering_question" and prior_question:
        if 'no' in query_lower or 'haven\'t' in query_lower:
            return f"""No problem at all - many founders haven't explored that yet. Since you mentioned that about {concern.replace('_', ' ')}, let me suggest a simple first step that won't feel overwhelming.

What feels like the biggest barrier right now - time, knowing where to start, or something else?"""

        if 'yes' in query_lower:
            return f"""Great! Since you have some experience with that, let's build on it. Based on what you just shared about {concern.replace('_', ' ')}, what specific challenge are you facing?"""

        return f"""Thanks for that context. Based on what you're telling me, I think the key is to start simple and build momentum.

What feels most urgent to tackle first?"""

    # Case 2: canned scripts for the common beginner concerns.
    if level == "complete_beginner":
        beginner_scripts = {
            "communication_anxiety": """I totally understand - talking to strangers about your idea feels scary! Here's the secret: don't pitch your idea at all. Just ask people about their problems.

Try this: "Hey, what's your biggest frustration with [relevant area]?" Then just listen. No pressure to sell anything.

What kind of people do you think struggle with the problem you want to solve?""",
            "overwhelmed_by_process": """I know there's a million pieces of startup advice out there - ignore most of it for now. Here's your only focus this week: have conversations with 5 people about the problem you think you're solving.

That's it. Not your solution, just the problem.

Who's the first person you could talk to about this?""",
            "customer_discovery_mechanics": """Since you're just starting out, here's the simplest approach: start with people you already know. Friends, family, coworkers - anyone who might face the problem you're solving.

Ask: "Can I get your opinion on something? I'm trying to understand if [problem] is actually frustrating for people."

Who in your network might deal with this issue?""",
        }
        if concern in beginner_scripts:
            return beginner_scripts[concern]

    # Case 3: generic validation-first nudge, tagged with a known topic if any.
    topic_context = f" about {analysis.get('topics_covered', ['startup strategy'])[0]}" if analysis.get('topics_covered') else ""

    return f"""Starting with an idea is exactly where you should be! The key is validation through conversations before building anything{topic_context}.

What feels like the biggest challenge right now - finding the right people to talk to or knowing what questions to ask them?"""
|
|
| |
| |
| |
|
|
# Bcrypt context used to hash and verify per-company API keys.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
|
|
def create_company(company_data: Dict[str, Any]) -> Dict[str, Any]:
    """Create a company record and mint its API key.

    Returns {"success": True, "api_key": ..., "company": ...} on success, or
    {"success": False, "error": ...} on validation/database failure.  The
    plaintext API key is returned exactly once; only its bcrypt hash is stored.
    """
    required_fields = ["name", "stage", "industry"]

    for field in required_fields:
        if field not in company_data:
            return {"success": False, "error": f"Missing required field: {field}"}

    # Normalize and bound the name; reject blank/whitespace-only values
    # (previously an empty name slipped past the presence check above).
    company_data["name"] = company_data["name"].strip()[:100]
    if not company_data["name"]:
        return {"success": False, "error": "Missing required field: name"}

    if not SUPABASE_CONNECTED:
        # Demo mode: fabricate an ephemeral key/record; nothing is persisted.
        api_key = f"demo-{secrets.token_urlsafe(16)}"
        return {
            "success": True,
            "api_key": api_key,
            "company": {
                "id": f"demo-{secrets.token_urlsafe(8)}",
                **company_data
            },
            "message": "Demo mode - using fallback storage"
        }

    try:
        api_key = secrets.token_urlsafe(32)

        company_record = {
            "name": company_data["name"],
            "stage": company_data["stage"],
            "industry": company_data["industry"],
            "description": company_data.get("description", "")[:500],
            # Clamp team size to a plausible range.
            "team_size": min(max(company_data.get("team_size", 1), 1), 10000),
            "founded_year": company_data.get("founded_year"),
            "api_key_hash": pwd_context.hash(api_key),
            # NOTE(review): utcnow() is naive (and deprecated in 3.12);
            # switching to datetime.now(timezone.utc) changes the stored
            # isoformat — confirm downstream consumers before changing.
            "created_at": datetime.utcnow().isoformat(),
            "conversation_count": 0
        }

        result = supabase.table("companies").insert(company_record).execute()

        if result.data:
            return {
                "success": True,
                "api_key": api_key,
                "company": result.data[0]
            }
        return {"success": False, "error": "Failed to create company record"}

    except Exception as e:
        print(f"β Company creation error: {e}")
        return {"success": False, "error": f"Database error: {str(e)}"}
|
|
def get_company_by_api_key(api_key: str) -> Optional[Dict[str, Any]]:
    """Resolve an API key to its company record; None when invalid."""
    # Reject obviously malformed keys before touching the database.
    if not api_key or len(api_key) < 10:
        return None

    if not SUPABASE_CONNECTED:
        # Demo-mode stub so the UI still works without a database.
        return {
            "id": "demo-company",
            "name": "Demo Company",
            "stage": "idea",
            "industry": "tech",
            "team_size": 1
        }

    try:
        # NOTE: bcrypt-verifies against every row — an O(n) scan per lookup.
        # Acceptable for small tables; consider a keyed index as data grows.
        rows = supabase.table("companies").select("*").execute()
        return next(
            (record for record in rows.data
             if pwd_context.verify(api_key, record["api_key_hash"])),
            None,
        )
    except Exception as e:
        print(f"β API key validation error: {e}")
        return None
|
|
def get_conversation_history_validated(company_id: str, limit: int = 10) -> List[Dict]:
    """Fetch up to `limit` (clamped to 1..20) prior exchanges, oldest first.

    Incomplete rows (missing query or response) are dropped.  Returns [] when
    there is no company id, no database, or on any fetch error.
    """
    if not company_id:
        return []

    if not SUPABASE_CONNECTED:
        return []

    try:
        capped_limit = min(max(limit, 1), 20)
        result = (
            supabase.table("company_interactions")
            .select("user_query, model_response, created_at")
            .eq("company_id", company_id)
            .order("created_at", desc=True)
            .limit(capped_limit)
            .execute()
        )

        if not result.data:
            return []

        # Drop incomplete rows, then flip newest-first into chronological order.
        kept = [row for row in result.data
                if row.get('user_query') and row.get('model_response')]
        kept.reverse()
        return kept

    except Exception as e:
        print(f"β Conversation history error: {e}")
        return []
|
|
async def track_interaction_async(company_id: str, interaction_data: Dict[str, Any]):
    """Persist one chat exchange to company_interactions (best-effort).

    No-ops without a database or company id; failures are logged and never
    raised, so tracking can't break a chat request.

    NOTE(review): the supabase call below is synchronous and will block the
    event loop despite the async signature — consider run_in_executor.
    """
    if not SUPABASE_CONNECTED or not company_id:
        return

    try:
        # Truncate/clamp user-supplied fields before insert.
        validated_data = {
            "company_id": company_id,
            "interaction_type": interaction_data.get("interaction_type", "chat")[:50],
            "user_query": interaction_data.get("user_query", "")[:1000],
            "model_response": interaction_data.get("model_response", "")[:2000],
            "processing_time_ms": min(max(interaction_data.get("processing_time_ms", 0), 0), 30000),
            "created_at": datetime.utcnow().isoformat(),
            "quality_score": interaction_data.get("quality_score", 1.0),
            "validation_status": interaction_data.get("validation_status", "unknown")
        }

        supabase.table("company_interactions").insert(validated_data).execute()

    except Exception as e:
        print(f"β Interaction tracking error: {e}")
|
|
def generate_ai_response_validated(user_query: str, company_context: Dict[str, Any], conversation_history: List[Dict] = None) -> tuple[str, Dict[str, Any]]:
    """Generate a reply with full validation, falling back when needed.

    Tries the local LLM first; any generation failure, or a reply that fails
    validate_response_quality(), falls back to the rule-based generator.
    Returns (response_text, metadata) where metadata carries the conversation
    analysis, validation outcome, timing and whether the model was used.
    """
    start_time = datetime.now()

    # Clamp user input to a sane size before prompting.
    user_query = user_query.strip()[:500]

    analysis = conv_intel.analyze_conversation_depth(conversation_history or [])

    response = ""
    validation_result = {"is_valid": False, "validation_summary": "Not generated"}

    if MODEL_LOADED and model and tokenizer:
        try:
            prompt = create_validated_prompt(user_query, company_context, conversation_history)

            print(f"π― PROMPT: {prompt[:200]}...")

            inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1000)
            if device == "cuda":
                inputs = {k: v.to(device) for k, v in inputs.items()}

            with torch.no_grad():
                outputs = model.generate(
                    **inputs,
                    max_new_tokens=120,
                    do_sample=True,
                    temperature=0.7,
                    top_p=0.9,
                    repetition_penalty=1.1,
                    pad_token_id=tokenizer.eos_token_id,
                    eos_token_id=tokenizer.eos_token_id
                )

            # Strip the prompt tokens; keep only the newly generated tail.
            response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True).strip()

            print(f"π€ MODEL RESPONSE: {response}")

            is_valid, validation_summary = validate_response_quality(response, analysis, user_query)
            validation_result = {"is_valid": is_valid, "validation_summary": validation_summary}

            print(f"β VALIDATION: {validation_summary}")

            if not is_valid:
                print("β οΈ Response failed validation, using enhanced fallback")
                response = generate_enhanced_fallback(user_query, company_context, analysis)
                validation_result = {"is_valid": True, "validation_summary": "Enhanced fallback used"}

        except Exception as e:
            print(f"β Model generation error: {e}")
            response = generate_enhanced_fallback(user_query, company_context, analysis)
            validation_result = {"is_valid": True, "validation_summary": f"Fallback due to model error: {str(e)}"}

    else:
        # Model still loading (or failed to load) — deterministic fallback.
        response = generate_enhanced_fallback(user_query, company_context, analysis)
        validation_result = {"is_valid": True, "validation_summary": "Enhanced fallback (model not loaded)"}

    processing_time = (datetime.now() - start_time).total_seconds() * 1000

    response_metadata = {
        "analysis": analysis,
        "validation": validation_result,
        "processing_time_ms": int(processing_time),
        "model_used": MODEL_LOADED,
        "timestamp": datetime.now().isoformat()
    }

    return response, response_metadata
|
|
| |
| |
| |
|
|
# Single-page chat UI served at "/". This is a plain (non-f) string, so the
# JavaScript template literals' ${...} placeholders need no escaping.
# SECURITY FIX: the previous inline script "escaped" HTML with
# .replace(/</g, '<') — an identity replacement — so user input and model
# output were injected into innerHTML unescaped. escapeHtml() below performs
# real escaping via textContent.
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>π Complete Safe Conversational Startup Advisor</title>
    <script src="https://cdn.tailwindcss.com"></script>
    <link rel="stylesheet" type="text/css" href="data:text/css,
        .message { animation: slideIn 0.3s ease-out; }
        @keyframes slideIn { from { opacity: 0; transform: translateY(10px); } to { opacity: 1; transform: translateY(0); } }
        .typing-indicator { background: linear-gradient(90deg, #10b981, #3b82f6, #10b981); background-size: 200% 100%; animation: pulse 1.5s ease-in-out infinite; }
        @keyframes pulse { 0%, 100% { background-position: 0% 50%; } 50% { background-position: 100% 50%; } }
        .bounce { animation: bounce 1s infinite; }
        @keyframes bounce { 0%, 100% { transform: translateY(-25%); animation-timing-function: cubic-bezier(0.8,0,1,1); } 50% { transform: none; animation-timing-function: cubic-bezier(0,0,0.2,1); } }
        .debug-panel { font-family: 'Courier New', monospace; font-size: 0.75rem; }
    ">
</head>
<body class="bg-gray-50 min-h-screen">
    <div class="container mx-auto px-4 py-8 max-w-6xl">
        <div class="bg-white rounded-lg shadow-lg overflow-hidden">
            <!-- Header -->
            <div class="bg-gradient-to-r from-emerald-600 to-blue-600 text-white p-6">
                <h1 class="text-2xl font-bold">π Complete Safe Conversational Startup Advisor</h1>
                <p class="mt-2 opacity-90">Full conversation intelligence with comprehensive testing and validation</p>
                <div class="mt-3 flex items-center space-x-4 text-sm">
                    <span id="modelStatus" class="opacity-75">Model: Loading...</span>
                    <span id="dbStatus" class="opacity-75">DB: Checking...</span>
                </div>
            </div>

            <!-- API Key Input -->
            <div class="p-6 border-b bg-gray-50">
                <label class="block text-sm font-medium text-gray-700 mb-2">API Key</label>
                <input type="password" id="apiKey" placeholder="Enter your API key"
                       class="w-full px-3 py-2 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-emerald-500">
                <p class="mt-2 text-sm text-gray-600">Don't have one? Create a company profile first.</p>
            </div>

            <!-- Controls -->
            <div class="p-4 bg-yellow-50 border-b flex flex-wrap gap-4 items-center">
                <label class="flex items-center">
                    <input type="checkbox" id="debugMode" class="mr-2">
                    <span class="text-sm text-yellow-800">Show conversation analysis</span>
                </label>
                <label class="flex items-center">
                    <input type="checkbox" id="validationMode" class="mr-2">
                    <span class="text-sm text-yellow-800">Show response validation</span>
                </label>
                <label class="flex items-center">
                    <input type="checkbox" id="qualityMode" class="mr-2">
                    <span class="text-sm text-yellow-800">Show quality metrics</span>
                </label>
                <button onclick="clearChat()" class="px-3 py-1 bg-red-500 text-white text-sm rounded hover:bg-red-600">
                    Clear Chat
                </button>
            </div>

            <!-- Chat Container -->
            <div id="chatContainer" class="h-96 overflow-y-auto p-6 space-y-4">
                <div class="message bg-emerald-50 p-4 rounded-lg">
                    <div class="message-content">
                        <strong>Hi! I'm your complete conversational startup advisor.</strong><br>
                        I have full conversation intelligence, comprehensive validation, and detailed testing. I never lose track of our conversation and build naturally on what you tell me.<br><br>
                        <em>What's on your mind today?</em>
                    </div>
                </div>
            </div>

            <!-- Message Input -->
            <div class="p-6 border-t bg-white">
                <div class="flex space-x-3">
                    <input type="text" id="messageInput" placeholder="Tell me what you're thinking about..."
                           class="flex-1 px-4 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-emerald-500"
                           onkeypress="if(event.key==='Enter') sendMessage()">
                    <button onclick="sendMessage()"
                            class="px-6 py-2 bg-emerald-600 text-white rounded-lg hover:bg-emerald-700 focus:outline-none focus:ring-2 focus:ring-emerald-500">
                        Send
                    </button>
                </div>
                <div class="mt-2 text-xs text-gray-500" id="statusText">Ready to chat</div>
            </div>
        </div>
    </div>

    <script>
        // Real HTML escaping: letting the browser serialize textContent back
        // out of innerHTML escapes &, <, > (and quote-sensitive sequences)
        // correctly. The previous .replace(/</g, '<') calls were no-ops.
        function escapeHtml(text) {
            const div = document.createElement('div');
            div.textContent = text;
            return div.innerHTML;
        }

        // Check system status
        async function checkStatus() {
            try {
                const response = await fetch('/v1/health');
                const data = await response.json();

                document.getElementById('modelStatus').textContent =
                    `Model: ${data.model_loaded ? 'β
Loaded' : 'β³ Loading'}`;
                document.getElementById('dbStatus').textContent =
                    `DB: ${data.supabase_connected ? 'β
Connected' : 'β Offline'}`;

                if (!data.model_loaded) {
                    setTimeout(checkStatus, 2000); // Check again in 2 seconds
                }
            } catch (error) {
                console.error('Status check failed:', error);
            }
        }

        // Enhanced message sending with validation
        async function sendMessage() {
            const apiKey = document.getElementById('apiKey').value.trim();
            const messageInput = document.getElementById('messageInput');
            const message = messageInput.value.trim();
            const debugMode = document.getElementById('debugMode').checked;
            const validationMode = document.getElementById('validationMode').checked;
            const qualityMode = document.getElementById('qualityMode').checked;

            if (!apiKey) {
                alert('Please enter your API key first');
                return;
            }

            if (!message) {
                alert('Please enter a message');
                return;
            }

            const chatContainer = document.getElementById('chatContainer');
            const statusText = document.getElementById('statusText');

            // Add user message with XSS protection
            const userMessageDiv = document.createElement('div');
            userMessageDiv.className = 'message user text-right';
            userMessageDiv.innerHTML = `
                <div class="inline-block bg-gray-100 p-3 rounded-lg max-w-xs lg:max-w-md text-sm">
                    ${escapeHtml(message)}
                </div>
            `;
            chatContainer.appendChild(userMessageDiv);

            messageInput.value = '';
            statusText.textContent = 'Processing with full validation...';

            // Add typing indicator
            const loadingId = 'loading-' + Date.now();
            const loadingDiv = document.createElement('div');
            loadingDiv.id = loadingId;
            loadingDiv.className = 'message';
            loadingDiv.innerHTML = `
                <div class="typing-indicator inline-block bg-emerald-50 p-3 rounded-lg">
                    <div class="flex space-x-1">
                        <div class="w-2 h-2 bg-emerald-500 rounded-full bounce"></div>
                        <div class="w-2 h-2 bg-emerald-500 rounded-full bounce" style="animation-delay: 0.1s"></div>
                        <div class="w-2 h-2 bg-emerald-500 rounded-full bounce" style="animation-delay: 0.2s"></div>
                    </div>
                </div>
            `;
            chatContainer.appendChild(loadingDiv);
            chatContainer.scrollTop = chatContainer.scrollHeight;

            try {
                const response = await fetch('/v1/chat/completions', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json',
                        'Authorization': `Bearer ${apiKey}`
                    },
                    body: JSON.stringify({
                        messages: [{ role: 'user', content: message }],
                        debug: debugMode,
                        validation: validationMode,
                        quality: qualityMode
                    })
                });

                const data = await response.json();

                // Remove typing indicator
                document.getElementById(loadingId).remove();

                if (response.ok) {
                    const aiResponse = data.choices[0].message.content;

                    // Create response div
                    const responseDiv = document.createElement('div');
                    responseDiv.className = 'message';

                    let debugInfo = '';

                    // Add debug information if requested
                    if (debugMode && data.debug_info) {
                        debugInfo += `
                            <div class="debug-panel bg-blue-50 p-3 rounded mb-3 border">
                                <strong>π§ Conversation Analysis:</strong><br>
                                User Level: ${data.debug_info.user_level}<br>
                                Main Concern: ${data.debug_info.main_concern}<br>
                                Conversation Stage: ${data.debug_info.conversation_stage}<br>
                                Quality Score: ${data.debug_info.quality_score}/1.0<br>
                                Continuity Score: ${data.debug_info.continuity_score}/1.0<br>
                                Topics: ${data.debug_info.topics_covered.join(', ') || 'None'}<br>
                                Flow: ${data.debug_info.conversation_flow}
                            </div>
                        `;
                    }

                    if (validationMode && data.validation_info) {
                        debugInfo += `
                            <div class="debug-panel bg-green-50 p-3 rounded mb-3 border">
                                <strong>β
Response Validation:</strong><br>
                                Status: ${data.validation_info.is_valid ? 'β
Valid' : 'β Failed'}<br>
                                Summary: ${data.validation_info.validation_summary}<br>
                                Processing Time: ${data.processing_time_ms}ms
                            </div>
                        `;
                    }

                    if (qualityMode && data.quality_metrics) {
                        debugInfo += `
                            <div class="debug-panel bg-purple-50 p-3 rounded mb-3 border">
                                <strong>π Quality Metrics:</strong><br>
                                Response Length: ${aiResponse.length} chars<br>
                                Model Used: ${data.model_used ? 'AI Model' : 'Fallback'}<br>
                                Avoided Topics: ${data.quality_metrics.avoided_repeating || 'None'}
                            </div>
                        `;
                    }

                    // Escape the model output FIRST, then turn newlines into <br>.
                    // (The old chain escaped after inserting <br>, which would
                    // have destroyed the tags even if the escaping had worked.)
                    responseDiv.innerHTML = `
                        <div class="bg-emerald-50 p-4 rounded-lg">
                            ${debugInfo}
                            <div class="message-content text-sm">
                                ${escapeHtml(aiResponse).replace(/\n/g, '<br>')}
                            </div>
                        </div>
                    `;

                    chatContainer.appendChild(responseDiv);
                    statusText.textContent = `Response generated (${data.processing_time_ms}ms)`;

                } else {
                    const errorDiv = document.createElement('div');
                    errorDiv.className = 'message';
                    errorDiv.innerHTML = `
                        <div class="bg-red-50 p-4 rounded-lg text-red-700">
                            β Error: ${escapeHtml(data.detail || 'Something went wrong')}
                        </div>
                    `;
                    chatContainer.appendChild(errorDiv);
                    statusText.textContent = 'Error occurred';
                }
            } catch (error) {
                document.getElementById(loadingId).remove();

                const errorDiv = document.createElement('div');
                errorDiv.className = 'message';
                errorDiv.innerHTML = `
                    <div class="bg-red-50 p-4 rounded-lg text-red-700">
                        β Network error: ${escapeHtml(error.message)}
                    </div>
                `;
                chatContainer.appendChild(errorDiv);
                statusText.textContent = 'Network error';
            }

            chatContainer.scrollTop = chatContainer.scrollHeight;
        }

        function clearChat() {
            const chatContainer = document.getElementById('chatContainer');
            chatContainer.innerHTML = `
                <div class="message bg-emerald-50 p-4 rounded-lg">
                    <div class="message-content">
                        <strong>Chat cleared!</strong> I'm ready for a fresh conversation.<br><br>
                        <em>What would you like to discuss?</em>
                    </div>
                </div>
            `;
            document.getElementById('statusText').textContent = 'Chat cleared - ready for new conversation';
        }

        // Initialize
        document.getElementById('messageInput').focus();
        checkStatus();

        // Auto-check status every 30 seconds
        setInterval(checkStatus, 30000);
    </script>
</body>
</html>
"""
|
|
| |
| |
| |
|
|
# ASGI application: serves the single-page chat UI plus the JSON API below.
app = FastAPI(
    title="π Complete Safe Conversational Startup Advisor",
    description="Full-featured conversational AI with comprehensive testing and validation",
    version="5.0.0"
)


# NOTE(review): allow_origins=["*"] together with allow_credentials=True is a
# CORS foot-gun — browsers reject credentialed responses with a wildcard
# origin. Consider pinning explicit origins for production; TODO confirm the
# intended deployment origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Bearer-token scheme; the presented token is treated as a company API key
# (see get_current_company below).
security = HTTPBearer()
|
|
async def get_current_company(credentials: HTTPAuthorizationCredentials = Depends(security)) -> Dict[str, Any]:
    """Resolve the presented bearer token to its company record.

    Used as a FastAPI dependency on authenticated endpoints. Raises
    HTTP 401 when the key does not match any known company.
    """
    matched = get_company_by_api_key(credentials.credentials)
    if not matched:
        raise HTTPException(status_code=401, detail="Invalid API key")
    return matched
|
|
@app.get("/", response_class=HTMLResponse)
async def root():
    """Serve the single-page chat UI (HTML + inline script) at the site root."""
    return HTML_TEMPLATE
|
|
@app.post("/v1/companies")
async def api_create_company(request: Request):
    """Create a new company record from the posted JSON payload.

    Delegates validation to create_company(); a failure surfaces as HTTP 400
    carrying the helper's error message.
    """
    payload = await request.json()
    outcome = create_company(payload)
    if not outcome.get("success"):
        raise HTTPException(status_code=400, detail=outcome.get("error"))
    return outcome
|
|
@app.post("/v1/chat/completions")
async def complete_chat_completions(request: Request, current_company: Dict = Depends(get_current_company), background_tasks: BackgroundTasks = None):
    """Complete chat endpoint with full conversation intelligence and validation.

    Accepts an OpenAI-style body: {"messages": [...]} plus optional boolean
    flags "debug", "validation" and "quality" that attach extra diagnostic
    sections to the JSON response. Returns an OpenAI-compatible completion
    envelope.
    """
    body = await request.json()

    # Fix: a malformed payload (missing/empty "messages", or a final message
    # without "content") previously raised KeyError/IndexError and surfaced
    # as an opaque HTTP 500. Reject it explicitly with a 400 instead.
    messages = body.get("messages")
    if not isinstance(messages, list) or not messages:
        raise HTTPException(status_code=400, detail="Request body must include a non-empty 'messages' list")
    if not isinstance(messages[-1], dict) or "content" not in messages[-1]:
        raise HTTPException(status_code=400, detail="The last message must be an object with a 'content' field")
    user_query = messages[-1]["content"]

    # Prior turns give the generator conversational continuity.
    conversation_history = get_conversation_history_validated(current_company["id"])

    ai_response, response_metadata = generate_ai_response_validated(user_query, current_company, conversation_history)

    # Analytics payload, persisted off the request path when BackgroundTasks
    # was injected by FastAPI.
    interaction_data = {
        "interaction_type": "complete_conversational",
        "user_query": user_query,
        "model_response": ai_response,
        "processing_time_ms": response_metadata["processing_time_ms"],
        "quality_score": response_metadata["analysis"].get("quality_score", 1.0),
        "validation_status": response_metadata["validation"]["validation_summary"]
    }

    if background_tasks:
        background_tasks.add_task(track_interaction_async, current_company["id"], interaction_data)

    # OpenAI-compatible envelope; token counts are whitespace-split
    # approximations, not tokenizer counts.
    response_data = {
        "id": f"chatcmpl-complete-{secrets.token_urlsafe(8)}",
        "object": "chat.completion",
        "model": "complete-conversational-advisor",
        "choices": [{
            "index": 0,
            "message": {"role": "assistant", "content": ai_response},
            "finish_reason": "stop"
        }],
        "usage": {
            "prompt_tokens": len(user_query.split()),
            "completion_tokens": len(ai_response.split()),
            "total_tokens": len(user_query.split()) + len(ai_response.split())
        },
        "processing_time_ms": response_metadata["processing_time_ms"],
        "model_used": response_metadata["model_used"],
        "complete_features": True
    }

    # Optional diagnostic sections, gated by request flags.
    if body.get("debug"):
        response_data["debug_info"] = {
            "user_level": response_metadata["analysis"].get("user_level"),
            "main_concern": response_metadata["analysis"].get("main_concern"),
            "conversation_stage": response_metadata["analysis"].get("conversation_stage"),
            "quality_score": response_metadata["analysis"].get("quality_score"),
            "continuity_score": response_metadata["analysis"].get("continuity_score"),
            "topics_covered": response_metadata["analysis"].get("topics_covered", []),
            "conversation_flow": response_metadata["analysis"].get("conversation_flow"),
            "last_ai_question": response_metadata["analysis"].get("last_ai_question")
        }

    if body.get("validation"):
        response_data["validation_info"] = response_metadata["validation"]

    if body.get("quality"):
        response_data["quality_metrics"] = {
            "avoided_repeating": response_metadata["analysis"].get("avoid_repeating"),
            "conversation_quality": response_metadata["analysis"].get("quality_score"),
            "response_validation": response_metadata["validation"]["is_valid"]
        }

    return response_data
|
|
@app.get("/v1/health")
async def comprehensive_health_check():
    """Comprehensive health check reporting model, DB and subsystem status."""
    now = datetime.utcnow()

    enabled_features = [
        "conversation_continuity",
        "user_level_detection",
        "concern_analysis",
        "response_validation",
        "quality_metrics",
        "async_model_loading",
        "csp_safe_ui",
        "production_ready"
    ]

    return {
        "status": "healthy",
        "timestamp": now.isoformat(),
        "uptime": "running",
        "model_loaded": MODEL_LOADED,
        "model_loading": MODEL_LOADING,
        "supabase_connected": SUPABASE_CONNECTED,
        "conversation_intelligence": "active",
        "validation_system": "active",
        "response_quality_tracking": len(conv_intel.response_validation_logs),
        "device": device,
        "server_status": "stable",
        "auto_reload": "disabled",
        "features": enabled_features
    }
|
|
@app.get("/v1/status")
async def quick_status():
    """Lightweight status endpoint for external monitoring probes."""
    if MODEL_LOADED:
        model_state = "loaded"
    elif MODEL_LOADING:
        model_state = "loading"
    else:
        model_state = "not_loaded"

    return {
        "server": "running",
        "model": model_state,
        "database": "connected" if SUPABASE_CONNECTED else "offline",
        "timestamp": datetime.utcnow().isoformat()
    }
|
|
@app.get("/v1/conversation/analytics/{company_id}")
async def get_conversation_analytics(company_id: str, current_company: Dict = Depends(get_current_company)):
    """Return conversation analytics and quality metrics for one company.

    A company may only read its own analytics; requesting any other id
    yields HTTP 403.
    """
    if company_id != current_company["id"]:
        raise HTTPException(status_code=403, detail="Access denied")

    history = get_conversation_history_validated(company_id, limit=20)
    depth = conv_intel.analyze_conversation_depth(history)

    return {
        "company_id": company_id,
        "conversation_count": len(history),
        "overall_quality_score": depth.get("quality_score", 1.0),
        "continuity_score": depth.get("continuity_score", 1.0),
        "topics_discussed": depth.get("topics_covered", []),
        "user_progression": depth.get("user_level", "unknown"),
        "main_concerns_addressed": depth.get("main_concern", "unknown"),
        "validation_logs": conv_intel.response_validation_logs[-10:],
        "recommendations": [
            "Continue conversation-based validation",
            "Focus on user's specific concerns",
            "Maintain topic continuity"
        ]
    }
|
|
if __name__ == "__main__":
    # Dev/standalone entry point: run the ASGI app directly under uvicorn.
    # NOTE(review): the banner strings below appear mojibake'd (mis-decoded
    # emoji) — confirm the file's encoding and restore the intended glyphs.
    import uvicorn
    print("π Starting Complete Safe Conversational Startup Journey Advisor...")
    print("π Open: http://localhost:8000")
    print("π§ Full conversation intelligence active")
    print("β
All validation and testing features enabled")
    print("π CSP-safe implementation")
    print("β‘ Async model loading to prevent blocking")
    print("π§ Production mode - auto-reload disabled")


    # reload=False on purpose: auto-reload would restart the worker and
    # re-trigger the expensive async model load.
    uvicorn.run(
        app,
        host="0.0.0.0",  # listen on all interfaces
        port=8000,
        reload=False,
        access_log=True,
        log_level="info"
    )
|
|