| | from typing import List |
| | from langchain_openai import ChatOpenAI |
| | from langchain_groq import ChatGroq |
| | from langchain_fireworks import ChatFireworks |
| | from langchain_anthropic import ChatAnthropic |
| | |
| | |
| | from langchain.prompts import ChatPromptTemplate |
| | from hype_pack.utils.state import InterviewState, ReferenceMaterial, QuestionList, HypeCastTranscript |
| | from hype_pack.utils.speaker_profiles import speaker_voice_map |
| | import asyncio |
| | import os |
| | from dotenv import load_dotenv |
| | from lmnt.api import Speech |
| | import time |
| | import tempfile |
| | from langchain_core.tracers.context import tracing_v2_enabled |
| |
|
# Pull API keys (OpenAI / Groq / Anthropic / LMNT) from a local .env file.
load_dotenv()

# Enable LangSmith tracing for every chain invoked from this module.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "hypecast_generator"
| |
|
def build_reference_material_node(interview_state: InterviewState) -> InterviewState:
    """
    Analyzes candidate background to generate material for motivational speeches.

    Reads the resume, optional personal notes, and target-job text from the
    state's initial user input, asks the LLM for a structured
    ``ReferenceMaterial`` analysis, and stores the result on the state.

    Args:
        interview_state: Current pipeline state; mutated in place.

    Returns:
        The same ``InterviewState`` with ``reference_material`` populated.
    """
    with tracing_v2_enabled(tags=["reference_material"]):
        analyzer = ChatGroq(
            model="llama-3.1-70b-versatile",
            temperature=0.1,
        ).with_structured_output(ReferenceMaterial)

        analysis_prompt = ChatPromptTemplate.from_messages([
            ("system", """You are an expert at identifying compelling personal narratives
            and motivational elements from people's backgrounds. Focus on finding:
            1. Authentic stories that demonstrate growth and resilience
            2. Genuine sources of pride and motivation
            3. Clear connections between past experiences and future aspirations
            4. Unique elements that make their story compelling

            Your output MUST include:
            - A core narrative about their journey
            - Key achievements that showcase their potential
            - Specific challenges they've overcome
            - Clear connections between their background and target role
            - Values demonstrated through their experiences"""),
            ("human", """
            Analyze this person's background to identify elements for a motivational speech:

            Resume Content:
            {resume}

            Additional Personal Information:
            {personal}

            Target Position:
            {job}

            Provide a complete analysis including:
            1. Core narrative about their journey
            2. Key achievements
            3. Challenges overcome
            4. IMPORTANT: Specific connections between their background and target role
            5. Values demonstrated through their experiences
            """),
        ])

        user_input = interview_state.user_initial_input
        messages = analysis_prompt.format_messages(
            resume=user_input.resume_text,
            personal=user_input.personal_text or "",
            job=user_input.job_text or "",
        )
        analysis = analyzer.invoke(messages)

        if isinstance(analysis, ReferenceMaterial):
            interview_state.reference_material = analysis
        else:
            # Structured output can come back as a plain dict; coerce it into
            # the pydantic model before storing.
            interview_state.reference_material = ReferenceMaterial(**analysis)

        return interview_state
| |
|
def generate_questions_node(interview_state: InterviewState) -> InterviewState:
    """
    Generates questions and manages the question history.

    Asks the LLM for 2-3 new motivation-focused questions, filters out any
    that duplicate previously asked questions, and appends the remainder to
    ``interview_state.qa_history``.

    Args:
        interview_state: Current pipeline state; mutated in place.

    Returns:
        The same ``InterviewState`` with new unique questions appended.
    """
    with tracing_v2_enabled(tags=["questions"]):
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0.35
        ).with_structured_output(QuestionList)

        # Ensure the history container exists before reading from it.
        if interview_state.qa_history is None:
            interview_state.qa_history = QuestionList(questions=[])

        # Texts of every question already asked, for de-duplication below.
        existing_questions = {
            q.question_text for q in interview_state.qa_history.questions
        }

        prompt = ChatPromptTemplate.from_messages([
            ("system", """Generate 2-3 focused questions that reveal what motivates
            this person. Each question should have 3 distinct choices."""),
            ("human", """
            Reference Material:
            {reference_material}

            Previous Questions Asked:
            {previous_questions}

            Create new questions that:
            - Are different from previous questions
            - Focus on motivation and confidence
            - Connect to their background
            """)
        ])

        new_questions = llm.invoke(prompt.format_messages(
            reference_material=interview_state.reference_material,
            previous_questions="\n".join(
                f"Q: {q.question_text}"
                for q in (interview_state.qa_history.questions or [])
            )
        ))

        # Keep only questions the model has not already asked.
        unique_new_questions = [
            q for q in new_questions.questions
            if q.question_text not in existing_questions
        ]

        # BUG FIX: the original re-checked `qa_history is None` here and set it
        # back to None before calling `.questions.extend(...)`, which would
        # raise AttributeError. The history is guaranteed non-None above, so
        # extend it directly.
        interview_state.qa_history.questions.extend(unique_new_questions)

        return interview_state
| |
|
def generate_transcript_node(interview_state: InterviewState, speaker_profile: dict) -> InterviewState:
    """
    Generates a concise, TTS-friendly motivational speech.

    Builds a speaker-styled system prompt from ``speaker_profile`` (which is
    expected to provide ``signature_language_patterns`` and
    ``core_message_structure`` keys), feeds it the reference material and Q&A
    history, and stores the structured ``HypeCastTranscript`` on the state.

    Args:
        interview_state: Current pipeline state; mutated in place.
        speaker_profile: Style profile describing how the speech should sound.

    Returns:
        The same ``InterviewState`` with ``transcript`` populated.
    """
    with tracing_v2_enabled(tags=["transcript"]):
        speech_writer = ChatAnthropic(
            model="claude-3-5-haiku-20241022",
            temperature=0.4
        ).with_structured_output(HypeCastTranscript)

        speech_prompt = ChatPromptTemplate.from_messages([
            ("system", f"""You are speaking directly TO the candidate about why they should be excited about THIS specific opportunity.

            Adopt these speaking patterns:
            {speaker_profile['signature_language_patterns']}

            Follow this message structure:
            {speaker_profile['core_message_structure']}
            Essential Rules:
            - Speak directly TO them ("you" and "your")
            - Connect THEIR specific experiences to the role's requirements
            - Highlight where their background perfectly matches the opportunity
            - Build excitement about how they're already prepared for this role
            - Keep it natural and conversational
            - Limit to 2 minutes (about 250-300 words)
            - Use natural punctuation: periods, commas, exclamation points, question marks
            - Include paragraph breaks for readability
            - Add appropriate pauses with em dashes (—) or ellipses (...)

            Absolutely Avoid:
            - Generic motivation without specific connections
            - Any personal stories from you
            - Special formatting (bold, italic, underline)
            - Special characters like * < > # []
            - Speaking as if you are them
            - Audience-style questions
            - Any text not meant to be spoken

            Speech Formatting Guidelines:
            - Start new paragraphs for new topics
            - Break long sentences into natural speaking chunks
            - Use em dashes (—) for meaningful pauses
            - Use exclamation points sparingly for genuine excitement
            - Include question marks for rhetorical questions
            - Write numbers as words for better speech flow
            - Add ellipses (...) for thoughtful transitions

            Remember: Your goal is to make them see how perfectly their experience aligns with this role and why they should be excited about this specific opportunity. The text should be both easy to read on screen and natural to speak aloud.
            """),
            ("human", """
            Their Background and Experience:
            {reference_material}

            Their Motivations and Goals:
            {qa_history}

            Create an energetic, personal talk that shows them why they're perfect for this role.
            """)
        ])

        rendered_messages = speech_prompt.format_messages(
            reference_material=interview_state.reference_material,
            qa_history=interview_state.qa_history,
        )
        interview_state.transcript = speech_writer.invoke(rendered_messages)

        return interview_state
| |
|
| |
|
| | import logging |
| |
|
| | |
# NOTE(review): configuring the root logger at import time is a module-level
# side effect (and DEBUG is very verbose) — consider moving this into the
# application entry point.
logging.basicConfig(level=logging.DEBUG)
| |
|
async def text_to_speech_node(interview_state: InterviewState, selected_speaker: str) -> InterviewState:
    """
    Converts transcript to audio using LMNT's text-to-speech API and stores bytes in state.

    Looks up the LMNT voice id for ``selected_speaker`` in
    ``speaker_voice_map`` (keyed by lowercased, underscore-separated names),
    synthesizes the transcript to MP3, and stores the raw bytes on
    ``interview_state.audio_bytes``. On any failure ``audio_bytes`` is set to
    ``None`` and the error is logged; the function never raises.

    Args:
        interview_state: Current pipeline state; mutated in place.
        selected_speaker: Display name of the speaker, e.g. "David Goggins".

    Returns:
        The same ``InterviewState`` with ``audio_bytes`` set (or None on error).
    """
    try:
        print("Starting text-to-speech conversion...")
        speech = Speech(api_key=os.getenv('LMNT_API_KEY'))

        speaker_key = selected_speaker.lower().replace(" ", "_")
        voice_id = speaker_voice_map.get(speaker_key)

        if not voice_id:
            raise ValueError(f"No voice mapping found for speaker: {selected_speaker}")

        text_content = str(interview_state.transcript.content)

        async with speech as s:
            result = await s.synthesize(
                text=text_content,
                voice=voice_id,
                format='mp3',
                sample_rate=24000,
                speed=1.0
            )

        # Some client versions return {'audio': <bytes>}, others raw bytes.
        if isinstance(result, dict) and 'audio' in result:
            audio_bytes = result['audio']
        else:
            audio_bytes = result

        # BUG FIX: size is now reported AFTER unwrapping — the original printed
        # len(result), which for a dict response is the key count, not bytes.
        print(f"API Response received. Size: {len(audio_bytes)} bytes")

        # Anything under 100 bytes cannot be a real MP3 payload.
        if not audio_bytes or len(audio_bytes) < 100:
            raise ValueError(f"Invalid audio response size: {len(audio_bytes)} bytes")

        interview_state.audio_bytes = audio_bytes
        print(f"Successfully stored {len(audio_bytes)} bytes in interview_state")

    except Exception as e:
        print(f"Error in text-to-speech conversion: {str(e)}")
        logging.error(f"Error in text-to-speech conversion: {str(e)}")
        interview_state.audio_bytes = None
        # BUG FIX: removed `st.error(...)` — `st` (Streamlit) is never imported
        # in this module, so the handler itself raised NameError and masked the
        # real failure. UI feedback belongs in the Streamlit layer.

    return interview_state
| |
|
| |
|
def run_text_to_speech_node(interview_state: InterviewState, selected_speaker: str) -> InterviewState:
    """
    Synchronous wrapper for the async text_to_speech_node.

    Runs the coroutine on a fresh event loop; on failure, logs the error,
    clears ``audio_bytes``, and still returns the state so callers never see
    an exception.
    """
    try:
        updated_state = asyncio.run(
            text_to_speech_node(interview_state, selected_speaker)
        )
    except Exception as e:
        logging.error(f"Error in text_to_speech_node wrapper: {str(e)}")
        interview_state.audio_bytes = None
        return interview_state
    return updated_state
| |
|