LeoWalker commited on
Commit
2e427ef
·
1 Parent(s): f04718d

Setting up Docker so the Streamlit app can be deployed in a container.

Browse files
.dockerignore ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
__pycache__
*.pyc
*.pyo
*.pyd
.Python
env/
venv/
.env
*.pdf
.git
.gitignore
.pytest_cache
.coverage
htmlcov/
dist/
build/
*.egg-info/
.DS_Store
# Generated TTS output and editor settings do not belong in the build context.
audio_out/
.vscode/
.gitignore CHANGED
@@ -1,4 +1,6 @@
1
  .env
2
  __pycache__/
3
  hype_pack/depricated_files/
4
- .DS_Store
 
 
 
1
  .env
2
  __pycache__/
3
  hype_pack/depricated_files/
4
+ .DS_Store
5
+ audio_out/
6
+ .whisper
.vscode/settings.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "python.analysis.typeCheckingMode": "basic"
3
+ }
Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use Python 3.11 as base image (compatible with your Poetry requirements)
FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Install Poetry
RUN curl -sSL https://install.python-poetry.org | python3 -

# BUG FIX: the installer places the `poetry` executable in /root/.local/bin,
# which is NOT on PATH in the slim image, so every following `poetry` RUN
# would fail with "poetry: command not found".
ENV PATH="/root/.local/bin:${PATH}"

# Copy only requirements first to leverage Docker cache
COPY pyproject.toml poetry.lock ./

# Configure Poetry to not create a virtual environment in the container
RUN poetry config virtualenvs.create false

# Install dependencies only.
# --without dev replaces the removed --no-dev flag (Poetry >= 1.2);
# --no-root skips installing the project itself, whose source is only
# copied in the next layer.
RUN poetry install --without dev --no-root --no-interaction --no-ansi

# Copy the rest of the application
COPY . .

# Expose the port Streamlit runs on
EXPOSE 8501

# Command to run the application
CMD ["poetry", "run", "streamlit", "run", "hype_pack/streamlit_app.py", "--server.address=0.0.0.0"]
docker-compose.yml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Compose service for the HypeCast Streamlit app.
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and
# only kept for compatibility with older docker-compose binaries.
version: '3.8'

services:
  hypecast:
    build: .
    # Streamlit's default port, published unchanged on the host.
    ports:
      - "8501:8501"
    # Bind-mount the source tree over the image's /app for live editing
    # during development (overrides the files baked in by the Dockerfile).
    volumes:
      - .:/app
    # API keys are forwarded from the host environment (or a local .env file).
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - LMNT_API_KEY=${LMNT_API_KEY}
      - GOOGLE_API_KEY=${GOOGLE_API_KEY}
hype_pack/states/__init__.py ADDED
File without changes
hype_pack/utils/nodes.py CHANGED
@@ -1,40 +1,387 @@
 
1
  from langchain_openai import ChatOpenAI
2
  from langchain.prompts import ChatPromptTemplate
3
- from hype_pack.utils.state import InterviewState, ReferenceMaterial
 
 
 
 
 
 
 
 
 
 
4
 
5
  def build_reference_material_node(interview_state: InterviewState) -> InterviewState:
6
  """
7
- Analyzes candidate background and job requirements to generate structured reference material.
8
  """
9
-
10
- # Initialize the LLM with structured output
11
  llm = ChatOpenAI(
12
  model="gpt-4o-mini",
13
  temperature=0.1
14
  ).with_structured_output(ReferenceMaterial)
15
 
16
- # Create a simple prompt
17
  prompt = ChatPromptTemplate.from_messages([
18
- ("system", "You are an expert career analyst. Analyze the provided information to generate a structured analysis."),
 
 
 
 
 
 
 
 
 
 
 
 
19
  ("human", """
20
- Resume: {resume}
21
- Personal Info: {personal}
22
- Job Description: {job}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  """)
24
  ])
25
 
26
- # Get structured output directly
27
  reference_material = llm.invoke(prompt.format_messages(
28
- resume=interview_state.user_inital_input.resume_text,
29
- personal=interview_state.user_inital_input.personal_text or "",
30
- job=interview_state.user_inital_input.job_text or ""
31
  ))
32
 
33
- # Convert ReferenceMaterial instance to a dictionary if needed
34
  if isinstance(reference_material, ReferenceMaterial):
35
- reference_material = reference_material.model_dump()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- # Update state with reference material only
38
- interview_state.reference_material = reference_material
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  return interview_state
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
  from langchain_openai import ChatOpenAI
3
  from langchain.prompts import ChatPromptTemplate
4
+ from hype_pack.utils.state import InterviewState, ReferenceMaterial, QuestionList, HypeCastTranscript
5
+ import asyncio
6
+ import os
7
+ from dotenv import load_dotenv
8
+ from lmnt.api import Speech
9
+ import time
10
+
11
+
12
+ load_dotenv()
13
+
14
+
15
 
16
def build_reference_material_node(interview_state: InterviewState) -> InterviewState:
    """
    Analyzes candidate background to generate material for motivational speeches.

    Runs a structured-output LLM pass over the user's resume, optional
    personal notes, and target job description, then stores the resulting
    ReferenceMaterial on the state.

    Args:
        interview_state: Pipeline state whose `user_initial_input` is populated.

    Returns:
        The same InterviewState with `reference_material` set.
    """
    # Low temperature: this is an extraction/analysis step, so keep the
    # output faithful to the source material rather than creative.
    llm = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.1
    ).with_structured_output(ReferenceMaterial)

    prompt = ChatPromptTemplate.from_messages([
        ("system", """You are an expert at identifying compelling personal narratives
        and motivational elements from people's backgrounds. Focus on finding:
        1. Authentic stories that demonstrate growth and resilience
        2. Genuine sources of pride and motivation
        3. Clear connections between past experiences and future aspirations
        4. Unique elements that make their story compelling

        Your output MUST include:
        - A core narrative about their journey
        - Key achievements that showcase their potential
        - Specific challenges they've overcome
        - Clear connections between their background and target role
        - Values demonstrated through their experiences"""),
        ("human", """
        Analyze this person's background to identify elements for a motivational speech:

        Resume Content:
        {resume}

        Additional Personal Information:
        {personal}

        Target Position:
        {job}

        Provide a complete analysis including:
        1. Core narrative about their journey
        2. Key achievements
        3. Challenges overcome
        4. IMPORTANT: Specific connections between their background and target role
        5. Values demonstrated through their experiences
        """)
    ])

    reference_material = llm.invoke(prompt.format_messages(
        resume=interview_state.user_initial_input.resume_text,
        personal=interview_state.user_initial_input.personal_text or "",
        job=interview_state.user_initial_input.job_text or ""
    ))

    # with_structured_output may return either the parsed model instance or
    # a plain dict depending on configuration; normalize to ReferenceMaterial.
    if isinstance(reference_material, ReferenceMaterial):
        interview_state.reference_material = reference_material
    else:
        interview_state.reference_material = ReferenceMaterial(**reference_material)

    return interview_state
72
+
73
def generate_questions_node(interview_state: InterviewState) -> InterviewState:
    """
    Generates questions and manages the question history.

    Asks the LLM for 2-3 new multiple-choice questions grounded in the
    reference material, drops any question text that was already asked,
    and appends the rest to ``interview_state.qa_history``.

    Args:
        interview_state: State carrying `reference_material` and
            (possibly None) `qa_history`.

    Returns:
        The same InterviewState with `qa_history.questions` extended.
    """
    llm = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.35
    ).with_structured_output(QuestionList)

    # Initialize history on first call, then collect the question texts
    # already asked so LLM duplicates can be filtered out below.
    if interview_state.qa_history is None:
        interview_state.qa_history = QuestionList(questions=[])
    existing_questions = {q.question_text for q in interview_state.qa_history.questions}

    prompt = ChatPromptTemplate.from_messages([
        ("system", """Generate 2-3 focused questions that reveal what motivates
        this person. Each question should have 3 distinct choices."""),
        ("human", """
        Reference Material:
        {reference_material}

        Previous Questions Asked:
        {previous_questions}

        Create new questions that:
        - Are different from previous questions
        - Focus on motivation and confidence
        - Connect to their background
        """)
    ])

    new_questions = llm.invoke(prompt.format_messages(
        reference_material=interview_state.reference_material,
        previous_questions="\n".join([
            f"Q: {q.question_text}"
            for q in (interview_state.qa_history.questions or [])
        ])
    ))

    # Filter out any duplicate questions before appending.
    unique_new_questions = [
        q for q in new_questions.questions
        if q.question_text not in existing_questions
    ]

    # BUG FIX: the original re-checked `qa_history is None` here and
    # assigned None (interview_state.qa_history = None), which would raise
    # AttributeError on the .questions access. History is guaranteed
    # non-None after the initialization at the top of this function.
    interview_state.qa_history.questions.extend(unique_new_questions)

    return interview_state
127
+
128
def generate_transcript_node(interview_state: InterviewState, speaker_profile: dict) -> InterviewState:
    """
    Generates a concise, TTS-friendly motivational speech.

    Args:
        interview_state: State carrying `reference_material` and `qa_history`.
        speaker_profile: Style dict; must provide the
            'signature_language_patterns' and 'core_message_structure' keys
            (see speaker_profiles.py).

    Returns:
        The same InterviewState with `transcript` set.
    """
    # Higher temperature than the analysis nodes: the speech should sound
    # energetic and varied, not deterministic.
    llm = ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0.6
    ).with_structured_output(HypeCastTranscript)

    # NOTE: the system message is an f-string, so the speaker profile is
    # interpolated before ChatPromptTemplate parses it; only
    # {reference_material} and {qa_history} remain as template variables.
    prompt = ChatPromptTemplate.from_messages([
        ("system", f"""You are speaking directly TO the candidate about why they should be excited about THIS specific opportunity.

        Adopt these speaking patterns:
        {speaker_profile['signature_language_patterns']}

        Follow this message structure:
        {speaker_profile['core_message_structure']}

        Essential Rules:
        - Speak directly TO them ("you" and "your")
        - Connect THEIR specific experiences to the role's requirements
        - Highlight where their background perfectly matches the opportunity
        - Build excitement about how they're already prepared for this role
        - Keep it natural and conversational
        - Limit to 2 minutes (about 250-300 words)
        - Use only periods and commas for punctuation

        Absolutely Avoid:
        - Generic motivation without specific connections
        - Any personal stories from you
        - Markdown or formatting
        - Line breaks within sentences
        - Speaking as if you are them
        - Audience-style questions
        - Any text not meant to be spoken

        Remember: Your goal is to make them see how perfectly their experience
        aligns with this role and why they should be excited about this specific
        opportunity."""),
        ("human", """
        Their Background and Experience:
        {reference_material}

        Their Motivations and Goals:
        {qa_history}

        Create an energetic, personal talk that shows them why they're perfect for this role.
        """)
    ])

    transcript = llm.invoke(prompt.format_messages(
        reference_material=interview_state.reference_material,
        qa_history=interview_state.qa_history
    ))

    interview_state.transcript = transcript

    return interview_state
186
+
187
+ # def build_reference_material_node(interview_state: InterviewState) -> InterviewState:
188
+ # """
189
+ # Analyzes candidate background and job requirements to generate structured reference material.
190
+ # """
191
+
192
+ # # Initialize the LLM with structured output
193
+ # llm = ChatOpenAI(
194
+ # model="gpt-4o-mini",
195
+ # temperature=0.1
196
+ # ).with_structured_output(ReferenceMaterial)
197
+
198
+ # # Create a simple prompt
199
+ # prompt = ChatPromptTemplate.from_messages([
200
+ # ("system", "You are an expert career analyst. Analyze the provided information to generate a structured analysis."),
201
+ # ("human", """
202
+ # Resume: {resume}
203
+ # Personal Info: {personal}
204
+ # Job Description: {job}
205
+ # """)
206
+ # ])
207
+
208
+ # # Get structured output directly
209
+ # reference_material = llm.invoke(prompt.format_messages(
210
+ # resume=interview_state.user_inital_input.resume_text,
211
+ # personal=interview_state.user_inital_input.personal_text or "",
212
+ # job=interview_state.user_inital_input.job_text or ""
213
+ # ))
214
+
215
+ # # Convert ReferenceMaterial instance to a dictionary if needed
216
+ # if isinstance(reference_material, ReferenceMaterial):
217
+ # reference_material = reference_material.model_dump()
218
+
219
+ # # Update state with reference material only
220
+ # interview_state.reference_material = reference_material
221
+
222
+ # return interview_state
223
+
224
+ # def generate_questions_node(interview_state: InterviewState) -> InterviewState:
225
+ # """
226
+ # Generates relevant interview questions based on the candidate's background and previous Q&A history,
227
+ # along with generated answers in case the user doesn't respond.
228
+ # """
229
+
230
+ # # Initialize the LLM with structured output
231
+ # llm = ChatOpenAI(
232
+ # model="gpt-4o-mini",
233
+ # temperature=0.35
234
+ # ).with_structured_output(QAPair)
235
+
236
+ # # Build context about previous questions asked
237
+ # previous_qa_context = ""
238
+ # if interview_state.qa_history:
239
+ # previous_qa_context = "\nPreviously asked questions and answers:\n"
240
+ # for qa in interview_state.qa_history:
241
+ # previous_qa_context += f"Q: {qa.question}\nA: {qa.answer}\n"
242
+
243
+ # # Create a prompt for generating questions and answers
244
+ # prompt = ChatPromptTemplate.from_messages([
245
+ # ("system", "You are an enthusiastic technical recruiter. Generate relevant interview questions that have not been asked before, along with a suggested answer."),
246
+ # ("human", """
247
+ # Use the following reference material to inform your question:
248
+ # {reference_material}
249
+
250
+ # {previous_qa_context}
251
+
252
+ # Please generate new questions that have not been asked yet, and provide a suggested answer for each question.
253
+ # """)
254
+ # ])
255
+
256
+ # # Get structured output directly
257
+ # question_pair = llm.invoke(prompt.format_messages(
258
+ # reference_material=interview_state.reference_material,
259
+ # previous_qa_context=previous_qa_context
260
+ # ))
261
+
262
+ # # Convert QAPair instance to a dictionary if needed
263
+ # if isinstance(question_pair, QAPair):
264
+ # question_pair = question_pair.model_dump()
265
+
266
+ # # Append the new QAPair to the QA history
267
+ # interview_state.qa_history.append(question_pair)
268
+
269
+ # return interview_state
270
+
271
+ # def generate_transcript_node(interview_state: InterviewState, speaker_profile: dict) -> InterviewState:
272
+ # """
273
+ # Generates a high-energy podcast transcript based on the candidate's background,
274
+ # previous Q&A history, and motivational speaking style.
275
+ # """
276
+
277
+ # # Initialize the LLM with structured output
278
+ # llm = ChatOpenAI(
279
+ # model="gpt-4o-mini",
280
+ # temperature=0.6
281
+ # ).with_structured_output(HypeCastTranscript) # Expecting a string output for the transcript
282
+
283
+ # # Convert the speaker profile dictionary to a string
284
+ # speaker_profile_str = "\n".join(f"{key}: {value.strip()}" for key, value in speaker_profile.items())
285
+
286
+ # # Create a prompt for generating the transcript
287
+ # prompt = ChatPromptTemplate.from_messages([
288
+ # ("system", f"""You are an AI simulating a motivational speaker delivering a one-on-one motivational talk.
289
+ # Use the following profile to guide your communication style:
290
+ # \n{speaker_profile_str}
291
+
292
+ # CORE RULES:
293
+ # 1. Address the listener (candidate) directly as "you" throughout
294
+ # 2. Never narrate about the candidate in third person
295
+ # 3. Never address an imaginary audience
296
+ # 4. Maintain an intimate, personal conversation
297
+ # 5. Use the reference material to inform your motivation, not to tell their story back to them
298
+ # 6. NEVER include structural headers or section markers
299
+ # 7. NEVER use formatting markers like [INTRO], [PROBLEM], etc.
300
+ # 8. NEVER use **Speaker:** or similar labels
301
+ # 9. Deliver the speech as pure conversational text
302
+ # 10. Only use bold (**) for occasional emphasis of key words, not sections
303
+
304
+ # SPEAKING GUIDELINES:
305
+ # - CORRECT: "When you took on that challenging project - that moment when you had to step up..."
306
+ # - CORRECT: "Your unique background, the way you've combined different experiences - that's what makes you special..."
307
+ # - INCORRECT: "[INTRO] Speaker: Let me tell you..."
308
+ # - INCORRECT: "**Section 1:** Your journey..."
309
+ # - INCORRECT: "[PROBLEM IDENTIFICATION]"
310
+
311
+ # STRUCTURE:
312
+ # The speech should flow naturally without visible structure markers, transitioning smoothly between:
313
+ # 1. Direct acknowledgment
314
+ # 2. Personal connection
315
+ # 3. Building intensity
316
+ # 4. Call to action
317
+
318
+ # TONE:
319
+ # - Intimate and personal, as if speaking one-on-one
320
+ # - High energy but focused entirely on the individual
321
+ # - Draw directly from their experiences to build motivation
322
+ # - End with a specific call to action about their current opportunity
323
+
324
+ # The speaker should deliver a continuous, uninterrupted flow of motivational speech without any formatting or structural markers.
325
+ # """),
326
+ # ("human", """
327
+ # REFERENCE_MATERIAL: {reference_material}
328
+ # QA_HISTORY: {qa_history}
329
+
330
+ # Please generate a transcript using the provided reference material and QA history.
331
+ # """)
332
+ # ])
333
+ # print(f"\n\n prompt: {prompt} \n\n")
334
+ # # Get structured output directly
335
+ # transcript = llm.invoke(prompt.format_messages(
336
+ # reference_material=interview_state.reference_material,
337
+ # qa_history=interview_state.qa_history
338
+ # ))
339
+
340
+ # # Update the state with the generated transcript
341
+ # interview_state.transcript = transcript
342
+
343
+ # return interview_state
344
+
345
async def text_to_speech_node(interview_state: InterviewState) -> InterviewState:
    """
    Converts the generated transcript to speech using LMNT API.
    This node should be executed after the transcript generation.

    On success the mp3 path is stored in `interview_state.audio_file_path`;
    on any failure the path is set to None and the pipeline continues
    (best-effort: TTS errors must not abort the run).
    """
    # Nothing to synthesize yet - skip quietly rather than raising.
    if not interview_state.transcript or not interview_state.transcript.content:
        print("No transcript found to convert to speech")
        return interview_state

    try:
        async with Speech(os.getenv('LMNT_API_KEY')) as s:
            synthesize = await s.synthesize(
                text=interview_state.transcript.content,
                voice='a84f1be9-3db1-4a83-8d9b-1b8b7357d52d',  # Example voice ID
                language='en',
                format='mp3'
            )

        # Create output filename using timestamp (written to the current
        # working directory).
        timestamp = int(time.time())
        output_filename = f"hype_cast_{timestamp}.mp3"

        # Write the audio data.
        # NOTE(review): `synthesize` is written directly to disk, so it is
        # assumed to be raw mp3 bytes - confirm against the installed lmnt
        # client version; some versions return a dict with an 'audio' key.
        with open(output_filename, 'wb') as f:
            f.write(synthesize)

        # Store the audio file path in the interview state
        interview_state.audio_file_path = output_filename
        print(f'Audio saved to {output_filename}')

    except Exception as e:
        # Deliberate best-effort: log and continue with no audio file.
        print(f"Error in text-to-speech conversion: {str(e)}")
        interview_state.audio_file_path = None

    return interview_state
380
+
381
def run_text_to_speech_node(interview_state: InterviewState) -> InterviewState:
    """
    Synchronous bridge to the async `text_to_speech_node`.

    Drives the coroutine to completion so the TTS step can be plugged into
    the pipeline exactly like the other (synchronous) nodes.
    """
    updated_state = asyncio.run(text_to_speech_node(interview_state))
    return updated_state
387
+
hype_pack/utils/speaker_profiles.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Speaker style profile: Tony Robbins - high-intensity "state" language,
# urgency, and direct action commands. Consumed by generate_transcript_node,
# which reads 'signature_language_patterns' and 'core_message_structure'.
tony_robbins = {
    "core_message_structure": """
    - Problem identification → Personal story → Universal connection → Solution → Action steps
    - Uses progressive intensity building through repetition
    - Creates emotional peaks followed by practical solutions
    """,

    "engagement_questions": """
    - How many of you have ever...?
    - What would happen if...?
    - Who here is ready to...?
    - What's been stopping you from...?
    - If you could change one thing right now...
    """,

    "signature_language_patterns": """
    - Uses "state" language (In a beautiful/resourceful/powerful state)
    - Time urgency phrases (Right now!, In this moment, The time is now)
    - Certainty phrases (I know without a doubt, I promise you this)
    - Action commands (Stand up!, Raise your hand if, Say yes!)
    """,

    "story_framework": """
    - Opens with tension point
    - Describes emotional state vividly
    - Includes specific details about the moment of change
    - Connects personal breakthrough to universal truth
    - Ends with actionable insight
    """
}
31
+
32
# Speaker style profile: Les Brown - vulnerable storytelling building to
# emotional crescendos. Same key layout as the other profiles in this module.
les_brown = {
    "core_message_structure": """
    - Acknowledgment of current pain → Personal story → Possibility → Call to action
    - Uses "You have greatness within you" as recurring theme
    - Builds through emotional crescendos
    """,

    "engagement_questions": """
    - Can you say that with me?
    - How many of you know what I'm talking about?
    - Who feels me on this?
    - What would your life be like if...?
    """,

    "signature_language_patterns": """
    - Listen to me carefully...
    - You don't have to be great to get started...
    - Someone is waiting for you to...
    - It's possible!
    - That's what I'm talking about!
    """,

    "story_framework": """
    - Starts with vulnerability
    - Incorporates dialogue (especially with his mother)
    - Uses repetition of key phrases
    - Builds to emotional revelation
    - Connects to immediate action step
    """
}
62
+
63
# Speaker style profile: Eric Thomas - reality-check contrast and direct
# challenge. Same key layout as the other profiles in this module.
eric_thomas = {
    "core_message_structure": """
    - Reality check → Challenge → Story → Higher standard → Call to action
    - Uses contrast between current reality and potential
    - Builds intensity through repetition and volume
    """,

    "engagement_questions": """
    - Do you want it as bad as you want to breathe?
    - What's your why?
    - How bad do you want it?
    - Are you ready to sacrifice?
    """,

    "signature_language_patterns": """
    - When you want to succeed as bad as...
    - I'm here to tell you...
    - Average is over!
    - You still ain't gonna get it because...
    - I'm doing this while you're sleeping!
    """,

    "story_framework": """
    - Uses personal hardship as foundation
    - Incorporates modern cultural references
    - Builds through progressive intensity
    - Ends with direct challenge
    """
}
92
+
93
# Speaker style profile: Simon Sinek - calm, logical "Why → How → What"
# progression. Same key layout as the other profiles in this module.
simon_sinek = {
    "core_message_structure": """
    - Why → How → What framework
    - Uses circular reasoning to reinforce main point
    - Builds through logical progression
    - Ends with call to leadership
    """,

    "engagement_questions": """
    - What's your why?
    - Imagine if...
    - How many of you have experienced...?
    - What would happen if we...?
    - Why do you do what you do?
    """,

    "signature_language_patterns": """
    - The goal is not to...
    - Here's the thing...
    - People don't buy what you do...
    - Let me give you an example...
    - The reason is simple...
    """,

    "story_framework": """
    - Starts with counterintuitive observation
    - Uses business case studies
    - Connects seemingly unrelated concepts
    - Builds through logical progression
    - Ends with universal principle
    """
}
hype_pack/utils/state.py CHANGED
@@ -1,44 +1,140 @@
1
- from pydantic import BaseModel, Field
2
- from typing import List, Optional
 
 
3
 
4
- class ReferenceMaterial(BaseModel):
5
- """Structured analysis of candidate background and target position."""
6
-
7
- personal_history_summary: str = Field(
8
- description="Summary of candidate's career background and key achievements"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  )
10
-
11
- aspiring_position_summary: str = Field(
12
- description="Overview of the target role and its key requirements"
13
  )
14
-
15
- personal_focus_points: List[str] = Field(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  default_factory=list,
17
- description="Key points highlighting candidate's relevant experiences and skills"
 
 
 
 
18
  )
19
 
20
- aspiring_position_focus_points: List[str] = Field(
 
 
 
 
 
21
  default_factory=list,
22
- description="Essential requirements and expectations of the target role"
23
  )
24
 
25
- class QAPair(BaseModel):
26
- """Individual question-answer interaction during the interview process."""
 
27
 
28
- question_id: int = Field(description="Unique identifier for the Q&A pair")
29
- question: str = Field(description="The question asked")
30
- answer: Optional[str] = Field(default=None, description="The generated or provided answer")
31
 
32
- class InitialInput(BaseModel):
33
- """Raw input from the user."""
34
- resume_text: str = Field(default="", description="Raw text extracted from the resume")
35
- personal_text: Optional[str] = Field(default=None, description="Additional personal information provided by the user")
36
- job_text: Optional[str] = Field(default=None, description="Job description or position-related text")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  class InterviewState(BaseModel):
39
  """Current state of the interview process."""
40
- user_inital_input: InitialInput
41
  reference_material: Optional[ReferenceMaterial] = None
42
- qa_history: List[QAPair] = Field(default_factory=list)
43
- message_history: List[str] = Field(default_factory=list) # Consider using specific message types if applicable
44
- transcript: str = Field(default="", description="Generated podcast transcript as a single string")
 
 
 
 
 
1
+ from pydantic import BaseModel, Field, conlist
2
+ from langchain_core.messages import BaseMessage
3
+ from langgraph.graph.message import add_messages
4
+ from typing import List, Optional, Sequence, Annotated, Dict
5
 
6
+
7
class InitialInput(BaseModel):
    """Raw input from the user.

    Collected once at the start of a session; the pipeline nodes only
    read from it.
    """
    resume_text: str = Field(default="", description="Raw text extracted from the resume")
    personal_text: Optional[str] = Field(default=None, description="Additional personal information provided by the user")
    job_text: Optional[str] = Field(default=None, description="Job description or position-related text")
12
+
13
+ ### These are classes for the build_reference_material_node
14
+
15
class PersonalStory(BaseModel):
    """Key moments that can be used as storytelling elements.

    Follows a challenge → action → result → lesson arc so the transcript
    node can retell each story with a consistent shape.
    """
    challenge: str = Field(description="Specific challenge or obstacle faced")
    action: str = Field(description="How they addressed the challenge")
    result: str = Field(description="Outcome and impact of their actions")
    lessons_learned: str = Field(description="Key takeaways from this experience")
21
+
22
+
23
class MotivationalElements(BaseModel):
    """Core elements that drive the person."""
    key_values: List[str] = Field(
        description="Personal and professional values demonstrated in their history"
    )
    proud_moments: List[str] = Field(
        description="Achievements they speak about with genuine enthusiasm"
    )
    impact_areas: List[str] = Field(
        description="Areas where they've made meaningful differences"
    )
34
+
35
class RoleConnection(BaseModel):
    """Structured connection between experience and target role."""
    experience: str = Field(description="Relevant past experience")
    role_requirement: str = Field(description="Matching requirement in target role")
    # Free-form label filled in by the LLM; not constrained to an enum.
    strength_level: str = Field(description="How strongly this experience matches")
40
+
41
class ReferenceMaterial(BaseModel):
    """Essential elements for creating a motivational speech.

    Produced by build_reference_material_node as the LLM's structured
    output, then consumed by the question and transcript nodes.
    """

    core_narrative: str = Field(
        description="The main theme that emerges from their background and aspirations"
    )

    compelling_stories: List[PersonalStory] = Field(
        default_factory=list,
        description="Key stories that can be used to illustrate their journey"
    )

    motivation_profile: MotivationalElements = Field(
        description="Elements that genuinely motivate the person"
    )

    role_summary: str = Field(
        description="A summary of the target role and its key requirements"
    )

    target_role_connections: List[RoleConnection] = Field(
        default_factory=list,
        description="Clear connections between their experiences and the target role"
    )

    authenticity_markers: List[str] = Field(
        description="Genuine aspects of their personality and experience that make their story unique"
    )
70
 
 
 
 
71
 
72
+ # class ReferenceMaterial(BaseModel):
73
+ # """Structured analysis of candidate background and target position."""
74
+
75
+ # personal_history_summary: str = Field(
76
+ # description="Summary of candidate's career background and key achievements"
77
+ # )
78
+
79
+ # aspiring_position_summary: str = Field(
80
+ # description="Overview of the target role and its key requirements"
81
+ # )
82
+
83
+ # personal_focus_points: List[str] = Field(
84
+ # default_factory=list,
85
+ # description="Key points highlighting candidate's relevant experiences and skills"
86
+ # )
87
+
88
+ # aspiring_position_focus_points: List[str] = Field(
89
+ # default_factory=list,
90
+ # description="Essential requirements and expectations of the target role"
91
+ # )
92
 
93
+ ### These are the classes for the generate_questions_node
94
+
95
class AnswerChoice(BaseModel):
    """Single answer choice for a question."""
    # The option text shown to the user.
    text: str = Field(description="The answer option text")
    # Loose grouping label; its vocabulary is defined by the LLM prompt,
    # not by this schema.
    category: str = Field(description="Simple category this answer aligns with")
99
+
100
class Question(BaseModel):
    """Question with multiple choice options."""
    question_text: str = Field(description="The main question to be asked")
    context: str = Field(description="Brief context from their background")
    # BUG FIX: this was Annotated[List[AnswerChoice], conlist(AnswerChoice,
    # min_length=3, max_length=3)]. conlist() builds a *type*, not Annotated
    # metadata, so the exactly-three-choices constraint was not applied.
    # Express the constraint directly on the Field instead (pydantic v2).
    choices: List[AnswerChoice] = Field(
        description="Three possible answer choices",
        min_length=3,
        max_length=3,
    )
    # Filled in after the user picks/enters an answer; empty until then.
    user_answer: str = Field(description="The user's answer to the question", default="")
108
+
109
+ # Make sure Question is properly defined as a Pydantic model
110
class QuestionList(BaseModel):
    """Ordered collection of interview questions.

    Doubles as the structured-output schema for the question-generation
    LLM call and as the qa_history container on InterviewState.
    """
    questions: List[Question]
112
+
113
+ # class QAPair(BaseModel):
114
+ # """Individual question-answer interaction during the interview process."""
115
+
116
+ # question_id: int = Field(description="Unique identifier for the Q&A pair")
117
+ # question: str = Field(description="The question asked")
118
+ # answer: Optional[str] = Field(default=None, description="The generated or provided answer")
119
+
120
+ ## These are used for the transcript generation node
121
+
122
class HypeCastTranscript(BaseModel):
    """Motivational speech transcript.

    `content` is fed directly to the TTS synthesizer by
    text_to_speech_node, so it should be plain spoken text.
    """
    content: str = Field(
        description="The complete motivational speech as a single flowing conversation"
    )
127
+
128
+
129
### This is the main state class
class InterviewState(BaseModel):
    """Current state of the interview process.

    Threaded through every pipeline node; each node fills in or extends
    one of the optional fields below.
    """
    # Raw resume / personal / job text supplied by the user.
    user_initial_input: InitialInput
    # Output of build_reference_material_node.
    reference_material: Optional[ReferenceMaterial] = None
    # Accumulated multiple-choice questions from generate_questions_node.
    qa_history: Optional[QuestionList] = None
    # Output of generate_transcript_node.
    transcript: Optional[HypeCastTranscript] = None
    # LangGraph-managed chat log; add_messages appends new messages on
    # state updates instead of replacing the sequence.
    messages: Annotated[Sequence[BaseMessage], add_messages] = []
    # Path of the mp3 written by the text-to-speech node, if any.
    audio_file_path: str | None = None

    class Config:
        arbitrary_types_allowed = True  # Enable arbitrary types for BaseMessage
poetry.lock CHANGED
The diff for this file is too large to render. See raw diff
 
pyproject.toml CHANGED
@@ -5,24 +5,30 @@ description = "AI-powered career matching system"
5
  authors = ["Leo Walker <leowalker89@gmail.com>"]
6
 
7
  [tool.poetry.dependencies]
8
- python = ">=3.9,<3.12"
9
  langchain = ">=0.3.0,<0.4.0"
10
  langchain-openai = ">=0.2.0"
11
  langchain-anthropic = ">=0.2.0,<0.3.0"
12
  langchain-google-genai = ">=0.0.5"
13
  langchain-community = ">=0.3.0,<0.4.0"
 
14
  pypdf = "^3.17.1"
15
  pydantic = ">=2.0.0,<3.0.0"
16
  python-dotenv = "^1.0.0"
17
  openai = ">=1.6.1"
18
  lmnt = "^0.1.0"
19
  pydub = "^0.25.1"
 
 
20
 
21
 
22
  [tool.poetry.group.dev.dependencies]
23
  ipykernel = "^6.29.5"
24
  notebook = "^7.2.2"
25
 
 
 
 
26
  [build-system]
27
  requires = ["poetry-core"]
28
  build-backend = "poetry.core.masonry.api"
 
5
  authors = ["Leo Walker <leowalker89@gmail.com>"]
6
 
7
  [tool.poetry.dependencies]
8
+ python = ">3.9.7,<3.12"
9
  langchain = ">=0.3.0,<0.4.0"
10
  langchain-openai = ">=0.2.0"
11
  langchain-anthropic = ">=0.2.0,<0.3.0"
12
  langchain-google-genai = ">=0.0.5"
13
  langchain-community = ">=0.3.0,<0.4.0"
14
+ langgraph = "*"
15
  pypdf = "^3.17.1"
16
  pydantic = ">=2.0.0,<3.0.0"
17
  python-dotenv = "^1.0.0"
18
  openai = ">=1.6.1"
19
  lmnt = "^0.1.0"
20
  pydub = "^0.25.1"
21
+ PyPDF2 = "*"
22
+ streamlit = "^1.40.1"
23
 
24
 
25
  [tool.poetry.group.dev.dependencies]
26
  ipykernel = "^6.29.5"
27
  notebook = "^7.2.2"
28
 
29
+ [tool.poetry.scripts]
30
+ streamlit = "streamlit.web.cli:main"
31
+
32
  [build-system]
33
  requires = ["poetry-core"]
34
  build-backend = "poetry.core.masonry.api"