Spaces:
Sleeping
Sleeping
| from typing import Dict, Any | |
| from app.agent.state import AgentState | |
| from app.services.llm import get_llm_service | |
| from app.services.file_generator import get_file_generator | |
| from app.models.question import Question, Answer | |
| from app.models.checklist import ChecklistItem | |
def generate_initial_questions(state: AgentState) -> Dict[str, Any]:
    """Generate the opening set of interview questions (round 1).

    Asks the LLM service for the initial question payload, wraps each raw
    dict into a Question model, and flags the state as waiting for the
    user's answers.

    Returns a partial state update with the new questions, the round
    counter reset to 1, and waiting_for_answers set to True.
    """
    service = get_llm_service()
    raw_questions = service.generate_initial_questions()

    # Only "id" and "text" are consumed from each raw question dict.
    question_models = []
    for raw in raw_questions:
        question_models.append(Question(id=raw["id"], text=raw["text"]))

    return {
        "current_questions": question_models,
        "current_round": 1,
        "waiting_for_answers": True,
    }
def process_answers(state: AgentState) -> Dict[str, Any]:
    """Convert pending transcripts into Answer records.

    Pairs each transcript with its question positionally; transcripts
    beyond the number of current questions are silently dropped (same
    truncation the original index guard performed). The new Answer
    objects are appended to the accumulated history and the pending
    transcript queue is cleared.
    """
    transcripts = state.get("pending_transcripts", [])
    questions = state.get("current_questions", [])
    round_no = state.get("current_round", 1)

    # Copy so the incoming state list is never mutated in place.
    answers = list(state.get("all_answers", []))
    # zip truncates at the shorter sequence — equivalent to the
    # original `i < len(questions)` bounds check.
    answers.extend(
        Answer(
            question_id=question.id,
            question_text=question.text,
            audio_transcript=transcript,
            round_number=round_no,
        )
        for question, transcript in zip(questions, transcripts)
    )

    return {
        "all_answers": answers,
        "pending_transcripts": [],
        "waiting_for_answers": False,
    }
def analyze_round(state: AgentState) -> Dict[str, Any]:
    """Analyze the finished round's answers and route the interview.

    Always records the round summary returned by the LLM. If more rounds
    remain (current_round < max_rounds), the next round's questions are
    prepared and the round counter advances; otherwise the state is left
    ready for checklist generation (is_complete stays False — the
    checklist node sets it).
    """
    service = get_llm_service()
    round_no = state.get("current_round", 1)
    answers = state.get("all_answers", [])
    # Copy so the appended summary does not mutate the incoming state.
    summaries = list(state.get("round_summaries", []))

    analysis = service.analyze_round_and_generate_questions(
        round_number=round_no,
        all_answers=answers,
        round_summaries=summaries,
    )
    summaries.append(analysis.get("round_summary", ""))

    # Guard clause: final round — no further questions are generated.
    if round_no >= state.get("max_rounds", 3):
        return {
            "round_summaries": summaries,
            "waiting_for_answers": False,
            "is_complete": False,
        }

    next_questions = [
        Question(id=item["id"], text=item["text"])
        for item in analysis.get("questions", [])
    ]
    return {
        "current_questions": next_questions,
        "current_round": round_no + 1,
        "round_summaries": summaries,
        "waiting_for_answers": True,
        "is_complete": False,
    }
def generate_checklist(state: AgentState) -> Dict[str, Any]:
    """Produce the final checklist and its Markdown rendering.

    Feeds the full answer history and round summaries to the LLM,
    validates each returned entry through ChecklistItem, renders the
    Markdown report, and marks the interview complete.
    """
    service = get_llm_service()
    renderer = get_file_generator()

    answers = state.get("all_answers", [])
    summaries = state.get("round_summaries", [])
    session = state.get("session_id", "unknown")

    checklist_payload = service.generate_checklist(answers, summaries)
    # Each entry dict is expanded directly into the model's fields.
    items = [
        ChecklistItem(**entry)
        for entry in checklist_payload.get("checklist", [])
    ]

    markdown_text = renderer.generate_markdown(
        session_id=session,
        checklist=items,
        round_summaries=summaries,
    )

    return {
        "checklist_items": items,
        "markdown_content": markdown_text,
        "is_complete": True,
    }
def check_round_complete(state: AgentState) -> str:
    """Routing predicate: decide whether to keep interviewing.

    Returns "generate_checklist" once the round counter has reached the
    configured maximum (default 3), otherwise "wait_for_answers".
    """
    rounds_done = state.get("current_round", 1)
    limit = state.get("max_rounds", 3)
    return "generate_checklist" if rounds_done >= limit else "wait_for_answers"