# Multi-persona chapter creation workflow for the book "self.api"
# (Streamlit UI + Anthropic Claude + LangChain/Tavily research).
| import os | |
| import time | |
| import anthropic | |
| import streamlit as st | |
| from datetime import datetime | |
| from typing import Dict, List, Any, Tuple, Optional | |
| from langgraph.graph import StateGraph, END | |
| from langgraph.prebuilt import ToolNode | |
| import langchain | |
| from langchain_anthropic import ChatAnthropic | |
| from langchain_core.messages import HumanMessage, AIMessage | |
| from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder | |
| from langchain.tools.tavily_search import TavilySearchResults | |
def get_claude_client():
    """Return an Anthropic API client, or None when no key is configured.

    The key comes from the ANTHROPIC_API_KEY environment variable, falling
    back to the value stored in Streamlit session state.
    """
    key = os.environ.get("ANTHROPIC_API_KEY", st.session_state.get("anthropic_api_key", ""))
    if key:
        return anthropic.Anthropic(api_key=key)
    # Surface the problem in the UI so the user knows why nothing ran.
    st.error("Please set your Anthropic API key in the settings.")
    return None
def get_langchain_claude():
    """Return a LangChain ChatAnthropic model, or None when no key is set.

    Mirrors get_claude_client() but wraps the key in a LangChain chat model
    pinned to the Claude 3.7 Sonnet snapshot used throughout this module.
    """
    key = os.environ.get("ANTHROPIC_API_KEY", st.session_state.get("anthropic_api_key", ""))
    if not key:
        st.error("Please set your Anthropic API key in the settings.")
        return None
    model = ChatAnthropic(
        model="claude-3-7-sonnet-20250219",
        temperature=0.2,
        anthropic_api_key=key,
    )
    return model
# Define state for the LangGraph agent workflow
class ChapterWorkflowState(dict):
    """State for the chapter creation workflow.

    Behaves exactly like a plain dict (every workflow node reads and writes
    it with ``state[...]`` / ``state.get(...)``); the annotations below only
    document the keys that may be present.

    Keys:
        chapter_info: Chapter metadata (title, outline, ...).
        research_results: Raw research output, when research was run.
        selected_personas: IDs of the personas chosen for the chapter.
        selection_rationale: Selector agent's explanation of its choices.
        persona_contributions: Mapping of persona ID -> contribution text.
        final_chapter: Synthesized chapter text, once available.
        error: Error message when a step failed.
    """

    # Annotations only -- deliberately no class-level defaults.  The previous
    # ``selected_personas: List[str] = []`` form created mutable class
    # attributes shared by every instance (and shadowing the dict contents),
    # which is the classic shared-mutable-default bug.
    chapter_info: Dict
    research_results: Optional[str]
    selected_personas: List[str]
    selection_rationale: str
    persona_contributions: Dict[str, str]
    final_chapter: Optional[str]
    error: Optional[str]
# Get persona contribution
def get_persona_contribution(persona_id: str, state: dict) -> dict:
    """Get contribution from a specific persona.

    Builds a prompt from the chapter info (plus any research results already
    in the state) and asks Claude, running under the persona's system prompt,
    for an ~1800-2200 word contribution.

    Args:
        persona_id: Key into st.session_state.persona_library.
        state: Workflow state; must contain "chapter_info" and may contain
            "research_results" and "persona_contributions".

    Returns:
        A new state dict with the contribution merged into
        "persona_contributions", or with "error" set on failure.  The input
        state is never mutated -- every exit path returns a fresh dict.
    """
    client = get_claude_client()
    if not client:
        return {**state, "error": "Claude client not available"}
    if persona_id not in st.session_state.persona_library:
        return {**state, "error": f"Persona {persona_id} not found in library"}
    persona = st.session_state.persona_library[persona_id]
    system_prompt = persona["system_prompt"]
    chapter_info = state["chapter_info"]
    chapter_title = chapter_info.get("title", "Untitled Chapter")
    chapter_outline = chapter_info.get("outline", "No outline provided")
    book_context = f"Book Title: {st.session_state.book_data['title']}\nSubtitle: {st.session_state.book_data['subtitle']}"
    research_content = ""
    if state.get("research_results"):
        research_content = f"\n\nResearch Results:\n{state['research_results']}\n"
    # Target word count for each contribution
    target_words = "Your contribution should be around 1800-2200 words in length to ensure adequate depth and detail. This is important as we aim for each final chapter to be approximately 30 A4 pages (7500-9000 words)."
    try:
        # Log starting the contribution
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Requesting contribution from {persona['name']} for chapter '{chapter_title}'",
            "timestamp": time.time()
        })
        message = client.messages.create(
            model="claude-3-7-sonnet-20250219",
            system=system_prompt,
            messages=[
                {
                    "role": "user",
                    "content": f"You are contributing to a chapter for the book 'self.api' as the {persona['name']} persona.\n\n"
                               f"Book Context:\n{book_context}\n\n"
                               f"Chapter Title: {chapter_title}\n\n"
                               f"Chapter Outline:\n{chapter_outline}\n{research_content}\n\n"
                               f"{target_words}\n\n"
                               f"Please provide your unique perspective and contribution to this chapter based on your expertise."
                }
            ],
            temperature=0.3,
            max_tokens=6000
        )
        # Extract the response text from the first content block
        contribution = message.content[0].text
        # Check that we got a valid contribution; under ~50 chars is treated
        # as a failed generation rather than a usable draft
        if not contribution or len(contribution.strip()) < 50:
            error_message = f"Received empty or very short contribution from {persona['name']}"
            st.session_state.thinking_logs.append({
                "agent": persona["name"],
                "thought": error_message,
                "timestamp": time.time()
            })
            return {**state, "error": error_message}
        # Log the contribution
        st.session_state.thinking_logs.append({
            "agent": persona["name"],
            "thought": contribution,
            "timestamp": time.time()
        })
        # Update the persona contributions in the state (copy-on-write merge)
        new_contributions = {**state.get("persona_contributions", {}), persona_id: contribution}
        # Log success
        word_count = len(contribution.split())
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Received contribution from {persona['name']} ({word_count} words)",
            "timestamp": time.time()
        })
        return {**state, "persona_contributions": new_contributions}
    except Exception as e:
        error_message = f"Error generating contribution for {persona['name']}: {str(e)}"
        st.session_state.thinking_logs.append({
            "agent": persona["name"],
            "thought": error_message,
            "timestamp": time.time()
        })
        return {**state, "error": error_message}
# Select personas node
def select_personas(state: dict) -> dict:
    """Select the most appropriate personas for the chapter.

    Asks the "selector_agent" persona (via Claude) to pick
    state["num_personas"] personas for state["chapter_info"], then parses the
    model's numbered-list reply back into persona IDs.  Falls back first to
    direct name matching over the whole reply, then to a hard-coded default
    list, if parsing fails.

    Returns:
        A new state dict with "selected_personas" (always including
        "meta_agent") and "selection_rationale" set, or with "error" set
        on failure.
    """
    client = get_claude_client()
    if not client:
        return {**state, "error": "Claude client not available"}
    # Get all available personas; the meta/selector agents orchestrate rather
    # than contribute, so they are never offered as candidates
    all_personas = list(st.session_state.persona_library.values())
    available_personas = [f"- {p['name']}: {p['description']}" for p in all_personas
                          if p['id'] not in ['meta_agent', 'selector_agent']]
    # Create a mapping from name to ID for easier lookup
    name_to_id = {persona["name"].lower(): persona["id"] for persona in all_personas}
    # Prepare the persona selection prompt
    persona_list = "\n".join(available_personas)
    chapter_info = state["chapter_info"]
    chapter_title = chapter_info.get("title", "Untitled Chapter")
    chapter_outline = chapter_info.get("outline", "No outline provided")
    book_context = f"Book Title: {st.session_state.book_data['title']}\nSubtitle: {st.session_state.book_data['subtitle']}"
    num_personas = state.get("num_personas", 5)
    try:
        # Log that we're selecting personas
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Selecting {num_personas} personas for chapter '{chapter_title}'",
            "timestamp": time.time()
        })
        message = client.messages.create(
            model="claude-3-7-sonnet-20250219",
            system=st.session_state.persona_library["selector_agent"]["system_prompt"],
            messages=[
                {
                    "role": "user",
                    "content": f"Select the {num_personas} most appropriate personas for writing this book chapter:\n\n"
                               f"Book Context:\n{book_context}\n\n"
                               f"Chapter Title: {chapter_title}\n\n"
                               f"Chapter Outline:\n{chapter_outline}\n\n"
                               f"Available personas:\n{persona_list}\n\n"
                               f"For each selected persona, briefly explain why they're appropriate for this chapter. "
                               f"Return your selection in this format:\n"
                               f"1. [Persona Name] - [Brief justification]\n"
                               f"2. [Persona Name] - [Brief justification]\n"
                               f"...\n"
                               f"Important: Each persona name must EXACTLY match one of the following: "
                               f"{', '.join([p['name'] for p in all_personas if p['id'] not in ['meta_agent', 'selector_agent']])}"
                }
            ],
            temperature=0.3,
            max_tokens=1000
        )
        # Extract the response
        response_text = message.content[0].text
        # Log the full response for debugging
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Selector agent returned:\n{response_text}",
            "timestamp": time.time()
        })
        # Parse the response to get the persona names - improved extraction
        selected_personas = []
        # Create a set of all persona names for fuzzy matching
        all_persona_names = {p["name"] for p in all_personas if p["id"] not in ["meta_agent", "selector_agent"]}
        # First try numbered format parsing (e.g., "1. Technical Architect - ...")
        for line in response_text.split("\n"):
            line = line.strip()
            if not line or len(line) < 3:
                continue
            # Check for numbered list items (1., 2., etc.)
            # NOTE(review): this only recognizes single-digit prefixes --
            # "10. Name - ..." fails the line[1] == "." check.  Harmless while
            # num_personas stays below 10; confirm if that limit ever grows.
            if line[0].isdigit() and line[1] == ".":
                # Extract everything after the number until the first dash
                if " - " in line:
                    potential_name = line[2:line.find(" - ")].strip()
                    # Try exact match first
                    if potential_name in all_persona_names:
                        persona_id = name_to_id.get(potential_name.lower())
                        if persona_id and persona_id not in selected_personas:
                            selected_personas.append(persona_id)
                        continue
                    # If no exact match, try case-insensitive match
                    for persona_name in all_persona_names:
                        if potential_name.lower() == persona_name.lower():
                            persona_id = name_to_id.get(persona_name.lower())
                            if persona_id and persona_id not in selected_personas:
                                selected_personas.append(persona_id)
                            break
        # If no personas were found with the above method, try direct name matching throughout the text
        if not selected_personas:
            st.session_state.thinking_logs.append({
                "agent": "System",
                "thought": "No personas found with numbered list parsing. Trying direct name matching.",
                "timestamp": time.time()
            })
            for persona in all_personas:
                if persona["id"] in ["meta_agent", "selector_agent"]:
                    continue
                if persona["name"] in response_text:
                    selected_personas.append(persona["id"])
        # Always include the meta agent (it performs the final synthesis)
        if "meta_agent" not in selected_personas:
            selected_personas.append("meta_agent")
        # If we still don't have enough personas, add some default ones
        if len(selected_personas) < 2:  # Less than 2 personas (including meta_agent)
            st.session_state.thinking_logs.append({
                "agent": "System",
                "thought": f"Not enough personas identified. Adding default personas.",
                "timestamp": time.time()
            })
            # Add some default personas that are useful for most chapters
            default_personas = ["tech_architect", "eastern_philosopher", "meditation_teacher", "creative_writer"]
            for persona_id in default_personas:
                if persona_id not in selected_personas and persona_id in st.session_state.persona_library:
                    selected_personas.append(persona_id)
                if len(selected_personas) >= num_personas + 1:  # +1 for meta_agent
                    break
        # Check that we have enough personas
        if len(selected_personas) < 2:  # Still not enough
            error_message = f"Could not identify enough personas from selector output. Only found: {selected_personas}"
            st.session_state.thinking_logs.append({
                "agent": "System",
                "thought": error_message,
                "timestamp": time.time()
            })
            return {**state, "error": error_message}
        # Log the selection
        st.session_state.thinking_logs.append({
            "agent": "Persona Selector",
            "thought": f"Selected the following personas for the chapter '{chapter_title}':\n{', '.join(selected_personas)}",
            "timestamp": time.time()
        })
        # Return a dictionary with updated state
        return {
            **state,
            "selected_personas": selected_personas,
            "selection_rationale": response_text
        }
    except Exception as e:
        error_message = f"Error selecting personas: {str(e)}"
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": error_message,
            "timestamp": time.time()
        })
        return {**state, "error": error_message}
# Research node
def conduct_research(state: dict) -> dict:
    """Conduct research for the chapter if a query is provided.

    Runs the Tavily search tool on state["research_query"], framed by the
    chapter title.  A no-op (returns the state unchanged) when no query is
    set.

    Returns:
        A new state dict with "research_results" added, or with "error" set
        if the Tavily key is missing or the search fails.
    """
    if not state.get("research_query"):
        return state  # No research query, skip this step
    # Initialize Tavily search tool if API key is available
    tavily_api_key = os.environ.get("TAVILY_API_KEY", st.session_state.get("tavily_api_key", ""))
    if not tavily_api_key:
        return {**state, "error": "Tavily API key not available"}
    try:
        search_tool = TavilySearchResults(api_key=tavily_api_key)
        # Get the chapter info
        chapter_info = state["chapter_info"]
        chapter_title = chapter_info.get("title", "Untitled Chapter")
        research_query = state["research_query"]
        # Log that we're conducting research
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Conducting research for query: '{research_query}'",
            "timestamp": time.time()
        })
        # Conduct the search
        results = search_tool.invoke(f"Research for book chapter '{chapter_title}': {research_query}")
        # Log the research results
        st.session_state.thinking_logs.append({
            "agent": "Research Agent",
            "thought": f"Research results for query '{research_query}':\n{results}",
            "timestamp": time.time()
        })
        return {**state, "research_results": results}
    except Exception as e:
        error_message = f"Error conducting research: {str(e)}"
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": error_message,
            "timestamp": time.time()
        })
        return {**state, "error": error_message}
# Define a function to generate contributions from all selected personas
def generate_all_contributions(state: dict) -> dict:
    """Generate contributions from all selected personas.

    Iterates over state["selected_personas"] (skipping "meta_agent", which
    only synthesizes) and calls get_persona_contribution() for each persona
    that does not already have a contribution in the state.  Returns
    immediately on the first persona error.

    Returns:
        A new state dict with "persona_contributions" populated, or with
        "error" set on failure.
    """
    if not state.get("selected_personas"):
        return {**state, "error": "No personas selected"}
    # Initialize persona_contributions if it doesn't exist
    if "persona_contributions" not in state:
        state = {**state, "persona_contributions": {}}
    new_state = state.copy()
    contributions_count = 0
    # Log start of contribution generation
    st.session_state.thinking_logs.append({
        "agent": "System",
        "thought": f"Starting to generate contributions from {len([p for p in state['selected_personas'] if p != 'meta_agent'])} personas",
        "timestamp": time.time()
    })
    for persona_id in state["selected_personas"]:
        if persona_id == "meta_agent":
            continue  # Skip meta agent
        # Check if contribution already exists -- lets a partially completed
        # run resume without re-querying the model
        if persona_id in new_state.get("persona_contributions", {}):
            contributions_count += 1
            continue
        # Get contribution for this persona
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Generating contribution for {persona_id}",
            "timestamp": time.time()
        })
        updated_state = get_persona_contribution(persona_id, new_state)
        # Check if there was an error; abort the whole batch on first failure
        if "error" in updated_state and updated_state["error"]:
            return updated_state
        # Check if contribution was actually added
        if persona_id in updated_state.get("persona_contributions", {}):
            contributions_count += 1
        new_state = updated_state
    # If no contributions were generated, return an error
    if contributions_count == 0:
        return {**state, "error": "No contributions were generated"}
    # Log success
    st.session_state.thinking_logs.append({
        "agent": "System",
        "thought": f"Successfully generated {contributions_count} contributions",
        "timestamp": time.time()
    })
    return new_state
# Synthesize chapter node
def synthesize_chapter(state: dict) -> dict:
    """Synthesize the final chapter from all persona contributions.

    Feeds every non-empty contribution to the "meta_agent" persona, which
    rewrites them into one cohesive chapter targeting 7500-9000 words.

    Returns:
        A new state dict with "final_chapter" set, or with "error" set when
        there are no usable contributions or the API call fails.
    """
    client = get_claude_client()
    if not client:
        return {**state, "error": "Claude client not available"}
    contributions = state.get("persona_contributions", {})
    # Log details about contributions
    st.session_state.thinking_logs.append({
        "agent": "System",
        "thought": f"Preparing to synthesize chapter with {len(contributions)} contributions",
        "timestamp": time.time()
    })
    # Check if we have contributions to work with
    if not contributions:
        return {**state, "error": "No contributions available to synthesize"}
    # Check if all contributions are empty or None; keep only non-blank strings
    valid_contributions = {k: v for k, v in contributions.items() if v and isinstance(v, str) and len(v.strip()) > 0}
    if not valid_contributions:
        return {**state, "error": "No valid contributions to synthesize (all contributions are empty)"}
    chapter_info = state["chapter_info"]
    chapter_title = chapter_info.get("title", "Untitled Chapter")
    chapter_outline = chapter_info.get("outline", "No outline provided")
    book_context = f"Book Title: {st.session_state.book_data['title']}\nSubtitle: {st.session_state.book_data['subtitle']}"
    # Format all the contributions; personas no longer in the library are
    # silently skipped because their display name is unknown
    contributions_text = ""
    for persona_id, contribution in valid_contributions.items():
        if persona_id in st.session_state.persona_library:
            persona_name = st.session_state.persona_library[persona_id]["name"]
            contributions_text += f"\n\n## {persona_name} Contribution:\n{contribution}\n"
    # Calculate total contribution word count to provide guidance
    total_contrib_words = sum(len(contribution.split()) for contribution in valid_contributions.values())
    target_chapter_length = f"The final chapter should be approximately 7500-9000 words long (roughly 30 A4 pages). The combined contributions have {total_contrib_words} words, but you should focus on quality, coherence, and completeness rather than strict word count. Integrate all important insights from the contributions while maintaining a smooth narrative flow."
    try:
        st.session_state.thinking_logs.append({
            "agent": "Meta Agent Synthesizer",
            "thought": f"Beginning synthesis of {len(valid_contributions)} contributions totaling {total_contrib_words} words",
            "timestamp": time.time()
        })
        message = client.messages.create(
            model="claude-3-7-sonnet-20250219",
            system=st.session_state.persona_library["meta_agent"]["system_prompt"],
            messages=[
                {
                    "role": "user",
                    "content": f"You are synthesizing a book chapter for 'self.api' by integrating multiple persona contributions.\n\n"
                               f"Book Context:\n{book_context}\n\n"
                               f"Chapter Title: {chapter_title}\n\n"
                               f"Chapter Outline:\n{chapter_outline}\n\n"
                               f"Expert Contributions:\n{contributions_text}\n\n"
                               f"{target_chapter_length}\n\n"
                               f"Please synthesize these diverse perspectives into a cohesive, engaging book chapter that flows well, "
                               f"maintains consistent terminology, effectively uses the API metaphor to illuminate spiritual concepts, "
                               f"and provides both intellectual understanding and practical guidance. The chapter should be written in a "
                               f"flowing narrative style appropriate for a published book, not as a collection of separate perspectives."
                }
            ],
            temperature=0.2,
            max_tokens=12000
        )
        # Extract the response
        final_chapter = message.content[0].text
        # Calculate word count for the final chapter
        word_count = len(final_chapter.split())
        # Log the synthesis
        st.session_state.thinking_logs.append({
            "agent": "Meta Agent Synthesizer",
            "thought": "Final chapter synthesis complete.",
            "timestamp": time.time()
        })
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Final chapter word count: {word_count} words (target: 7500-9000 words)",
            "timestamp": time.time()
        })
        return {**state, "final_chapter": final_chapter}
    except Exception as e:
        error_message = f"Error synthesizing chapter: {str(e)}"
        st.session_state.thinking_logs.append({
            "agent": "Meta Agent Synthesizer",
            "thought": error_message,
            "timestamp": time.time()
        })
        return {**state, "error": error_message}
# Function to run the chapter creation workflow with a specific initial state
def run_chapter_creation(
    chapter_info: Dict[str, Any],
    research_query: Optional[str] = None,
    num_personas: int = 5
) -> Dict[str, Any]:
    """
    Run the full chapter creation workflow with the given initial state.

    The steps (research -> persona selection -> contributions -> synthesis)
    are executed sequentially in-process rather than through a compiled
    LangGraph graph; each step short-circuits the run when it sets "error".

    Args:
        chapter_info: Dictionary with chapter title, outline, etc.
            NOTE: this dict is mutated in place on success -- "content",
            "selected_personas", "selection_rationale", "contributions",
            and optionally "research" entries are written back into it.
        research_query: Optional query for research
        num_personas: Number of personas to select

    Returns:
        The final state of the workflow (or a minimal {"error": ...} dict
        if an unexpected exception escaped a step).
    """
    try:
        # Log the beginning of the process
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": f"Starting chapter creation workflow for '{chapter_info.get('title', 'Untitled')}' with {num_personas} personas",
            "timestamp": time.time()
        })
        # Create the initial state
        initial_state = {
            "chapter_info": chapter_info,
            "research_query": research_query,
            "num_personas": num_personas,
            "selected_personas": [],
            "persona_contributions": {},
            "selection_rationale": "",
            "research_results": None,
            "final_chapter": None,
            "error": None
        }
        # Add any existing personas and contributions if they exist, so a
        # re-run can skip the selection/contribution work already done
        if "selected_personas" in chapter_info and chapter_info["selected_personas"]:
            initial_state["selected_personas"] = chapter_info["selected_personas"]
            st.session_state.thinking_logs.append({
                "agent": "System",
                "thought": f"Using pre-selected personas: {', '.join(chapter_info['selected_personas'])}",
                "timestamp": time.time()
            })
        if "contributions" in chapter_info and chapter_info["contributions"]:
            initial_state["persona_contributions"] = chapter_info["contributions"]
            st.session_state.thinking_logs.append({
                "agent": "System",
                "thought": f"Using existing contributions from {len(chapter_info['contributions'])} personas",
                "timestamp": time.time()
            })
        # Run the steps in sequence manually instead of relying on the workflow
        current_state = initial_state
        # Step 1: Research if needed
        if research_query:
            st.session_state.thinking_logs.append({
                "agent": "System",
                "thought": "Starting research phase",
                "timestamp": time.time()
            })
            current_state = conduct_research(current_state)
            if current_state.get("error"):
                return current_state
        # Step 2: Select personas if needed
        if not current_state.get("selected_personas"):
            st.session_state.thinking_logs.append({
                "agent": "System",
                "thought": "Starting persona selection phase",
                "timestamp": time.time()
            })
            current_state = select_personas(current_state)
            if current_state.get("error"):
                return current_state
        # Step 3: Generate contributions
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": "Starting contribution generation phase",
            "timestamp": time.time()
        })
        current_state = generate_all_contributions(current_state)
        if current_state.get("error"):
            return current_state
        # Step 4: Synthesize chapter
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": "Starting chapter synthesis phase",
            "timestamp": time.time()
        })
        current_state = synthesize_chapter(current_state)
        if current_state.get("error"):
            return current_state
        # Update the chapter info with the results (in-place mutation; the
        # caller's chapter_info dict is the persistent record)
        if current_state.get("final_chapter"):
            chapter_info["content"] = current_state["final_chapter"]
        if current_state.get("selected_personas"):
            chapter_info["selected_personas"] = current_state["selected_personas"]
        if current_state.get("selection_rationale"):
            chapter_info["selection_rationale"] = current_state["selection_rationale"]
        if current_state.get("persona_contributions"):
            chapter_info["contributions"] = current_state["persona_contributions"]
        # If research was conducted, add it to the chapter info
        if research_query and current_state.get("research_results"):
            if "research" not in chapter_info:
                chapter_info["research"] = []
            chapter_info["research"].append({
                "query": research_query,
                "results": current_state["research_results"],
                "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": "Workflow completed successfully",
            "timestamp": time.time()
        })
        return current_state
    except Exception as e:
        error_message = f"Error in chapter creation workflow: {str(e)}"
        st.session_state.thinking_logs.append({
            "agent": "System",
            "thought": error_message,
            "timestamp": time.time()
        })
        return {"error": error_message}
# Function to run just the persona selection part
def select_personas_for_chapter(chapter_info, num_personas=5):
    """Simplified interface for legacy code: Select personas for a chapter.

    Returns a (selected_persona_ids, selection_rationale) tuple; both items
    are empty when selection failed.
    """
    initial = ChapterWorkflowState(
        chapter_info=chapter_info,
        num_personas=num_personas
    )
    # Run just the persona selection part
    outcome = select_personas(initial)
    personas = outcome.get("selected_personas", [])
    rationale = outcome.get("selection_rationale", "")
    return personas, rationale
# Function to get research agent (legacy compatibility)
def get_research_agent():
    """Legacy function to maintain compatibility with existing code.

    Returns a minimal object exposing ``invoke({"input": query})`` that
    delegates to conduct_research(), or None when no Tavily key is set.
    """
    # Initialize Tavily search tool if API key is available
    tavily_api_key = os.environ.get("TAVILY_API_KEY", st.session_state.get("tavily_api_key", ""))
    if not tavily_api_key:
        return None

    class SimplifiedResearchAgent:
        def invoke(self, input_data):
            # Wrap the query in a minimal workflow state and reuse the
            # regular research node.
            search_state = ChapterWorkflowState(
                chapter_info={"title": "Research"},
                research_query=input_data["input"]
            )
            outcome = conduct_research(search_state)
            return {"output": outcome.get("research_results", "No results found.")}

    return SimplifiedResearchAgent()
# Legacy function to get persona contribution for a chapter
def get_persona_contribution_legacy(persona_id, chapter_info, research_results=None):
    """Legacy function to maintain compatibility with existing code.

    Returns the persona's contribution text, or None when generation failed.
    """
    legacy_state = ChapterWorkflowState(
        chapter_info=chapter_info,
        research_results=research_results
    )
    outcome = get_persona_contribution(persona_id, legacy_state)
    contributions = outcome.get("persona_contributions", {})
    return contributions.get(persona_id)
# Legacy function to synthesize chapter from persona contributions
def synthesize_chapter_legacy(chapter_info, contributions):
    """Legacy function to maintain compatibility with existing code.

    Returns the synthesized chapter text, or None when synthesis failed.
    """
    legacy_state = ChapterWorkflowState(
        chapter_info=chapter_info,
        persona_contributions=contributions
    )
    outcome = synthesize_chapter(legacy_state)
    return outcome.get("final_chapter")