# Research_Draft_Generator / agents / topic_analyzer.py
# Author: anushkap01patidar
# Commit 4505501: "Fix runtime errors: Update imports and dependencies"
"""
LangChain LLMChain for analyzing and refining a research topic with yield support.
"""
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from langchain.callbacks.base import BaseCallbackHandler
import os
from typing import Dict, Any, Generator
from streaming_config import get_chunk_size, is_yield_enabled
# Prompt template for topic analysis
# NOTE: runtime prompt text sent verbatim to the LLM (with {topic} filled in)
# by get_topic_analyzer_chain / yield_topic_analysis — keep wording intact.
# It requests a one-sentence refinement of at most ~500 characters.
TOPIC_ANALYZER_PROMPT = """
You are an expert research assistant. Given a user-provided research topic, refine it for clarity, focus, and academic rigor.
Original topic: {topic}
Instructions:
- Refine the topic into a clear, focused research question
- Keep the response concise and academic
- Make sure response should be generated in 500 characters including spaces.
Refined topic (one sentence):
"""
def get_topic_analyzer_chain():
    """Build and return an LLMChain that refines a research topic.

    The chain fills TOPIC_ANALYZER_PROMPT with a ``topic`` input and
    exposes its result under the ``refined_topic`` output key.
    """
    analyzer_prompt = PromptTemplate(
        template=TOPIC_ANALYZER_PROMPT,
        input_variables=["topic"],
    )
    # Streaming is always on; callers attach callbacks at execution time.
    analyzer_llm = OpenAI(
        temperature=0.3,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        streaming=True,
    )
    return LLMChain(llm=analyzer_llm, prompt=analyzer_prompt, output_key="refined_topic")
def yield_topic_analysis(topic: str, preset: str = None) -> Generator[str, None, None]:
    """
    Yield topic analysis results progressively.

    Args:
        topic: The research topic to analyze
        preset: Optional streaming preset (controls yield mode and chunk size)

    Yields:
        str: The full analysis as one string when yield mode is disabled for
        the preset; otherwise the analysis text in fixed-size chunks. On
        failure, a single "Error in topic analysis: ..." string is yielded.
    """
    if not is_yield_enabled(preset):
        # Fallback to non-yield processing: run the chain once and emit
        # the whole result as a single item.
        chain = get_topic_analyzer_chain()
        result = chain.run(topic=topic)
        yield result
        return
    try:
        # Local import: the module-level import provides only the
        # completion-style OpenAI class; the chat model is needed here.
        from langchain_openai import ChatOpenAI

        # PromptTemplate and os are already in scope from the module level;
        # no need to re-import them here.
        prompt = PromptTemplate(
            input_variables=["topic"],
            template=TOPIC_ANALYZER_PROMPT,
        )
        llm = ChatOpenAI(
            temperature=0.3,
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            streaming=True,
        )
        formatted_prompt = prompt.format(topic=topic)

        # Guard against a non-positive chunk size from config: range() with a
        # step <= 0 raises ValueError, which the except below would disguise
        # as a model failure.
        chunk_size = max(1, get_chunk_size("topic_analyzer", preset))

        # The model call completes first; the finished response is then
        # re-emitted in fixed-size chunks to present progressive output.
        result = llm.invoke(formatted_prompt)
        content = result.content
        for start in range(0, len(content), chunk_size):
            yield content[start:start + chunk_size]
    except Exception as e:
        # Best-effort contract: report the failure to the consumer as a
        # text chunk rather than raising through the generator.
        yield f"Error in topic analysis: {str(e)}"
def process_topic_with_feedback(topic: str, feedback: str = None, preset: str = None) -> Generator[str, None, None]:
    """
    Process topic analysis with optional feedback using yield generators.

    Args:
        topic: The research topic
        feedback: Optional feedback for refinement
        preset: Optional streaming preset, forwarded to yield_topic_analysis.
                (Previously there was no way to pass a preset here, so
                chunking configuration was silently ignored downstream.)

    Yields:
        str: A status line first, then the progressive analysis chunks.
    """
    if feedback:
        # Fold the feedback into the text handed to the analyzer.
        enhanced_prompt = f"""
Original topic: {topic}
Feedback: {feedback}
Please refine the topic considering the feedback provided.
"""
        yield "Processing topic with feedback..."
    else:
        enhanced_prompt = f"Original topic: {topic}"
        yield "Processing topic analysis..."
    # Forward the preset so yield/chunk configuration is honored downstream.
    for chunk in yield_topic_analysis(enhanced_prompt, preset):
        yield chunk