|
|
""" |
|
|
LangChain LLMChain for analyzing and refining a research topic with yield support. |
|
|
""" |
|
|
from langchain.chains import LLMChain |
|
|
from langchain.prompts import PromptTemplate |
|
|
from langchain_openai import OpenAI |
|
|
from langchain.callbacks.base import BaseCallbackHandler |
|
|
import os |
|
|
from typing import Dict, Any, Generator |
|
|
from streaming_config import get_chunk_size, is_yield_enabled |
|
|
|
|
|
|
|
|
# Prompt template for the topic-analyzer chain. Exposes a single
# input variable, {topic}, filled in by PromptTemplate.format().
TOPIC_ANALYZER_PROMPT = """
You are an expert research assistant. Given a user-provided research topic, refine it for clarity, focus, and academic rigor.

Original topic: {topic}

Instructions:
- Refine the topic into a clear, focused research question
- Keep the response concise and academic
- Keep the response to at most 500 characters, including spaces.

Refined topic (one sentence):
"""
|
|
|
|
|
def get_topic_analyzer_chain():
    """Build an ``LLMChain`` that refines a research topic.

    Returns:
        LLMChain: chain with a single ``topic`` input variable and a
        ``refined_topic`` output key, backed by a streaming OpenAI LLM.
    """
    topic_template = PromptTemplate(
        input_variables=["topic"],
        template=TOPIC_ANALYZER_PROMPT,
    )

    # streaming=True lets callbacks receive tokens incrementally;
    # low temperature keeps the refinement focused and reproducible.
    streaming_llm = OpenAI(
        streaming=True,
        temperature=0.3,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
    )

    return LLMChain(llm=streaming_llm, prompt=topic_template, output_key="refined_topic")
|
|
|
|
|
def yield_topic_analysis(topic: str, preset: str = None) -> Generator[str, None, None]:
    """Yield topic analysis results progressively.

    Args:
        topic: The research topic to analyze.
        preset: Optional streaming preset name, forwarded to the
            streaming_config helpers.

    Yields:
        str: Progressive analysis results. On failure, a single
        ``"Error in topic analysis: ..."`` message is yielded instead
        of raising.
    """
    if not is_yield_enabled(preset):
        # Streaming disabled: run the chain to completion and yield
        # the full result as one item.
        chain = get_topic_analyzer_chain()
        result = chain.run(topic=topic)
        yield result
        return

    try:
        # ChatOpenAI is only needed on the streaming path, so import it
        # lazily. (PromptTemplate and os are already module-level imports;
        # the previous redundant re-imports were removed.)
        from langchain_openai import ChatOpenAI

        prompt = PromptTemplate(
            input_variables=["topic"],
            template=TOPIC_ANALYZER_PROMPT
        )

        llm = ChatOpenAI(
            temperature=0.3,
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            streaming=True
        )

        formatted_prompt = prompt.format(topic=topic)

        # Chunk size is preset-dependent; "topic_analyzer" selects this
        # chain's section of the streaming configuration.
        chunk_size = get_chunk_size("topic_analyzer", preset)

        # NOTE(review): invoke() returns the full completion at once;
        # the "streaming" here is fixed-size re-chunking of that result.
        result = llm.invoke(formatted_prompt)
        content = result.content

        for i in range(0, len(content), chunk_size):
            yield content[i:i + chunk_size]

    except Exception as e:
        # Surface errors to the consumer as a yielded message rather
        # than breaking the generator pipeline.
        yield f"Error in topic analysis: {str(e)}"
|
|
|
|
|
def process_topic_with_feedback(topic: str, feedback: str = None, preset: str = None) -> Generator[str, None, None]:
    """Process topic analysis with optional feedback using yield generators.

    Args:
        topic: The research topic.
        feedback: Optional feedback for refinement.
        preset: Optional streaming preset, forwarded to
            ``yield_topic_analysis`` (new, defaults to None for
            backward compatibility).

    Yields:
        str: A status message first, then progressive analysis results.
    """
    if feedback:
        # Fold the feedback into the text handed to the analyzer so the
        # LLM can take it into account.
        enhanced_prompt = f"""
Original topic: {topic}
Feedback: {feedback}

Please refine the topic considering the feedback provided.
"""
        yield "Processing topic with feedback..."
    else:
        enhanced_prompt = f"Original topic: {topic}"
        yield "Processing topic analysis..."

    # Delegate the actual analysis; previously the preset could not be
    # forwarded, so streaming configuration was silently ignored here.
    yield from yield_topic_analysis(enhanced_prompt, preset)
|
|
|