"""
LangChain LLMChain for analyzing and refining a research topic with yield support.
"""
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from langchain.callbacks.base import BaseCallbackHandler
import os
from typing import Dict, Any, Generator
from streaming_config import get_chunk_size, is_yield_enabled
# Prompt template for topic analysis.
# {topic} is the single input variable; the template asks the model for one
# refined research question, capped at roughly 500 characters.
TOPIC_ANALYZER_PROMPT = """
You are an expert research assistant. Given a user-provided research topic, refine it for clarity, focus, and academic rigor.
Original topic: {topic}
Instructions:
- Refine the topic into a clear, focused research question
- Keep the response concise and academic
- Make sure response should be generated in 500 characters including spaces.
Refined topic (one sentence):
"""
def get_topic_analyzer_chain():
    """
    Build and return an LLMChain that refines a research topic.

    The chain renders TOPIC_ANALYZER_PROMPT with the caller's ``topic`` and
    stores the model output under the ``refined_topic`` key. Streaming is
    always enabled on the LLM so callers can attach streaming callbacks at
    execution time.
    """
    topic_prompt = PromptTemplate(
        template=TOPIC_ANALYZER_PROMPT,
        input_variables=["topic"],
    )
    # Streaming stays on unconditionally; callbacks are supplied per-run.
    streaming_llm = OpenAI(
        streaming=True,
        temperature=0.3,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
    )
    return LLMChain(llm=streaming_llm, prompt=topic_prompt, output_key="refined_topic")
def yield_topic_analysis(topic: str, preset: str = None) -> Generator[str, None, None]:
    """
    Yield topic analysis results progressively.

    Args:
        topic: The research topic to analyze
        preset: Optional streaming preset name passed to streaming_config

    Yields:
        str: Chunks of the refined-topic text, or a single error string if
        the LLM call fails.
    """
    if not is_yield_enabled(preset):
        # Yielding disabled for this preset: run the chain to completion
        # and emit the whole result as one chunk.
        chain = get_topic_analyzer_chain()
        yield chain.run(topic=topic)
        return
    try:
        # Local import: the chat model is only needed on the streaming path.
        from langchain_openai import ChatOpenAI

        prompt = PromptTemplate(
            input_variables=["topic"],
            template=TOPIC_ANALYZER_PROMPT,
        )
        llm = ChatOpenAI(
            temperature=0.3,
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            streaming=True,
        )
        formatted_prompt = prompt.format(topic=topic)
        chunk_size = get_chunk_size("topic_analyzer", preset)
        # BUG FIX: the original called llm.invoke(), which blocks until the
        # full completion arrives and only then re-chunked the finished text,
        # so nothing was yielded progressively despite streaming=True.
        # llm.stream() yields message chunks as they arrive; buffer them so
        # the configured chunk_size granularity is preserved.
        buffer = ""
        for message_chunk in llm.stream(formatted_prompt):
            buffer += message_chunk.content
            while len(buffer) >= chunk_size:
                yield buffer[:chunk_size]
                buffer = buffer[chunk_size:]
        if buffer:
            yield buffer
    except Exception as e:
        # Best-effort: surface the failure to the consumer as text instead
        # of raising out of the generator (matches original behavior).
        yield f"Error in topic analysis: {str(e)}"
def process_topic_with_feedback(topic: str, feedback: str = None) -> Generator[str, None, None]:
    """
    Process topic analysis with optional feedback using yield generators.

    Args:
        topic: The research topic
        feedback: Optional feedback for refinement

    Yields:
        str: A status message, followed by progressive analysis chunks.
    """
    if feedback:
        # Fold the reviewer feedback into the prompt handed to the analyzer.
        enhanced_prompt = f"""
Original topic: {topic}
Feedback: {feedback}
Please refine the topic considering the feedback provided.
"""
        yield "Processing topic with feedback..."
    else:
        enhanced_prompt = f"Original topic: {topic}"
        yield "Processing topic analysis..."
    # Delegate all remaining chunks to the analysis generator.
    yield from yield_topic_analysis(enhanced_prompt)