# Commit 4505501 (anushkap01patidar): "Fix runtime errors: Update imports and dependencies"
# agents/draft_writer.py
import os
import re
from typing import Any, Dict, Generator

from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI

from streaming_config import get_chunk_size, is_yield_enabled
"""
LangChain LLMChain for writing a draft based on the outline and research notes with yield support.
"""
DRAFT_WRITER_PROMPT = """
You are an expert academic writer. Given a research paper outline and research notes, write a comprehensive draft of the paper. Use clear academic language and expand on each section of the outline.
Outline:
{outline}
Research notes:
{research_notes}
Instructions:
- Write a comprehensive draft that fully develops each section of the outline
- Use section headings and include inline citations like [1], [2] where appropriate
- Do not include a bibliography section
- Ensure the draft is complete and covers all points from the outline
Draft:
"""
def get_draft_writer_chain():
    """
    Build and return the LLMChain that writes a paper draft.

    Returns:
        LLMChain: A chain that takes "outline" and "research_notes" inputs
        and produces the draft under the "draft" output key.
    """
    draft_prompt = PromptTemplate(
        template=DRAFT_WRITER_PROMPT,
        input_variables=["outline", "research_notes"],
    )
    # Streaming stays on unconditionally; callers attach callbacks at run time.
    completion_llm = OpenAI(
        streaming=True,
        temperature=0.3,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
    )
    return LLMChain(prompt=draft_prompt, llm=completion_llm, output_key="draft")
def yield_draft_writing(outline: str, research_notes: str, preset: str = None) -> Generator[str, None, None]:
    """
    Yield draft writing results progressively.

    Args:
        outline: The research paper outline.
        research_notes: The research notes.
        preset: Optional streaming preset name, forwarded to the
            streaming_config helpers.

    Yields:
        str: Progressive draft content in chunks of the configured size,
        or the full draft in a single yield when yielding is disabled.
    """
    if not is_yield_enabled(preset):
        # Fallback: run the whole chain once and emit the result in one yield.
        chain = get_draft_writer_chain()
        result = chain.run(outline=outline, research_notes=research_notes)
        yield result
        return
    try:
        # ChatOpenAI is imported lazily because the chat model is only needed
        # on this path. PromptTemplate and os are already imported at module
        # level, so the redundant local re-imports were removed.
        from langchain_openai import ChatOpenAI

        prompt = PromptTemplate(
            input_variables=["outline", "research_notes"],
            template=DRAFT_WRITER_PROMPT
        )
        llm = ChatOpenAI(
            temperature=0.3,
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            streaming=True
        )
        formatted_prompt = prompt.format(outline=outline, research_notes=research_notes)
        # Chunk size is configured per pipeline step ("draft_writer").
        chunk_size = get_chunk_size("draft_writer", preset)
        # The model returns the whole completion; re-emit it in fixed-size
        # chunks so consumers see progressive output.
        result = llm.invoke(formatted_prompt)
        content = result.content
        for i in range(0, len(content), chunk_size):
            yield content[i:i + chunk_size]
    except Exception as e:
        # Best-effort: surface the failure to the consumer as text instead of
        # raising through the generator.
        yield f"Error in draft writing: {str(e)}"
def yield_draft_by_sections(outline: str, research_notes: str) -> Generator[str, None, None]:
    """
    Yield draft writing organized by sections parsed from the outline.

    Args:
        outline: The research paper outline.
        research_notes: The research notes.

    Yields:
        str: A status message, then per-section markers and progressive
        draft content; falls back to a single complete draft when no
        section headings are detected.
    """
    # A section heading is a markdown heading ("# ...") or any numbered item
    # ("1.", "2.", ... "12."). The previous hard-coded checks only recognized
    # "1." through "5.", silently dropping later sections.
    heading = re.compile(r"^(?:#|\d+\.)")
    sections = []
    for raw_line in outline.split('\n'):
        line = raw_line.strip()
        if line and heading.match(line):
            sections.append(line)
    if not sections:
        # No recognizable headings: write the draft as a single unit.
        yield "Writing complete draft..."
        for chunk in yield_draft_writing(outline, research_notes):
            yield chunk
        return
    yield f"Writing draft with {len(sections)} sections..."
    for i, section in enumerate(sections, 1):
        yield f"\n--- Section {i}: {section} ---"
        # Ask the writer to focus on this section while keeping the full
        # outline and notes available as context.
        section_prompt = f"""
Write the content for this specific section of the research paper:
Section: {section}
Full Outline: {outline}
Research Notes: {research_notes}
Focus on developing this section comprehensively.
"""
        for chunk in yield_draft_writing(section_prompt, research_notes):
            yield chunk
        yield f"\n--- Section {i} Complete ---\n"
def yield_draft_with_style(outline: str, research_notes: str, style: str = "academic") -> Generator[str, None, None]:
    """
    Progressively emit a draft written in a particular style.

    Args:
        outline: The research paper outline.
        research_notes: The research notes.
        style: Writing style (academic, technical, accessible, concise);
            unrecognized values fall back to "academic".

    Yields:
        str: A status message followed by chunks of the styled draft.
    """
    # Known styles map to an explicit instruction for the model; anything
    # unrecognized falls back to the academic instruction.
    known_styles = {
        "academic": "Use formal academic language with proper citations and scholarly tone.",
        "technical": "Focus on technical details and methodology with precise terminology.",
        "accessible": "Use clear, accessible language suitable for broader audiences.",
        "concise": "Write in a concise, direct manner with minimal elaboration."
    }
    chosen_instruction = known_styles.get(style, known_styles["academic"])
    yield f"Writing draft in {style} style..."
    # Embed the style requirement directly into the writing prompt.
    styled_prompt = f"""
You are an expert academic writer. Given a research paper outline and research notes, write a comprehensive draft of the paper.
Style requirement: {chosen_instruction}
Outline:
{outline}
Research notes:
{research_notes}
Instructions:
- Write a comprehensive draft that fully develops each section of the outline
- Use section headings and include inline citations like [1], [2] where appropriate
- Do not include a bibliography section
- Ensure the draft is complete and covers all points from the outline
- Follow the specified style: {style}
Draft:
"""
    # Delegate chunked generation to the core writer; the prompt already
    # embeds the notes, so none are passed separately.
    for chunk in yield_draft_writing(styled_prompt, ""):
        yield chunk
def process_draft_with_revisions(outline: str, research_notes: str, revisions: list = None) -> Generator[str, None, None]:
    """
    Progressively emit a draft, optionally folding in revision requests.

    Args:
        outline: The research paper outline.
        research_notes: The research notes.
        revisions: Optional list of revision requests to incorporate.

    Yields:
        str: A status message (when revisions are given) followed by chunks
        of the resulting draft.
    """
    if not revisions:
        # No revisions requested: a plain outline + notes prompt suffices.
        combined_prompt = f"Outline: {outline}\nResearch notes: {research_notes}"
    else:
        yield f"Applying {len(revisions)} revision requests..."
        # Render the requests as a bulleted list inside the prompt.
        bullet_lines = [f"- {rev}" for rev in revisions]
        revision_text = "\n".join(bullet_lines)
        combined_prompt = f"""
Outline: {outline}
Research notes: {research_notes}
Revision requests:
{revision_text}
Please incorporate these revision requests into the draft.
"""
    # Delegate chunked generation to the core writer.
    for chunk in yield_draft_writing(combined_prompt, ""):
        yield chunk