# Commit a19d764 (Aakash326): Update model from llama-3.1-70b-versatile to openai/gpt-oss-120b
import gradio as gr
import os
from typing import Annotated, List
import operator
from typing_extensions import TypedDict
from pydantic import BaseModel, Field
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_groq import ChatGroq
from langgraph.graph import StateGraph, START, END
from langgraph.constants import Send
# Initialize LLM
def initialize_llm(api_key: str):
    """Initialize the Groq LLM.

    Args:
        api_key: Groq API key (typically "gsk_..." from console.groq.com).

    Returns:
        A configured ChatGroq chat model.
    """
    # Keep exporting the env var for any downstream code that reads it,
    # but also pass the key explicitly so the client does not depend on
    # process-global state alone.
    os.environ["GROQ_API_KEY"] = api_key
    return ChatGroq(model="openai/gpt-oss-120b", temperature=0.7, api_key=api_key)
# Schema for structured output
class Section(BaseModel):
    """One planned section of the content (structured-output schema for the planner)."""
    name: str = Field(description="Name for this section")
    description: str = Field(description="Brief overview of what this section should contain")
class Sections(BaseModel):
    """Container for the full ordered plan produced by the orchestrator."""
    sections: List[Section] = Field(description="Sections of the content")
# Graph state
class State(TypedDict):
    """Shared graph state flowing orchestrator -> workers -> synthesizer."""
    content_type: str  # story, joke, or poem
    topic: str  # User's topic
    sections: list[Section]  # Plan produced by the orchestrator
    completed_sections: Annotated[list, operator.add]  # All workers write here; merged via operator.add
    final_content: str  # Final output assembled by the synthesizer
# Worker state
class WorkerState(TypedDict):
    """Per-worker state: one section plus the context needed to write it."""
    section: Section  # The single section this worker is responsible for
    content_type: str  # story, joke, or poem (mirrors State.content_type)
    topic: str  # User's topic (mirrors State.topic)
    completed_sections: Annotated[list, operator.add]  # Worker output, merged back into State
# Node functions
def orchestrator(state: State, llm):
    """Orchestrator that generates a plan for the content"""
    # Per-type planning instructions; unknown types fall back to the story planner.
    planning_prompts = {
        "story": "Generate a plan for a creative story with multiple sections (introduction, rising action, climax, resolution).",
        "joke": "Generate a plan for a joke with setup and punchline sections.",
        "poem": "Generate a plan for a poem with multiple stanzas/verses."
    }
    system_prompt = planning_prompts.get(state['content_type'], planning_prompts['story'])
    # Ask the model for a structured Sections object rather than free text.
    planner = llm.with_structured_output(Sections)
    plan = planner.invoke([
        SystemMessage(content=system_prompt),
        HumanMessage(content=f"Here is the topic: {state['topic']}")
    ])
    return {"sections": plan.sections}
def worker(state: WorkerState, llm):
    """Worker writes one section of the content"""
    # Writing style per content type; unknown types fall back to story style.
    writing_styles = {
        "story": "Write an engaging story section following the provided name and description. Use vivid imagery and narrative techniques. Use markdown formatting.",
        "joke": "Write a funny joke section following the provided name and description. Be witty and entertaining.",
        "poem": "Write a poetic verse following the provided name and description. Use poetic devices and rhythm. Use markdown formatting."
    }
    system_instruction = writing_styles.get(state['content_type'], writing_styles['story'])
    section = state['section']
    user_prompt = f"Topic: {state['topic']}\nSection name: {section.name}\nDescription: {section.description}"
    response = llm.invoke([
        SystemMessage(content=system_instruction),
        HumanMessage(content=user_prompt)
    ])
    # Single-element list merges into State.completed_sections via operator.add.
    return {"completed_sections": [response.content]}
def assign_workers(state: State):
    """Assign a worker to each section via Send API"""
    # One Send per planned section; LangGraph runs the workers in parallel.
    dispatches = []
    for section in state["sections"]:
        payload = {
            "section": section,
            "content_type": state['content_type'],
            "topic": state['topic']
        }
        dispatches.append(Send("worker", payload))
    return dispatches
def synthesizer(state: State, llm):
    """Synthesize final content from all sections"""
    # Jokes flow together with blank lines; stories/poems get horizontal rules.
    separator = "\n\n" if state['content_type'] == 'joke' else "\n\n---\n\n"
    body = separator.join(state["completed_sections"])
    # Prepend a type-specific markdown title (empty prefix for unknown types).
    titles = {
        "story": f"# 📖 Story: {state['topic']}\n\n",
        "joke": f"# 😄 Joke: {state['topic']}\n\n",
        "poem": f"# ✨ Poem: {state['topic']}\n\n"
    }
    return {"final_content": titles.get(state['content_type'], "") + body}
# Build the graph
def build_graph(llm):
    """Build the orchestrator-worker graph"""
    graph_builder = StateGraph(State)

    # Named closures bind the shared llm into each node callable.
    def run_orchestrator(state):
        return orchestrator(state, llm)

    def run_worker(state):
        return worker(state, llm)

    def run_synthesizer(state):
        return synthesizer(state, llm)

    graph_builder.add_node("orchestrator", run_orchestrator)
    graph_builder.add_node("worker", run_worker)
    graph_builder.add_node("synthesizer", run_synthesizer)

    # Wiring: START -> orchestrator -> fan-out to workers (via Send) -> synthesizer -> END
    graph_builder.add_edge(START, "orchestrator")
    graph_builder.add_conditional_edges("orchestrator", assign_workers, ["worker"])
    graph_builder.add_edge("worker", "synthesizer")
    graph_builder.add_edge("synthesizer", END)
    return graph_builder.compile()
# Main generation function
def generate_content(content_type: str, topic: str, api_key: str, progress=gr.Progress()):
    """Generate content of the requested type via the orchestrator-worker graph.

    Args:
        content_type: One of "story", "joke", "poem".
        topic: User-supplied topic for the content.
        api_key: Groq API key; forwarded to initialize_llm.
        progress: Gradio progress tracker (gr.Progress() as a default is the
            documented Gradio pattern, not an accidental mutable default).

    Returns:
        Markdown string with the final content, or a user-facing error message.
    """
    # Reject missing *or whitespace-only* inputs; the original check let
    # strings like "   " through.
    if not api_key or not api_key.strip():
        return "⚠️ Please enter your Groq API key to continue."
    if not topic or not topic.strip():
        return "⚠️ Please enter a topic."
    try:
        progress(0, desc="Initializing...")
        # Strip stray whitespace (common with copy-pasted keys) before use.
        llm = initialize_llm(api_key.strip())
        progress(0.2, desc="Building workflow...")
        graph = build_graph(llm)
        progress(0.4, desc=f"Planning {content_type}...")
        # Invoke the graph end-to-end: plan -> parallel sections -> synthesis.
        result = graph.invoke({
            "content_type": content_type,
            "topic": topic
        })
        progress(1.0, desc="Complete!")
        return result["final_content"]
    except Exception as e:
        # Surface failures to the UI instead of crashing the Gradio handler.
        return f"❌ Error: {str(e)}\n\nPlease check your API key and try again."
# Custom CSS
custom_css = """
#main-container {
max-width: 1200px;
margin: auto;
}
.gradio-container {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
#output-box {
min-height: 400px;
}
.header {
text-align: center;
padding: 20px;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
border-radius: 10px;
margin-bottom: 20px;
}
"""
# Build Gradio interface
# Gradio UI: header + how-it-works blurb, then a two-column layout
# (inputs on the left, rendered markdown output on the right).
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    gr.HTML("""
    <div class="header">
        <h1>🚀 AI Content Generator</h1>
        <p>Powered by LangGraph Orchestrator-Worker Pattern & Groq API</p>
    </div>
    """)
    gr.Markdown("""
    ## How it works:
    This app uses an **Orchestrator-Worker** workflow pattern:
    1. 🎯 **Orchestrator** analyzes your topic and creates a content plan
    2. 👷 **Workers** generate each section in parallel
    3. 🎨 **Synthesizer** combines everything into the final output
    ### Get Started:
    """)
    with gr.Row():
        # Left column: credentials, content-type selector, topic, trigger button.
        with gr.Column(scale=1):
            api_key_input = gr.Textbox(
                label="🔑 Groq API Key",
                placeholder="Enter your Groq API key (gsk_...)",
                type="password",
                info="Get your free API key at https://console.groq.com"
            )
            content_type = gr.Radio(
                choices=["story", "joke", "poem"],
                label="📝 Content Type",
                value="story",
                info="Choose what type of content to generate"
            )
            topic_input = gr.Textbox(
                label="💡 Topic",
                placeholder="E.g., 'A robot learning to love', 'AI taking over the world', 'Moonlight'",
                info="What should the content be about?"
            )
            generate_btn = gr.Button("✨ Generate Content", variant="primary", size="lg")
            gr.Markdown("""
            ### 📌 Examples:
            - **Story**: "A time traveler's dilemma"
            - **Joke**: "Programming bugs"
            - **Poem**: "Autumn leaves"
            """)
        # Right column: generated markdown output plus footer notes.
        with gr.Column(scale=2):
            output = gr.Markdown(
                label="Generated Content",
                value="Your generated content will appear here...",
                elem_id="output-box"
            )
            gr.Markdown("""
            ---
            ### 🔧 About the Technology:
            - **LangGraph**: Orchestrator-Worker workflow for intelligent task decomposition
            - **Groq**: Ultra-fast LLM inference
            - **Dynamic Planning**: AI determines the optimal structure for your content
            - **Parallel Processing**: Multiple sections generated simultaneously
            ### 🌟 Features:
            - ✅ Smart content planning based on type and topic
            - ✅ Parallel section generation for speed
            - ✅ Automatic synthesis into cohesive output
            - ✅ Support for stories, jokes, and poems
            ---
            Built with ❤️ using LangGraph | [Learn More](https://langchain-ai.github.io/langgraph/)
            """)
    # Event handler: button click runs generate_content with the three inputs.
    generate_btn.click(
        fn=generate_content,
        inputs=[content_type, topic_input, api_key_input],
        outputs=output
    )
    # Example inputs (api_key column deliberately left blank — users supply their own).
    gr.Examples(
        examples=[
            ["story", "A detective solving a mystery in space", ""],
            ["joke", "Artificial Intelligence and coffee", ""],
            ["poem", "The beauty of neural networks", ""],
            ["story", "An adventure in a magical library", ""],
            ["joke", "Debugging code at 3 AM", ""],
        ],
        inputs=[content_type, topic_input, api_key_input],
    )
# Launch the app
# Launch the app when run as a script.
if __name__ == "__main__":
    # Bind on all interfaces (container-friendly) at the Gradio default port.
    launch_options = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "share": False,
    }
    demo.launch(**launch_options)