# Source: Hugging Face Space by DexterSptizu — "Update app.py" (commit 9fad4e4, verified)
#https://python.langchain.com/docs/how_to/parallel/
import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
def process_parallel(topic, api_key):
    """Generate a joke and a 2-line poem about *topic* concurrently.

    Builds two prompt | model chains and runs them through RunnableParallel,
    so both LLM calls are dispatched on a single ``invoke`` instead of
    sequentially.

    Args:
        topic: Subject for the joke and the poem.
        api_key: OpenAI API key used to authenticate the model calls.

    Returns:
        A formatted string with both results, or an ``"Error: ..."`` string on
        failure — this function never raises, so Gradio always gets text back.
    """
    # Fail fast on unusable input instead of spending an API round-trip.
    if not topic or not topic.strip():
        return "Error: please enter a topic."
    if not api_key or not api_key.strip():
        return "Error: please provide an OpenAI API key."
    try:
        model = ChatOpenAI(
            openai_api_key=api_key.strip(),
            model="gpt-4o-mini",
            temperature=0.7,
        )
        # Two independent chains sharing the same model instance.
        joke_chain = ChatPromptTemplate.from_template("tell me a joke about {topic}") | model
        poem_chain = ChatPromptTemplate.from_template("write a 2-line poem about {topic}") | model
        # RunnableParallel runs both chains concurrently on one invoke.
        parallel_chain = RunnableParallel(
            joke=joke_chain,
            poem=poem_chain
        )
        result = parallel_chain.invoke({"topic": topic})
        return f"""
Joke: {result['joke'].content}
Poem: {result['poem'].content}
"""
    except Exception as e:
        # Surface any API/auth/network error as text in the UI.
        return f"Error: {str(e)}"
# Gradio UI: topic + API key text inputs feeding process_parallel,
# with one multiline textbox showing the combined joke/poem output.
demo = gr.Interface(
    title="LangChain Parallel Processing Demo",
    description="Generates a joke and poem about your topic simultaneously",
    fn=process_parallel,
    inputs=[
        gr.Textbox(label="Topic", placeholder="Enter a topic..."),
        gr.Textbox(label="OpenAI API Key", type="password"),
    ],
    outputs=gr.Textbox(label="Results", lines=6),
)
# Script entry point: launch the Gradio server (standard HF Spaces pattern).
if __name__ == "__main__":
    demo.launch()