"""Simple Streamlit Q&A demo backed by an OpenAI chat model via LangChain."""

import os

import streamlit as st
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI  # NOTE(review): unused here — kept for compatibility if other code relies on it
from langchain.schema import AIMessage, HumanMessage, SystemMessage

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()


def get_openai_response(question):
    """Send *question* to the chat model and return the assistant's reply text.

    Args:
        question: The user's question as a plain string.

    Returns:
        The model's response content (str).
    """
    chatllm = ChatOpenAI(
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        temperature=0.6,
        model="gpt-4o-mini",
    )
    response = chatllm(
        [
            SystemMessage(content="You are an AI assistant"),
            HumanMessage(content=question),
        ]
    )
    return response.content


st.set_page_config(page_title="Q&A Demo")
st.header("Langchain Application")

# Renamed from `input` to avoid shadowing the builtin.
user_question = st.text_input("Input: ", key="input")
submit = st.button("Ask the question: ")

# BUGFIX: the original called get_openai_response() unconditionally on every
# Streamlit rerun — hitting the OpenAI API even before the button was pressed
# (and on an empty question at first page load). Only call when submitted.
if submit and user_question:
    st.subheader("The response is: ")
    st.write(get_openai_response(user_question))