# Q&A chatbot
# Run as: streamlit run app.py
import os

import streamlit as st
from dotenv import load_dotenv
from langchain.llms import OpenAI

# Pull OPENAI_API_KEY (and any other settings) from a local .env file
# into the process environment before the model is constructed.
load_dotenv()


def get_openai_response(question):
    """Send *question* to OpenAI and return the model's completion text.

    Uses the gpt-3.5-turbo-instruct completion model with temperature 0.0
    so repeated questions get deterministic answers.
    """
    llm = OpenAI(
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        model="gpt-3.5-turbo-instruct",
        temperature=0.0,
    )
    return llm(question)


# --- Streamlit UI ---
st.set_page_config(page_title="Q&A Demo")
st.header("Langchain Application")

# Renamed from `input` — the original shadowed the Python builtin.
user_question = st.text_input("Input: ", key="input")
submit = st.button("Ask a question")

# BUG FIX: the original called get_openai_response() at module level on
# every Streamlit rerun (each keystroke, and on first load with an empty
# question), issuing a paid API call before the button was pressed.
# Query the model only after the user clicks "Ask a question".
if submit:
    st.subheader("Response: ")
    st.write(get_openai_response(user_question))