Spaces:
Sleeping
Sleeping
# Standard library
import os

# Third-party
import streamlit as st
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI  # chat-model wrapper (gpt-3.5-turbo etc.)

# Pull OPENAI_API_KEY (and any other settings) from a local .env file
# into the process environment before anything reads it.
load_dotenv()
# Function to load the OpenAI chat model and get a response for a question.
def get_openai_response(question):
    """Send *question* to gpt-3.5-turbo and return the reply text.

    Args:
        question: The user's question as a plain string.

    Returns:
        The model's answer as a string (``response.content``).

    Raises:
        RuntimeError: If OPENAI_API_KEY is not set in the environment.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        # Fail fast with a clear message instead of letting ChatOpenAI
        # surface an opaque authentication error downstream.
        raise RuntimeError("OPENAI_API_KEY is not set; add it to your .env file.")

    # Use ChatOpenAI for chat models like gpt-3.5-turbo.
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0.5,
        openai_api_key=api_key,
    )
    # invoke() is the supported entry point; __call__() is deprecated in LangChain.
    response = llm.invoke(question)
    return response.content
# Initialize the Streamlit app (page config must be the first st.* call).
st.set_page_config(page_title="Q&A Demo")
st.header("LangChain Application")

# NOTE: renamed from "input", which shadowed the builtin of the same name.
user_question = st.text_input("Input: ", key="input")

submit = st.button("Ask the question")

# Only call the (metered) OpenAI API after the user clicks the button AND
# has typed something — previously the model was queried on every script
# rerun, including the initial load with an empty question.
if submit and user_question:
    st.subheader("The response is")
    st.write(get_openai_response(user_question))