Spaces:
Sleeping
Sleeping
## Q&A chatbot
from langchain.llms import OpenAI
from dotenv import load_dotenv

load_dotenv()  # take the environment variable from .env (e.g. the OpenAI API key)

import streamlit as st
from langchain.chat_models import ChatOpenAI
import os
| ##function to load OpenAI model and get responses | |
def got_openai_responses(question):
    """Send *question* to the OpenAI completion model and return its answer.

    Parameters
    ----------
    question : str
        User question, forwarded verbatim to the model.

    Returns
    -------
    str
        The raw completion text produced by the model.
    """
    # NOTE(review): the env var is read as "OPEN_API_KEY"; the conventional
    # name is "OPENAI_API_KEY" — confirm which key the .env file actually
    # defines, otherwise `os.getenv` returns None here.
    llm = OpenAI(
        openai_api_key=os.getenv("OPEN_API_KEY"),
        model_name="gpt-3.5-turbo-instruct",
        temperature=0.5,
    )
    # Calling the LLM object directly issues one completion request.
    return llm(question)
# Initialize the Streamlit app.
st.set_page_config(page_title="Q&A Demo")
st.header("Langchain Application")

# Renamed from `input` to avoid shadowing the builtin; the widget key is
# unchanged ("input") so Streamlit session state is unaffected.
question = st.text_input("input :", key="input")
submit = st.button("Ask the question")

## if ask button is clicked
# FIX: the original called got_openai_responses() unconditionally on every
# Streamlit rerun — one paid API call per rerun, even before the button was
# clicked and even with an empty question. Call the model only after the
# button is pressed and a non-empty question was entered.
if submit and question:
    response = got_openai_responses(question)
    st.subheader("the Responses is")
    st.write(response)
# NOTE(review): the triple-quoted string below is a commented-out improved
# rewrite of the script above (uses ChatOpenAI + .invoke and gates the API
# call behind the button). As written it is a no-op expression statement
# kept for reference only — consider deleting it or promoting it to replace
# the live code.
'''import os
import streamlit as st
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
load_dotenv() # load environment variables from .env
# Function to get response from OpenAI
def got_openai_responses(question):
    llm = ChatOpenAI(openai_api_key=os.getenv("OPEN_API_KEY"), model_name="gpt-3.5-turbo", temperature=0.5)
    response = llm.invoke(question)
    return response.content if hasattr(response, "content") else response
# Streamlit UI
st.set_page_config(page_title="Q&A Demo")
st.header("LangChain Chatbot")
input = st.text_input("Ask your question:", key="input")
submit = st.button("Ask")
if submit and input:
    response = got_openai_responses(input)
    st.subheader("Response:")
    st.write(response)'''