import os

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# Fail fast at import time (KeyError) if the key is not set, rather than
# failing later inside the first model call. Load it into the environment
# beforehand (e.g. via python-dotenv) if you keep it in a .env file.
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']


def my_chain(retriever, question):
    """Answer *question* using an LLM constrained to context from *retriever*.

    Builds a LangChain LCEL pipeline: the retriever supplies the context
    for the question, the prompt instructs the model to answer only from
    that context, and the model output is reduced to a plain string.

    Args:
        retriever: A LangChain retriever (a Runnable mapping a query
            string to context documents).
        question: The user's question as a string.

    Returns:
        The model's answer as a plain string.

    Raises:
        Whatever the underlying model/retriever calls raise (e.g. network
        or authentication errors from the OpenAI API).
    """
    template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
    prompt = ChatPromptTemplate.from_template(template)

    # temperature=0 for deterministic, extractive answers; streaming=True
    # also lets callers consume tokens incrementally via chain.stream().
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        streaming=True,
    )

    # The dict's values run on the same input: the retriever fetches
    # context for the question while RunnablePassthrough forwards the
    # question itself unchanged into the prompt.
    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    return chain.invoke(question)