from helpers.utils import create_or_load_vectore_store
from helpers.import_envs import openai_api_key
from langchain_openai import ChatOpenAI
from langchain.schema import StrOutputParser
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain.prompts import ChatPromptTemplate
from helpers.model_utils import set_question_answer_llm
def answer_question(question, transcript_file_name, llm_choice=None):
    """Answer a question about a transcript using a retrieval-augmented chain.

    Builds (or loads) a vector store for the transcript, retrieves relevant
    chunks as context, and asks the chosen chat model to answer.

    Args:
        question: The user's question, passed verbatim into the prompt.
        transcript_file_name: Transcript file used to create/load the vector store.
        llm_choice: Optional model selector forwarded to set_question_answer_llm;
            None lets that helper pick its default.

    Returns:
        The model's answer as a plain string (also printed to stdout).
    """
    question_answer_llm = set_question_answer_llm(llm_choice)
    # Create or load the vector store backing retrieval for this transcript.
    vector_store = create_or_load_vectore_store(transcript_file_name=transcript_file_name)
    # Prompt template that combines the retrieved documents ({context})
    # with the user's {question}.
    prompt_template = ChatPromptTemplate.from_template(
        "Answer the {question} based on the following {context}."
    )
    retriever = vector_store.as_retriever()
    # Parser that extracts the model's reply as a plain string.
    parser = StrOutputParser()
    # LCEL chain: the retriever fills {context} from the invoked input,
    # RunnablePassthrough forwards the raw question into {question},
    # then prompt -> LLM -> string parser.
    runnable_chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt_template
        | question_answer_llm
        | parser  # reuse the parser instance instead of constructing a second one
    )
    answer = runnable_chain.invoke(question)
    print(answer)
    return answer