# agents.py — agent-chat-mp2-3
# Last change: "update greeting" (commit e77b0bc, Caseyrmorrison)
from operator import itemgetter
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_core.prompts import ChatPromptTemplate
from langchain.prompts.prompt import PromptTemplate
class Obnoxious_Agent:
    """Classifies whether a user's text is obnoxious/inappropriate.

    The LLM is instructed to answer strictly "Yes" or "No";
    ``extract_action`` converts that answer into a bool.
    """

    def __init__(self, client) -> None:
        # LLM client used by the caller to run the rendered prompt.
        self.client = client
        # Keep the raw template separate from the rendered prompt.
        # BUG FIX: the original stored the rendered prompt back into the
        # template attribute, so a second set_prompt() call formatted a
        # string whose {context} placeholder was already consumed and the
        # new query was silently dropped.
        self._template = """
Answer STRICTLY with "Yes" or "No".
As an AI trained in understanding human language nuances and emotions,
you are tasked with evaluating the sentiment and appropriateness of the text provided below.
Please analyze the tone, the specific words and phrases used, and the overall context.
Your goal is to discern whether the text comes across as obnoxious, rude, or inappropriate in any way.
After your analysis, respond with \"Yes\" if you find the text to be generally obnoxious or inappropriate,
and \"No\" if the text seems generally acceptable and not obnoxious. "
If the user asks a simple question, you should not flag it as obnoxious unless it contains inappropriate language or content.
For example: "test" would not be considered obnoxious, but "crap" would be.
Only mark as obnoxious if the text is clearly inappropriate or offensive.
Keep in mind the subtleties of human communication and consider the potential for misunderstanding or misinterpretation.
context: {context}
"""
        # Backward-compatible attribute: holds the template until
        # set_prompt() is called, then the rendered prompt.
        self.prompt = self._template

    def set_prompt(self, query=None):
        """Render the template with *query* and return the full prompt.

        Safe to call repeatedly; each call re-renders from the template.
        """
        self.prompt = self._template.format(context=query)
        return self.prompt

    def extract_action(self, response) -> bool:
        """Map the model's Yes/No answer to a bool (True == obnoxious).

        startswith() tolerates trailing punctuation such as "yes.".
        """
        answer = response.strip().lower()
        return answer.startswith("yes")
class Query_Agent:
    """Checks for relevance between user query and topic at hand."""

    def __init__(self, openai_client, embeddings, pinecone_index, vectorstore) -> None:
        # OpenAI client + embeddings
        self.openai_client = openai_client
        self.embeddings = embeddings
        # Pinecone Index + vector store
        self.pinecone_index = pinecone_index
        self.vectorstore = vectorstore
        # Prompt template kept separate so set_prompt() can be re-run;
        # self.prompt remains the publicly visible (rendered) prompt.
        self._template = """ """
        self.prompt = self._template
        # Texts returned by the most recent query_vector_store() call.
        self.documents = []

    def query_vector_store(self, query, k=3):
        """Return the page contents of the top-*k* matches for *query*.

        BUG FIX: *k* was previously accepted but never forwarded to
        similarity_search, so the store's default result count was used
        regardless of the argument.
        """
        self.query = query
        matches = self.vectorstore.similarity_search(query, k=k)
        self.documents = [doc.page_content for doc in matches]
        return self.documents

    def set_prompt(self, documents, query):
        """Render the template with the retrieved documents and the query."""
        self.prompt = self._template.format(context=documents, query=query)
        return self.prompt
class Relevant_Documents_Agent:
    """Checks relevance between user query and documents."""

    def __init__(self, openai_client, embeddings) -> None:
        # OpenAI client + embeddings
        self.openai_client = openai_client
        self.embeddings = embeddings
        # Raw template kept separate from the rendered prompt.
        # BUG FIX: the original overwrote the template with the rendered
        # prompt, so a second set_prompt() call formatted a string whose
        # {context}/{query} placeholders were already consumed and returned
        # the stale first prompt.
        self._template = """[INST]Answer STRICTLY with "Yes" or "No".
This is important: If the user querie or the question below is a general greeting such as "hello" then you must reply with "Yes".
Use the following context to check if the query is relevant or not.
If the context is even slightly relevant then reply with "Yes" and
if the context and query are poles apart then reply with "No".
Context: {context}
Question: {query} [/INST]
This is important: If the user querie or the question below is a general greeting such as "hello" then you must reply with "Yes".
"""
        # Backward-compatible attribute: template until rendered.
        self.prompt = self._template
        # Most recent user query (kept for parity with original interface).
        self.query = ""

    def set_prompt(self, documents, query):
        """Render the template with *documents* and *query*; safe to re-call."""
        self.prompt = self._template.format(context=documents, query=query)
        return self.prompt

    def extract_action(self, response=None):
        """Map the model's Yes/No answer to a bool (True == relevant).

        Debug print removed for consistency with Obnoxious_Agent, where
        the equivalent line is commented out.
        """
        answer = response.strip().lower()
        # print(f"Relevance response: {answer}")
        return answer.startswith("yes")
class Answering_Agent:
    """Generates the final answer via a LangChain LCEL retrieval chain."""

    def __init__(self, model, retriever) -> None:
        # Chat model that produces the final answer text.
        self.model = model
        # Retriever used to pull relevant text for the user's query.
        self.retriever = retriever
        # Prompt combining retrieved text with the user's query.
        self.prompt_template = ChatPromptTemplate.from_template("""As an AI, you are provided with relevant pieces of information to help answer the following user query.
Utilize the insights from these texts to formulate a comprehensive and accurate response.
Your goal is to synthesize the information, highlight key points, and ensure the answer is informative and directly addresses the user's question.
You will also be given the previous history of chat as Context use it to influence the answer.
This is important: If the user queries with any kind of general greeting such as "hello", respond with a general greeting.
Relevant Text : {relevant_text}
User's Query: {query}
""")
        # NOTE(review): this assigns the PromptTemplate *class* itself, not an
        # instance, and is never referenced again in this file — confirm it is
        # dead code before removing.
        self.history_template = PromptTemplate
        # LCEL pipeline: the input mapping's "query" value is fed through the
        # retriever to produce {relevant_text}; RunnablePassthrough forwards
        # the whole input as {query}.
        # NOTE(review): RunnablePassthrough here passes the *entire* input
        # mapping (not just the query string) into the {query} slot —
        # presumably itemgetter("query") was intended; verify the rendered
        # prompt downstream.
        self.chain = (
            {
                "relevant_text": itemgetter("query")| self.retriever,
                "query": RunnablePassthrough(),
            }
            | self.prompt_template
            | self.model
            | StrOutputParser()
        )
        # Input mapping populated by set_prompt() and consumed by
        # generate_response().
        self.prompt = {}

    def set_prompt(self, query):
        """Store *query* as the chain's input mapping and return it.

        NOTE(review): "relevant_text" set here is recomputed by the chain's
        first stage (itemgetter("query") | retriever), so this key appears
        to be overwritten — confirm against the chain definition.
        """
        self.prompt = {"relevant_text": query,
                       "query": query}
        return self.prompt

    def generate_response(self):
        """Invoke the LCEL chain with the stored prompt mapping."""
        return self.chain.invoke(self.prompt)