Spaces:
Sleeping
Sleeping
from api_interface import API_Interface
class Obnoxious_Agent:
    """Checks if a query is obnoxious."""

    def __init__(self, ai):
        # Shared API interface used for all chat calls.
        self.ai = ai

    def check_query(self, query, need_answer=False) -> bool:
        """Checks if a query is obnoxious. Responds with True or False.

        Args:
            query: The user query to screen.
            need_answer: When True, also print the model's raw explanation.

        Returns:
            True when the model's reply contains "yes" (case-insensitive).
        """
        prompt = f"""
        Is this query obnoxious? Answer using 'Yes' or 'No', and explain.
        Query: {query}
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.ai.client_chat(messages)
        if need_answer:
            print(response)
        # "yes" anywhere in the reply counts as obnoxious.
        return "yes" in response.lower()
class Query_Agent:
    """Retrieves relevant documents from the vector store."""

    def __init__(self, ai):
        # API interface wrapping the Pinecone vector store.
        self.ai = ai

    def query_vector_store(self, query, k_docs=5, k_tables=3):
        """Fetch the top-k documents and tables for `query`.

        Returns a flat 4-tuple: (docs, doc scores, tables, table scores).
        """
        doc_results, table_results = self.ai.query_pinecone_vector_store(
            query, top_k_docs=k_docs, top_k_tbls=k_tables
        )
        retrieved_docs, doc_scores = doc_results
        retrieved_tables, table_scores = table_results
        return retrieved_docs, doc_scores, retrieved_tables, table_scores
class Answering_Agent:
    """Generates responses for user queries."""

    def __init__(self, ai, mode="concise"):
        self.ai = ai
        # Speaking style injected into the answer prompt: "concise" or "talkative".
        self.mode = mode

    def check_mode(self, query):
        """Switch self.mode when the query asks for a speaking-style change.

        Returns True if the mode was updated, False otherwise.
        """
        prompt = f"""
        If the query asks for a change in speaking style, which most closely matches the user's needs - "concise" or "talkative"?
        If the query does not ask for a change in speaking style, respond with "N/A"?
        Query: {query}
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.ai.client_chat(messages)
        new_mode = False
        if response.lower().find("concise") != -1:
            self.mode = "concise"
            new_mode = True
        elif response.lower().find("talkative") != -1:
            # Bug fix: was stored as the misspelled "talktative", which then
            # leaked into the {self.mode} slot of generate_response's prompt.
            self.mode = "talkative"
            new_mode = True
        return new_mode

    def generate_response(self, query: str, documents: list, conv_history: list, context: str = ""):
        """Generates response for user query given relevant documents and previous conversation.

        Returns both the raw response and the extended conversation history
        used to generate the response.

        Note: `conv_history` is mutated in place (the new developer message is
        appended) and returned for convenience.
        """
        if context != "":
            context = f"\nYou are given the following conversation context: {context}\n\n"
        new_messages = [{"role": "developer", "content": f"""
        You are given the following information:
        {documents}
        {context}
        Do not answer any unrelated questions in the query.
        Answer the following query in a {self.mode} manner:
        {query}
        """}
        ]
        conv_history.extend(new_messages)
        # Only the fresh developer message is sent to the model; the full
        # history is returned so callers can keep it.
        response = self.ai.client_chat(new_messages)
        return response, conv_history  # return conv_history in case it's useful

    def requires_context(self, query, need_answer=False) -> bool:
        """Ask the model whether the query needs further context; True on "yes"."""
        prompt = f"""
        Does this query require further context? Answer using "Yes" or "No", then explain.
        Query: {query}
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.ai.client_chat(messages)
        if need_answer:
            print(response)
        return response.lower().find("yes") != -1
class Relevant_Documents_Agent:
    """Determines if documents are relevant to the query."""

    def __init__(self, ai):
        # Shared API interface used for all chat calls.
        self.ai = ai

    def get_relevance(self, query, documents, need_answer=False) -> bool:
        """Return True when at least one document is relevant to the query."""
        flags = []
        for doc in documents:
            flags.append(self.__get_doc_relevance(query, doc, need_answer))
        print("\n\n", flags)
        return any(flags)

    def __get_doc_relevance(self, query, document, need_answer=False) -> bool:
        """Ask the model whether a single document shares the query's topic."""
        prompt = f"""Does any portion of the following text or context share the
        same or similar topic as the following query, and is the query related
        to solar eclipses, numerical data, or astronomy? Answer with "Yes" or "No", and explain why or why not.
        Text: {document.page_content}
        Query: {query}"""
        reply = self.ai.client_chat([{"role": "user", "content": prompt}])
        if need_answer:
            print("Relevance prompt:", prompt[250:1250].replace("\n", " <> "))
            print("Relevant response:", reply, "\n\n\n")
        return "yes" in reply.lower()
class Greeting_Agent:
    """Detects greetings and produces a friendly reply to them."""

    def __init__(self, ai):
        # Shared API interface used for all chat calls.
        self.ai = ai

    def check_greeting(self, query):
        """Return True when the statement reads as a greeting or opener."""
        prompt = f"""
        Would the following statement be regarded as a general
        greeting or friendly conversation opener? Answer using 'Yes' or 'No'.
        Statement: {query}
        """
        reply = self.ai.client_chat([{"role": "user", "content": prompt}])
        return "yes" in reply.lower()

    def get_greeting_response(self, query):
        """Forward the greeting straight to the model and return its reply."""
        return self.ai.client_chat([{"role": "user", "content": query}])
class Head_Agent:
    """Routes user queries through guard, retrieval, and answering sub-agents."""

    def __init__(self, OPEN_AI_KEY, PINECONE_KEY):
        self.ai = API_Interface(OPEN_AI_KEY, PINECONE_KEY, chunk_size=1200)
        self.setup_sub_agents()

    def setup_sub_agents(self):
        """Instantiate all sub-agents, sharing a single API interface."""
        self.a_obnoxious = Obnoxious_Agent(self.ai)
        self.a_query = Query_Agent(self.ai)
        self.a_answering = Answering_Agent(self.ai)
        self.a_relevant = Relevant_Documents_Agent(self.ai)
        self.a_greeting = Greeting_Agent(self.ai)

    def generate_response(self, query, conv_history: list = None):
        """Produce a reply for `query`, using recent conversation as context.

        Pipeline: obnoxiousness guard -> greeting shortcut -> optional style
        switch -> vector-store retrieval -> relevance gate -> answer generation.

        Args:
            query: The latest user message.
            conv_history: Prior chat messages (dicts with "role"/"content");
                defaults to an empty history.

        Returns:
            The reply string (possibly prefixed with a style-change notice),
            or a canned refusal / no-relevant-information message.
        """
        conv_history = conv_history or []
        if self.a_obnoxious.check_query(query):
            return "Please do not ask obnoxious questions."
        if self.a_greeting.check_greeting(query):
            return self.a_greeting.get_greeting_response(query)
        ret = ""
        if self.a_answering.check_mode(query):
            ret = "I have updated my communication style to better suit your needs!\n\n"
        # Gather recent non-developer turns as conversational context.
        # NOTE(review): [-8:-1] drops the most recent stored message — confirm
        # that is intentional (it may duplicate `query`).
        context = ""
        for msg in conv_history[-8:-1]:
            if msg["role"] != "developer":
                context += f"{msg['role']}: {msg['content']}\n"
        no_rel = ("No relevant information found. "
                  "Please refine your query or ask another question pertaining to solar eclipses on Earth.")
        # Bug fix: retrieval previously used only `context`, which is the empty
        # string on the first turn, so the vector store was queried with ""
        # and the actual question was ignored. Include the latest query.
        documents, scores, tables, tscores = self.a_query.query_vector_store(context + query)
        context_plus = "This is our prior conversation for additional context:\n\n" + context + f"Latest user query: {query}"
        docs_is_c_relevant = self.a_relevant.get_relevance(context_plus, documents + tables, need_answer=True)
        print(scores, "\n", tscores, "\n\n", context_plus, "\n", "#" * 50)
        if not docs_is_c_relevant:
            return no_rel
        print("Using context to generate response.")
        r, h = self.a_answering.generate_response(query, documents + tables, conv_history, context)
        ret += r
        return ret