"""
A module to manage responses from the OpenAI Responses API for an IT Helpdesk assistant
at Harvey Mudd College. This module initializes the OpenAI client and provides a method
to create responses to user queries using RAG (Retrieval-Augmented Generation). It uses
a vector store to retrieve knowledge base documents and generates responses with the
specified OpenAI model. The module also loads a developer message from a text file to
prompt engineer responses from the AI model.
"""

import os

import openai

# Load the OpenAI API key from the environment variable.
# If the API key is not set, raise an error early rather than failing on the first request.
if "OPENAI_API_KEY" not in os.environ:
    raise ValueError("OPENAI_API_KEY environment variable is not set.")
api_key = os.getenv("OPENAI_API_KEY")


class ResponseManager:
    """
    A class to manage responses from the OpenAI Responses API for an IT Helpdesk assistant.
    This class initializes the OpenAI client and provides a method to create responses
    to user queries using the specified OpenAI model.
    """
    def __init__(self, vector_store_id):
        """
        Initialize the ResponseManager with a vector store ID.

        :param vector_store_id: The ID of the vector store to use for file search.
        """
        # Initialize the OpenAI client with the API key read from the environment.
        self.client = openai.OpenAI(api_key=api_key)
        self.vector_store_id = vector_store_id
        # Track the last response ID so follow-up queries continue the same conversation.
        self.previous_response_id = None

        # Load the meta prompt (developer message) from the text file.
        # This message provides context and instructions for the AI model.
        meta_prompt_file = 'config/meta_prompt.txt'
        if not os.path.exists(meta_prompt_file):
            raise FileNotFoundError(f"Meta prompt file '{meta_prompt_file}' not found.")
        with open(meta_prompt_file, 'r') as file:
            self.meta_prompt = file.read().strip()

    def create_response(self, query, model: str = "gpt-4o-mini",
                        temperature=0, max_output_tokens=800,
                        max_num_results=7):
        """
        Create a response to a user query using the OpenAI Responses API.

        :param query: The user query to respond to.
        :param model: The OpenAI model to use (default is "gpt-4o-mini").
        :param temperature: The sampling temperature for the response (default is 0).
        :param max_output_tokens: The maximum number of output tokens (default is 800).
        :param max_num_results: The maximum number of file-search results to retrieve (default is 7).
        :return: The response text from the OpenAI API.
        """
        # Send the developer (meta) prompt only on the first turn; later turns are
        # chained to the prior response via previous_response_id, so the developer
        # message is already part of the conversation state.
        if self.previous_response_id is None:
            input_messages = [{"role": "developer", "content": self.meta_prompt},
                              {"role": "user", "content": query}]
        else:
            input_messages = [{"role": "user", "content": query}]

        response = self.client.responses.create(
            model=model,
            previous_response_id=self.previous_response_id,
            input=input_messages,
            tools=[{
                "type": "file_search",
                "vector_store_ids": [self.vector_store_id],
                "max_num_results": max_num_results,
            }],
            temperature=temperature,
            max_output_tokens=max_output_tokens,
            # include=["output[*].file_search_call.search_results"]  # uncomment to inspect retrieved chunks
        )
        # Remember this response ID so the next query continues the same conversation.
        self.previous_response_id = response.id
        return response.output_text
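

if __name__ == "__main__":
    # Minimal usage sketch (an addition, not part of the original module). The vector
    # store ID below is a hypothetical placeholder; substitute the ID of a vector
    # store you have already created and populated with knowledge base documents.
    manager = ResponseManager(vector_store_id="vs_example123")
    # Each call chains onto the previous response, so follow-up questions keep context.
    print(manager.create_response("How do I connect to the campus Wi-Fi?"))
    print(manager.create_response("What about from a gaming console?"))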