# Library Assistant Chatbot — Streamlit UI backed by the OpenAI completions API.
import os
import openai
import streamlit as st
import logging

# Configure application-wide logging once at import time.
# NOTE(review): DEBUG is very verbose for production — confirm the intended level.
logging.basicConfig(level=logging.DEBUG)

# Retrieve the API key from the environment and fail fast in the UI when it is
# absent, so the rest of the app never runs without credentials.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    logging.error("OpenAI API key is missing or invalid.")
    st.error("OpenAI API key is missing or invalid.")
    st.stop()

# Set OpenAI API key
openai.api_key = OPENAI_API_KEY
def ask_openai(question, chat_log=None, engine="davinci", stop=None):
    """Send *question* to the OpenAI completions API and return the reply text.

    Args:
        question: The user's message, appended to the prompt as a Human turn.
        chat_log: Prior conversation transcript; a two-line default intro is
            used when None.
        engine: Completion engine name passed through to the API.
        stop: Optional stop sequences; defaults to ["\nHuman"] so the model
            stops before writing the user's next turn.

    Returns:
        The stripped completion text, or None if the API request fails.
    """
    if chat_log is None:
        chat_log = (
            "Human: Hello, who are you?\n"
            "AI: I am an AI created by OpenAI. How can I assist you today?\n"
        )
    prompt = f'{chat_log}Human: {question}\nAI:'
    # Keep the try body minimal: only the network call can raise here.
    try:
        response = openai.Completion.create(
            prompt=prompt,
            engine=engine,
            stop=stop if stop else ["\nHuman"],
            temperature=0.9,
            max_tokens=150,
            top_p=1,
            best_of=1,
        )
    except Exception as e:
        # Lazy %-args so the message is only formatted when actually logged.
        logging.error("OpenAI request failed: %s", e)
        return None
    return response.choices[0].text.strip()
def get_books_agent(query, conversation_history):
    """Answer *query* with the book-fetching persona; returns (history, reply)."""
    system_message = """You are a library assistant specializing in fetching books. Your role is to:
1. Retrieve information about available books.
2. Provide details about the books as requested.
3. Ensure the user gets the correct book information."""
    transcript = format_chat_log(conversation_history, system_message)
    return handle_response(ask_openai(query, chat_log=transcript), conversation_history)
def remove_intros_from_books_agent(query, conversation_history):
    """Answer *query* with the intro-removal persona; returns (history, reply)."""
    system_message = """You are a text processing assistant specializing in removing introductions from books. Your role is to:
1. Identify and remove introductions from provided text.
2. Ensure the main content of the book is preserved."""
    transcript = format_chat_log(conversation_history, system_message)
    return handle_response(ask_openai(query, chat_log=transcript), conversation_history)
def chunking_the_books_agent(query, conversation_history):
    """Answer *query* with the book-chunking persona; returns (history, reply)."""
    system_message = """You are a text processing assistant specializing in chunking books. Your role is to:
1. Divide books into manageable chunks for easier processing.
2. Ensure each chunk is coherent and logical."""
    transcript = format_chat_log(conversation_history, system_message)
    return handle_response(ask_openai(query, chat_log=transcript), conversation_history)
def embed_chromadb_agent(query, conversation_history):
    """Answer *query* with the ChromaDB-embedding persona; returns (history, reply)."""
    system_message = """You are a database assistant specializing in embedding books into ChromaDB. Your role is to:
1. Embed the book's information into ChromaDB.
2. Ensure the embedding process is accurate and efficient."""
    transcript = format_chat_log(conversation_history, system_message)
    return handle_response(ask_openai(query, chat_log=transcript), conversation_history)
def query_library_agent(query, conversation_history):
    """Answer *query* with the library-querying persona; returns (history, reply)."""
    system_message = """You are a library assistant specializing in querying the library database. Your role is to:
1. Search the library database for requested information.
2. Provide accurate and relevant information from the library."""
    transcript = format_chat_log(conversation_history, system_message)
    return handle_response(ask_openai(query, chat_log=transcript), conversation_history)
def chat_with_a_library_agent(query, conversation_history):
    """Answer *query* with the general library-chat persona; returns (history, reply)."""
    system_message = """You are a library assistant specializing in chatting with users about the library. Your role is to:
1. Answer user queries about the library.
2. Provide helpful and accurate information."""
    transcript = format_chat_log(conversation_history, system_message)
    return handle_response(ask_openai(query, chat_log=transcript), conversation_history)
def handle_response(response, conversation_history):
    """Normalize an agent reply into a (conversation_history, message) pair.

    Bug fix: this function previously appended the assistant reply to
    *conversation_history* itself, but main() also appends the same reply
    after the call — every assistant message was recorded twice, and before
    the user's turn. The caller owns history updates, so this function no
    longer mutates *conversation_history*.

    Args:
        response: The agent's reply text, or None on failure.
        conversation_history: The shared chat history (returned unchanged).

    Returns:
        (conversation_history, message) where message is the reply, or a
        fallback apology when response is None.
    """
    if response is None:
        return conversation_history, "Sorry, I didn't understand that. Can you please rephrase?"
    return conversation_history, response
def format_chat_log(conversation_history, system_message):
    """Render the system message plus the conversation as a plain-text transcript.

    Each history entry becomes one "Role: content" line; the system message is
    attributed to the AI on the first line.
    """
    parts = [f"AI: {system_message}\n"]
    parts.extend(
        f"{entry['role'].capitalize()}: {entry['content']}\n"
        for entry in conversation_history
    )
    return "".join(parts)
def get_response(user_input, selected_department):
    """Route *user_input* to the agent registered for *selected_department*.

    Args:
        user_input: The user's message text.
        selected_department: Department key chosen in the UI selectbox.

    Returns:
        The (history, message) pair produced by the matching agent, or
        (None, "Department not recognized.") for an unknown department.
    """
    # Dispatch table keeps the routing in one place instead of a six-way
    # if/elif chain; behavior is unchanged.
    agents = {
        "get_books": get_books_agent,
        "remove_intros_from_books": remove_intros_from_books_agent,
        "chunking_the_books": chunking_the_books_agent,
        "embed_chromadb": embed_chromadb_agent,
        "query_library": query_library_agent,
        "chat_with_a_library": chat_with_a_library_agent,
    }
    agent = agents.get(selected_department)
    if agent is None:
        return None, "Department not recognized."
    return agent(user_input, st.session_state.chat_history)
# Streamlit Interface
def main():
    """Render the chatbot UI and drive one request/response round-trip."""
    st.title("Library Assistant Chatbot")

    # Persist the conversation across Streamlit reruns.
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []

    departments = [
        "get_books",
        "remove_intros_from_books",
        "chunking_the_books",
        "embed_chromadb",
        "query_library",
        "chat_with_a_library",
    ]
    selected_department = st.selectbox("Select Department", departments)
    user_input = st.text_input("Enter your message:")

    if st.button("Send") and user_input:
        _, response_message = get_response(user_input, selected_department)
        st.session_state.chat_history.append({"role": "user", "content": user_input})
        st.session_state.chat_history.append({"role": "assistant", "content": response_message})

    st.write("Chat History:")
    for message in st.session_state.chat_history:
        st.write(f"{message['role'].capitalize()}: {message['content']}")


if __name__ == "__main__":
    main()