import os
import shutil
from getpass import getpass

import gradio as gr
import qdrant_client

# Read the OpenAI API key from the environment; fall back to an interactive
# prompt so the script can also run in shells where the variable is not set.
openai_api_key = os.getenv("OPENAI_API_KEY") or getpass("OpenAI API key: ")

from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings

# Configure the global LLM and embedding model used by LlamaIndex.
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4, api_key=openai_api_key)
Settings.embed_model = OpenAIEmbedding(
    model="text-embedding-ada-002", api_key=openai_api_key
)

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core.memory import ChatMemoryBuffer

# Globals shared between the upload handler and the chat handler.
chat_engine = None
index = None
query_engine = None
memory = None
client = None
vector_store = None
storage_context = None


def process_upload(files):
    """Copy the uploaded files into a local folder, index them, and build the chat engine."""
    if not files:
        return "Please select at least one file before processing."

    upload_dir = "uploaded_files"
    os.makedirs(upload_dir, exist_ok=True)

    # Copy each uploaded file into the upload directory, skipping duplicates.
    for file_path in files:
        file_name = os.path.basename(file_path)
        dest = os.path.join(upload_dir, file_name)
        if not os.path.exists(dest):
            shutil.copy(file_path, dest)

    documents = SimpleDirectoryReader(upload_dir).load_data()

    global client, vector_store, storage_context, index, query_engine, memory, chat_engine

    # In-memory Qdrant instance; the collection is rebuilt on every upload.
    client = qdrant_client.QdrantClient(location=":memory:")

    vector_store = QdrantVectorStore(
        collection_name="paper",
        client=client,
        enable_hybrid=True,
        batch_size=20,
    )

    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

    query_engine = index.as_query_engine(vector_store_query_mode="hybrid")

    # Keep roughly the last 3000 tokens of conversation as chat context.
    memory = ChatMemoryBuffer.from_defaults(token_limit=3000)

    chat_engine = index.as_chat_engine(
        chat_mode="context",
        memory=memory,
        system_prompt=(
            "You are an AI assistant that answers the user's questions "
            "based on the uploaded documents."
        ),
    )

    return "Documents uploaded and index built successfully!"


def chat_with_ai(user_input, chat_history):
    """Answer a question with the chat engine and append source references to the reply."""
    global chat_engine
    if chat_engine is None:
        chat_history.append((user_input, "Please upload documents first."))
        return chat_history, ""

    response = chat_engine.chat(user_input)

    # Collect file names (and page labels, when available) from the retrieved source nodes.
    ref, pages = [], []
    for node in response.source_nodes:
        file_name = node.metadata.get("file_name")
        if file_name and file_name not in ref:
            ref.append(file_name)
        page_label = node.metadata.get("page_label")
        if page_label and page_label not in pages:
            pages.append(page_label)

    if ref or pages:
        complete_response = (
            str(response)
            + "\n\nReferences: " + ", ".join(ref)
            + ("\nPages: " + ", ".join(pages) if pages else "")
        )
        chat_history.append((user_input, complete_response))
    else:
        chat_history.append((user_input, str(response)))
    return chat_history, ""


def clear_history():
    """Reset the chat window and the input box."""
    return [], ""


def gradio_interface():
    """Build the two-tab Gradio app: document upload and chat."""
    with gr.Blocks() as demo:
        gr.Markdown("# AI Assistant")

        with gr.Tab("Upload Documents"):
            gr.Markdown("Upload PDF, Excel, CSV, DOC/DOCX, or TXT files below:")

            file_upload = gr.File(
                label="Upload Files",
                file_count="multiple",
                file_types=[".pdf", ".csv", ".txt", ".xlsx", ".xls", ".doc", ".docx"],
                type="filepath",
            )
            upload_status = gr.Textbox(label="Upload Status", interactive=False)
            upload_button = gr.Button("Process Upload")

            upload_button.click(process_upload, inputs=file_upload, outputs=upload_status)

        with gr.Tab("Chat"):
            chatbot = gr.Chatbot(label="Chatbot Assistant")
            user_input = gr.Textbox(
                placeholder="Ask a question...", label="Enter your question"
            )
            submit_button = gr.Button("Send")
            btn_clear = gr.Button("Restart")

            # Conversation history shared by the click and submit handlers.
            chat_history = gr.State([])

            submit_button.click(
                chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input]
            )
            user_input.submit(
                chat_with_ai, inputs=[user_input, chat_history], outputs=[chatbot, user_input]
            )
            btn_clear.click(clear_history, outputs=[chatbot, user_input])

    return demo


if __name__ == "__main__":
    gradio_interface().launch(debug=True)