Spaces:
Build error
```python
import gradio as gr
import os
import torch
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer

# Configuration
DOCS_DIR = "business_docs"
EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
MODEL_NAME = "microsoft/phi-2"
```
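A Spaces "Build error" happens before any of this code runs, so the first thing to check is `requirements.txt`. A minimal sketch covering every import above (unpinned; exact versions are an assumption, and `microsoft/phi-2`'s remote code may additionally need `einops`):

```
gradio
torch
transformers
accelerate
langchain
langchain-community
sentence-transformers
faiss-cpu
pypdf
```

`accelerate` is needed for `device_map="auto"`, `pypdf` for `PyPDFLoader`, `sentence-transformers` for `HuggingFaceEmbeddings`, and `faiss-cpu` for the FAISS vector store.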
```python
def initialize_system():
    # Document processing
    if not os.path.exists(DOCS_DIR):
        raise FileNotFoundError(f"Missing {DOCS_DIR} folder")

    pdf_files = [os.path.join(DOCS_DIR, f)
                 for f in os.listdir(DOCS_DIR)
                 if f.endswith(".pdf")]

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200
    )

    texts = []
    for pdf in pdf_files:
        loader = PyPDFLoader(pdf)
        pages = loader.load_and_split(text_splitter)
        texts.extend(pages)

    # Create embeddings
    embeddings = HuggingFaceEmbeddings(
        model_name=EMBEDDING_MODEL,
        model_kwargs={'device': 'cpu'}
    )

    # Vector store
    vector_store = FAISS.from_documents(texts, embeddings)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token  # Fix padding issue
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto",
        low_cpu_mem_usage=True
    )
    return vector_store, model, tokenizer
```
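On a CPU Space, re-embedding every PDF at each restart is the slowest part of startup. If the documents rarely change, the FAISS index can be persisted to disk instead; a sketch, assuming a `faiss_index` cache folder (my name, not from the original code) and a recent `langchain-community` (older releases lack the `allow_dangerous_deserialization` flag):

```python
INDEX_DIR = "faiss_index"  # assumed cache location, not in the original code

def load_or_build_index(texts, embeddings):
    """Load a cached FAISS index if present, otherwise build and save one."""
    if os.path.isdir(INDEX_DIR):
        return FAISS.load_local(
            INDEX_DIR,
            embeddings,
            allow_dangerous_deserialization=True,  # newer langchain-community requires opting in
        )
    vector_store = FAISS.from_documents(texts, embeddings)
    vector_store.save_local(INDEX_DIR)
    return vector_store
```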
```python
try:
    vector_store, model, tokenizer = initialize_system()
    print("✅ System initialized successfully")
    if torch.cuda.is_available():
        print("🚀 Using CUDA")
        print(f"Memory usage: {torch.cuda.memory_allocated()/1024**3:.2f} GB")
    else:
        print("🧠 Using CPU")
except Exception as e:
    print(f"❌ Initialization failed: {str(e)}")
    raise

def generate_response(query):
    try:
        # Context retrieval
        docs = vector_store.similarity_search(query, k=3)
        context = "\n".join([d.page_content for d in docs])

        # Prompt template optimized for Phi-2
        prompt = f"""Context:
{context}
Question: {query}
Answer:"""

        # Truncate so prompt + 300 new tokens stay within Phi-2's 2048-token context
        inputs = tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=1700
        ).to(model.device)
        outputs = model.generate(
            **inputs,  # passes attention_mask too, avoiding padding warnings
            max_new_tokens=300,
            temperature=0.3,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response.split("Answer:")[-1].strip()
    except Exception as e:
        print(f"Generation error: {e}")  # log the cause instead of swallowing it
        return "Sorry, an error occurred while generating a response."
```
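Before wiring up the UI, the retrieval-plus-generation path can be exercised on its own; a minimal check (the sample question is a placeholder):

```python
# Hypothetical smoke test; run locally before pushing to the Space
print(generate_response("What services do you offer?"))
```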
```python
# Gradio UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧠 Enterprise Customer Support Chatbot")
    chatbot = gr.Chatbot(height=500, label="Conversation")
    with gr.Row():
        msg = gr.Textbox(placeholder="Ask about our services...", scale=7)
        submit_btn = gr.Button("Send", variant="primary", scale=1)
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, history):
        response = generate_response(message)
        history.append((message, response))
        return "", history

    submit_btn.click(respond, [msg, chatbot], [msg, chatbot])
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch(server_port=7860)
```
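One version note: the `(message, response)` tuple history used in `respond()` is deprecated in newer Gradio releases, which prefer an OpenAI-style messages format. If the build installs the latest `gradio`, either pin an older version in `requirements.txt` or rewire the chat along these lines (a sketch, not tested against a specific pin):

```python
chatbot = gr.Chatbot(type="messages", height=500, label="Conversation")

def respond(message, history):
    response = generate_response(message)
    # messages format: one dict per turn instead of (user, bot) tuples
    history = history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": response},
    ]
    return "", history
```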