import pandas as pd
import torch
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr
import json
import faiss
import numpy as np
import spaces

# Use the GPU when available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the CSV file with precomputed embeddings
df = pd.read_csv('RBDx10kstats.csv')
df['embedding'] = df['embedding'].apply(json.loads)  # Convert each JSON string back to a list
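# NOTE (assumption): the schema of RBDx10kstats.csv is inferred from how it is
# used below -- an 'embedding' column holding a JSON-encoded list of floats per
# row, and an 'Abstract' column holding the document text returned to the user.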

# Convert the embeddings to a float32 numpy array, as FAISS expects
embeddings = np.array(df['embedding'].tolist(), dtype='float32')

# Set up a FAISS index for exact nearest-neighbour search
index = faiss.IndexFlatL2(embeddings.shape[1])  # dimension must match the embedding size
index.add(embeddings)
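# NOTE (assumption): IndexFlatL2 ranks by Euclidean distance. all-MiniLM-L6-v2
# embeddings are usually compared by cosine similarity; an equivalent setup is
# faiss.IndexFlatIP over L2-normalized vectors. L2 is kept here to match the original.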

# Load the Sentence Transformer model used to embed queries
sentence_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2', device=device)

# Load the generator model. Note: despite the variable names, this checkpoint
# is GPT-2, not LLaMA; swap in a LLaMA checkpoint here if that is the intent.
llama_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
llama_model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2").to(device)

# Load the summarization model used to shorten long documents
summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=0 if device == 'cuda' else -1)

# Retrieve the most relevant document for a query via FAISS
def retrieve_relevant_doc(query):
    query_embedding = sentence_model.encode(query, convert_to_tensor=False)
    _, indices = index.search(np.array([query_embedding], dtype='float32'), k=1)
    best_match_idx = indices[0][0]
    return df.iloc[best_match_idx]['Abstract']

# Generate a response to a query
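# ASSUMPTION: `spaces` is imported above but never used. On ZeroGPU Spaces,
# functions that run on the GPU must be decorated with @spaces.GPU; adding it
# here is a suggested fix, not a confirmed part of the original app.
@spaces.GPU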
def generate_response(query):
    relevant_doc = retrieve_relevant_doc(query)
    if len(relevant_doc) > 512:  # Summarize long documents (crude character-count check)
        # Cap the summary length; the original max_length=4096 exceeds
        # BART's 1024-token limit and would fail or warn at runtime
        relevant_doc = summarizer(relevant_doc, max_length=150, min_length=50, do_sample=False)[0]['summary_text']
    input_text = f"Document: {relevant_doc}\n\nQuestion: {query}\n\nAnswer:"
    # Truncate the prompt so it plus the generated tokens fits within
    # GPT-2's 1024-token context window
    inputs = llama_tokenizer(input_text, return_tensors="pt", truncation=True, max_length=800).to(device)
    # GPT-2 has no pad token; reuse eos_token_id to silence the warning
    pad_token_id = llama_tokenizer.eos_token_id
    outputs = llama_model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=200,  # max_length counts prompt tokens too; capping new tokens is safer
        pad_token_id=pad_token_id
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = llama_tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    return response

# Create a Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs="text",
    title="RAG Chatbot",
    description="This chatbot retrieves the most relevant document for your query and generates an answer with GPT-2."
)

# Launch the Gradio interface
iface.launch()