import os

import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from transformers import pipeline
import gradio as gr
from huggingface_hub import login

# Access the Hugging Face token from the environment variable and log in
HF_TOKEN = os.getenv("HF_Token")
login(token=HF_TOKEN)
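
# Assumed runtime dependencies (a sketch, inferred from the imports above; in a Space these
# are normally listed in requirements.txt): pandas, numpy, scikit-learn, transformers
# (plus a backend such as torch), gradio, huggingface_hub.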

# Load and preprocess the data
def preprocess_data(file_path):
    """Load and preprocess the CSV data."""
    data = pd.read_csv(file_path)
    # Clean column names: replace '#' with 'Count' and spaces with underscores
    data.columns = data.columns.str.strip().str.replace('#', 'Count').str.replace(' ', '_')
    # Handle missing values (if any)
    data.fillna(0, inplace=True)
    return data
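
# Expected input schema (a sketch inferred from the column references used below, not a
# definitive contract). The raw CSV is assumed to contain headers such as:
#   Player ID, Player Name, Main Possition (spelling as used in the code), Date, Session Name,
#   Played Time (min), Top Speed (km/h), Dist. Covered (m), Session Intensity, RPE, s-RPE
# After cleaning, spaces become underscores, e.g. "Played Time (min)" -> "Played_Time_(min)".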

# Convert data into a retrievable knowledge base
def create_knowledge_base(data):
    """Transform the data into a knowledge base suitable for retrieval."""
    # Combine relevant fields into a single text-based feature for embedding
    data['Knowledge_Text'] = data.apply(lambda row: (
        f"Player: {row['Player_Name']}, Position: {row['Main_Possition']}, "
        f"Date: {row['Date']}, Session: {row['Session_Name']}, "
        f"Played Time: {row['Played_Time_(min)']} minutes, Top Speed: {row['Top_Speed_(km/h)']} km/h, "
        f"Distance Covered: {row['Dist._Covered_(m)']} meters, "
        f"Intensity: {row['Session_Intensity']}, "
        f"RPE: {row['RPE']}, s-RPE: {row['s-RPE']}"
    ), axis=1)
    return data[['Player_ID', 'Knowledge_Text']]
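
# For illustration only (hypothetical values), a single Knowledge_Text entry produced above
# looks like:
#   "Player: A. Example, Position: Midfielder, Date: 2024-03-01, Session: Morning Training,
#    Played Time: 75 minutes, Top Speed: 31.2 km/h, Distance Covered: 8450 meters,
#    Intensity: High, RPE: 7, s-RPE: 525"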

# Create a similarity-based retrieval function
def query_knowledge_base(knowledge_base, query, vectorizer):
    """Query the knowledge base using cosine similarity."""
    query_vec = vectorizer.transform([query])
    knowledge_vec = vectorizer.transform(knowledge_base['Knowledge_Text'])
    # Compute cosine similarities between the query and every knowledge entry
    similarities = cosine_similarity(query_vec, knowledge_vec).flatten()
    # Retrieve the most relevant rows
    top_indices = np.argsort(similarities)[::-1][:5]  # Top 5 results
    return knowledge_base.iloc[top_indices], similarities[top_indices]
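
# Standalone retrieval example (a sketch, assuming `data` has already been loaded with
# preprocess_data; the query string is illustrative):
#
#     kb = create_knowledge_base(data)
#     vectorizer = TfidfVectorizer().fit(kb['Knowledge_Text'])
#     rows, scores = query_knowledge_base(kb, "top speed in the latest session", vectorizer)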

# Main pipeline with LLM integration and prompt engineering
def main_pipeline(file_path, user_query):
    """End-to-end pipeline for the RAG system with Llama 3.2 and prompt engineering."""
    # Preprocess data
    data = preprocess_data(file_path)
    knowledge_base = create_knowledge_base(data)
    # Create TF-IDF vectorizer
    vectorizer = TfidfVectorizer()
    vectorizer.fit(knowledge_base['Knowledge_Text'])
    # Query the knowledge base
    results, scores = query_knowledge_base(knowledge_base, user_query, vectorizer)
    # Format retrieved knowledge for LLM input
    retrieved_text = "\n".join(results['Knowledge_Text'].tolist())
    # Use Llama 3.2 for question answering with prompt engineering
    llm = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct")
    prompt = (
        "You are an expert sports analyst. Based on the following training data, provide a detailed "
        "and insightful answer to the user's question. Always include relevant numerical data in your "
        "response. Limit your response to a maximum of 200 words.\n\n"
        f"Training Data:\n{retrieved_text}\n\n"
        f"User Question: {user_query}\n\nAnswer:"
    )
    response = llm(prompt, max_new_tokens=200, num_return_sequences=1)
    # Extract the answer part only
    answer = response[0]['generated_text'].split("Answer:", 1)[-1].strip()
    return answer
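
# Note: main_pipeline reloads the text-generation pipeline on every call. A minimal
# optimization sketch (assuming the model can stay in memory for the lifetime of the app)
# is to load it once at module level and reuse it:
#
#     llm = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct")
#
#     def main_pipeline(file_path, user_query):
#         ...
#         response = llm(prompt, max_new_tokens=200, num_return_sequences=1)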

# Gradio interface
def query_interface(file_path, user_query):
    try:
        # gr.File may hand back a file-like object (with .name) or a plain path string
        csv_path = file_path.name if hasattr(file_path, "name") else file_path
        result = main_pipeline(csv_path, user_query)
        return result
    except Exception as e:
        return str(e)

# Launch Gradio app
file_input = gr.File(label="Upload CSV File")
text_input = gr.Textbox(label="Ask a Question", lines=2, placeholder="Enter your query here...")
output = gr.Textbox(label="Answer")
interface = gr.Interface(
    fn=query_interface,
    inputs=[file_input, text_input],
    outputs=output,
    title="RAG Training Data Query System",
    description="Upload a CSV file containing training data and ask detailed questions about it."
)
interface.launch()
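
# Note: launch() with default arguments is appropriate inside a hosted Space. When running
# locally, launch(share=True) can optionally be used to expose a temporary public link.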