"""
Module for managing dataset on Hugging Face Hub
"""
import os
import json
import tempfile
from typing import Tuple, List, Dict, Any, Optional, Union
from datetime import datetime
from huggingface_hub import HfApi
from langchain_community.vectorstores import FAISS
from config.settings import (
VECTOR_STORE_PATH,
HF_TOKEN,
EMBEDDING_MODEL,
DATASET_ID,
CHAT_HISTORY_PATH,
DATASET_CHAT_HISTORY_PATH,
DATASET_VECTOR_STORE_PATH,
DATASET_FINE_TUNED_PATH,
DATASET_ANNOTATIONS_PATH
)
from langchain_huggingface import HuggingFaceEmbeddings
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DatasetManager:
def __init__(self, dataset_name: Optional[str] = None, token: Optional[str] = None):
self.dataset_name = dataset_name or DATASET_ID
        self.token = token or HF_TOKEN
self.api = HfApi(token=self.token)
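        # token=None falls back to credentials resolved by huggingface_hub
        # (e.g. the HF_TOKEN env var or a cached `huggingface-cli login`)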
# Use paths from settings
self.vector_store_path = DATASET_VECTOR_STORE_PATH
self.chat_history_path = DATASET_CHAT_HISTORY_PATH
self.fine_tuned_path = DATASET_FINE_TUNED_PATH
self.annotations_path = DATASET_ANNOTATIONS_PATH
def init_dataset_structure(self) -> Tuple[bool, str]:
"""
Initialize dataset structure with required directories
Returns:
(success, message)
"""
try:
# Check if repository exists
try:
self.api.repo_info(repo_id=self.dataset_name, repo_type="dataset")
except Exception:
# Create repository if it doesn't exist
self.api.create_repo(repo_id=self.dataset_name, repo_type="dataset", private=True)
# Create empty .gitkeep files to maintain structure
directories = ["vector_store", "chat_history", "documents"]
for directory in directories:
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp_path = temp.name
try:
self.api.upload_file(
path_or_fileobj=temp_path,
path_in_repo=f"{directory}/.gitkeep",
repo_id=self.dataset_name,
repo_type="dataset"
)
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
return True, "Dataset structure initialized successfully"
except Exception as e:
return False, f"Error initializing dataset structure: {str(e)}"
def upload_vector_store(self, vector_store: FAISS) -> Tuple[bool, str]:
"""
Upload vector store to dataset
Args:
vector_store: FAISS vector store to upload
Returns:
(success, message)
"""
try:
with tempfile.TemporaryDirectory() as temp_dir:
# Save vector store to temporary directory
vector_store.save_local(folder_path=temp_dir)
index_path = os.path.join(temp_dir, "index.faiss")
config_path = os.path.join(temp_dir, "index.pkl")
# Add debug logging
print(f"Debug - Checking files before upload:")
print(f"index.faiss exists: {os.path.exists(index_path)}, size: {os.path.getsize(index_path) if os.path.exists(index_path) else 0} bytes")
print(f"index.pkl exists: {os.path.exists(config_path)}, size: {os.path.getsize(config_path) if os.path.exists(config_path) else 0} bytes")
if not os.path.exists(index_path) or not os.path.exists(config_path):
return False, "Vector store files not created"
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                # Archive the previous index files first, if they exist on the Hub
                try:
                    # Download the currently published files; raises if they do not exist
                    old_index = self.api.hf_hub_download(
                        repo_id=self.dataset_name,
                        filename="vector_store/index.faiss",
                        repo_type="dataset"
                    )
                    old_config = self.api.hf_hub_download(
                        repo_id=self.dataset_name,
                        filename="vector_store/index.pkl",
                        repo_type="dataset"
                    )
                    # Copy the old files into the archive under a timestamped name
                    # (the previous code mistakenly archived the new files instead)
                    self.api.upload_file(
                        path_or_fileobj=old_index,
                        path_in_repo=f"vector_store/archive/index_{timestamp}.faiss",
                        repo_id=self.dataset_name,
                        repo_type="dataset"
                    )
                    self.api.upload_file(
                        path_or_fileobj=old_config,
                        path_in_repo=f"vector_store/archive/index_{timestamp}.pkl",
                        repo_id=self.dataset_name,
                        repo_type="dataset"
                    )
except Exception:
# If no files exist, create archive directory
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp_path = temp.name
try:
self.api.upload_file(
path_or_fileobj=temp_path,
path_in_repo="vector_store/archive/.gitkeep",
repo_id=self.dataset_name,
repo_type="dataset"
)
finally:
if os.path.exists(temp_path):
os.remove(temp_path)
# Upload current files
self.api.upload_file(
path_or_fileobj=index_path,
path_in_repo="vector_store/index.faiss",
repo_id=self.dataset_name,
repo_type="dataset"
)
self.api.upload_file(
path_or_fileobj=config_path,
path_in_repo="vector_store/index.pkl",
repo_id=self.dataset_name,
repo_type="dataset"
)
# Update metadata about last update
metadata = {
"last_update": timestamp,
"version": "1.0"
}
with tempfile.NamedTemporaryFile(mode="w+", suffix=".json", delete=False) as temp:
json.dump(metadata, temp, ensure_ascii=False, indent=2)
temp_name = temp.name
try:
self.api.upload_file(
path_or_fileobj=temp_name,
path_in_repo="vector_store/metadata.json",
repo_id=self.dataset_name,
repo_type="dataset"
)
finally:
if os.path.exists(temp_name):
os.remove(temp_name)
return True, "Vector store uploaded successfully"
except Exception as e:
return False, f"Error uploading vector store: {str(e)}"
def download_vector_store(self) -> Tuple[bool, Union[FAISS, str]]:
"""Download vector store from dataset"""
try:
with tempfile.TemporaryDirectory() as temp_dir:
print(f"Downloading to temporary directory: {temp_dir}")
# Download files to temporary directory
try:
index_path = self.api.hf_hub_download(
repo_id=self.dataset_name,
filename="vector_store/index.faiss",
repo_type="dataset",
local_dir=temp_dir
)
print(f"Downloaded index.faiss to: {index_path}")
config_path = self.api.hf_hub_download(
repo_id=self.dataset_name,
filename="vector_store/index.pkl",
repo_type="dataset",
local_dir=temp_dir
)
print(f"Downloaded index.pkl to: {config_path}")
# Verify files exist
if not os.path.exists(index_path) or not os.path.exists(config_path):
return False, f"Downloaded files not found at {temp_dir}"
# Load vector store from temporary directory
embeddings = HuggingFaceEmbeddings(
model_name=EMBEDDING_MODEL,
model_kwargs={'device': 'cpu'}
)
# Use the directory containing the files
store_dir = os.path.dirname(index_path)
print(f"Loading vector store from: {store_dir}")
vector_store = FAISS.load_local(
store_dir,
embeddings,
allow_dangerous_deserialization=True
)
return True, vector_store
except Exception as e:
return False, f"Failed to download vector store: {str(e)}"
except Exception as e:
return False, f"Error downloading vector store: {str(e)}"
    def save_chat_history(self, conversation_id: str, messages: List[Dict[str, str]]) -> Tuple[bool, str]:
        """Save a conversation's messages as a JSON file in the dataset"""
        try:
            timestamp = datetime.now().isoformat()
            filename = f"{self.chat_history_path}/{conversation_id}_{datetime.now().strftime('%Y%m%d-%H%M%S')}.json"
            chat_data = {
                "conversation_id": conversation_id,
                "timestamp": timestamp,
                "history": messages
            }
            if not self._validate_chat_structure(chat_data):
                return False, "Invalid chat history structure"
            with tempfile.NamedTemporaryFile(mode="w+", suffix=".json", delete=False, encoding="utf-8") as temp:
                json.dump(chat_data, temp, ensure_ascii=False, indent=2)
                temp_name = temp.name
            try:
                # Upload the serialized history to the dataset repository
                self.api.upload_file(
                    path_or_fileobj=temp_name,
                    path_in_repo=filename,
                    repo_id=self.dataset_name,
                    repo_type="dataset"
                )
            finally:
                if os.path.exists(temp_name):
                    os.remove(temp_name)
            return True, "Chat history saved successfully"
        except Exception as e:
            return False, f"Error saving chat history: {str(e)}"
    def _validate_chat_structure(self, chat_data: Dict) -> bool:
        """Check that chat data contains the required fields and message format"""
required_fields = {"conversation_id", "timestamp", "history"}
if not all(field in chat_data for field in required_fields):
return False
if not isinstance(chat_data["history"], list):
return False
for message in chat_data["history"]:
if not all(field in message for field in ["role", "content", "timestamp"]):
return False
return True
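    # Illustrative example of the structure _validate_chat_structure accepts
    # (values are hypothetical):
    # {
    #     "conversation_id": "abc123",
    #     "timestamp": "2024-01-01T12:00:00",
    #     "history": [
    #         {"role": "user", "content": "Hello", "timestamp": "2024-01-01T12:00:00"}
    #     ]
    # }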
    def get_chat_history(self, conversation_id: Optional[str] = None) -> Tuple[bool, Any]:
        """Load chat histories from the dataset, optionally filtered by conversation_id"""
try:
logger.info(f"Attempting to get chat history from dataset {self.dataset_name}")
# Get all files from repository
files = self.api.list_repo_files(
repo_id=self.dataset_name,
repo_type="dataset"
)
            # Keep only files from the chat history directory, using the same
            # prefix that save_chat_history writes to
            chat_files = [f for f in files if f.startswith(f"{self.chat_history_path}/")]
            logger.info(f"Found {len(chat_files)} files in {self.chat_history_path}")
if conversation_id:
chat_files = [f for f in chat_files if conversation_id in f]
if not chat_files:
logger.warning("No chat history files found")
return True, []
chat_histories = []
with tempfile.TemporaryDirectory() as temp_dir:
for file in chat_files:
if file.endswith(".gitkeep"):
continue
try:
local_file = self.api.hf_hub_download(
repo_id=self.dataset_name,
filename=file,
repo_type="dataset",
local_dir=temp_dir
)
with open(local_file, "r", encoding="utf-8") as f:
chat_data = json.load(f)
logger.debug(f"Loaded chat data: {chat_data}") # Debug log
if not isinstance(chat_data, dict):
logger.error(f"Chat data is not a dictionary in {file}")
continue
# Get messages from either 'messages' or 'history' key
messages = None
if "messages" in chat_data:
messages = chat_data["messages"]
elif "history" in chat_data:
messages = chat_data["history"]
if not messages:
logger.error(f"No messages found in {file}")
continue
if not isinstance(messages, list):
logger.error(f"Messages is not a list in {file}")
continue
# Create standardized format
standardized_data = {
"conversation_id": chat_data.get("conversation_id", "unknown"),
"timestamp": chat_data.get("timestamp", datetime.now().isoformat()),
"messages": messages
}
chat_histories.append(standardized_data)
logger.info(f"Successfully loaded chat data from {file}")
except json.JSONDecodeError as e:
logger.error(f"Invalid JSON in file {file}: {str(e)}")
continue
except Exception as e:
logger.error(f"Error processing file {file}: {e}")
continue
if not chat_histories:
logger.warning("No valid chat histories found")
else:
logger.info(f"Successfully loaded {len(chat_histories)} chat histories")
return True, chat_histories
except Exception as e:
logger.error(f"Error getting chat history: {str(e)}")
return False, str(e)
def upload_document(self, file_path: str, document_id: Optional[str] = None) -> Tuple[bool, str]:
"""
Upload document to the dataset
Args:
file_path: Path to the document file
document_id: Document identifier (if None, uses filename)
Returns:
(success, message)
"""
try:
if not os.path.exists(file_path):
return False, f"File not found: {file_path}"
# Use filename as document_id if not specified
if document_id is None:
document_id = os.path.basename(file_path)
# Add timestamp to filename
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"documents/{document_id}_{timestamp}{os.path.splitext(file_path)[1]}"
# Upload file
self.api.upload_file(
path_or_fileobj=file_path,
path_in_repo=filename,
repo_id=self.dataset_name,
repo_type="dataset"
)
return True, f"Document uploaded successfully: {filename}"
except Exception as e:
return False, f"Error uploading document: {str(e)}"
def test_dataset_connection(token: Optional[str] = None) -> Tuple[bool, str]:
"""
Test function to check dataset connection
Args:
token: Hugging Face Hub access token
Returns:
(success, message)
"""
try:
manager = DatasetManager(token=token)
success, message = manager.init_dataset_structure()
if not success:
return False, message
print(f"Initialization test: {message}")
return True, "Dataset connection is working"
except Exception as e:
return False, f"Dataset connection error: {str(e)}"
if __name__ == "__main__":
# Test connection
success, message = test_dataset_connection()
print(message)
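    # Illustrative usage sketch (assumes HF_TOKEN and DATASET_ID in
    # config.settings point at a readable dataset repo):
    #
    #   manager = DatasetManager()
    #   ok, result = manager.download_vector_store()
    #   if ok:
    #       print(f"Loaded vector store with {result.index.ntotal} vectors")
    #   else:
    #       print(f"Download failed: {result}")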