# Datasets:
# File size: 4,972 Bytes
# Revision: c2786b1
import os
import glob
import json
import joblib
import torch
from transformers import AutoModel
from tqdm import tqdm
# Route all HTTP(S) traffic (HuggingFace model download) through a local proxy.
# NOTE(review): hardcoded localhost:1080 is environment-specific and overwrites
# any proxy already set in the environment — confirm this is intended.
os.environ['HTTP_PROXY'] = 'http://localhost:1080'
os.environ['HTTPS_PROXY'] = 'http://localhost:1080'
# --- Configuration Section ---
# Model settings
MODEL_NAME = "jinaai/jina-embeddings-v3"  # HF model id for AutoModel.from_pretrained
TRUST_REMOTE_CODE = True  # this model ships custom code on the Hub — required to load it
TRUNCATE_DIM = 256  # output embedding dimension passed to model.encode(truncate_dim=...)
# Data processing settings
DATA_ROOT_DIR = "./data/California_ISO/weather"
# Matches one level of subdirectories under DATA_ROOT_DIR (the subdirectory
# name is later used as a context prefix for the texts).
FILE_PATTERN = os.path.join(DATA_ROOT_DIR, "*", "fast_general_*.json")
BATCH_SIZE = 5000 # Number of texts processed in single batch (adjust based on GPU memory)
def load_embedding_model(model_name, device):
    """
    Load a text embedding model from the HuggingFace Hub and move it to `device`.

    Args:
        model_name: HuggingFace model identifier (e.g. "jinaai/jina-embeddings-v3").
        device: torch device string ("cuda" or "cpu").

    Returns:
        The model, in eval mode, on the requested device.

    Raises:
        SystemExit: if the model cannot be downloaded or loaded.
    """
    print(f"Loading model '{model_name}' to device '{device}'...")
    try:
        model = AutoModel.from_pretrained(
            model_name,
            trust_remote_code=TRUST_REMOTE_CODE
        ).to(device)
    except Exception as e:
        print(f"Error: Failed to load model. {e}")
        print("Please ensure 'transformers' and 'torch' libraries are installed, and internet connection is working.")
        # Raise SystemExit instead of calling the `exit()` builtin: `exit` is
        # injected by the `site` module for interactive use and is absent when
        # Python runs with -S or in some frozen environments.
        raise SystemExit(1)
    model.eval()  # inference mode: disables dropout / training-only behavior
    print("Model loaded successfully.")
    return model
def process_and_embed_files(model, file_list, batch_size, truncate_dim):
    """
    Generate embeddings for the texts in each JSON file and save them as .pkl.

    Each input file is expected to map timestamp -> {key: text}. For every
    timestamp, all of its texts are embedded (prefixed with the parent
    directory name as context) and the corresponding slice of the embedding
    matrix is stored under that timestamp. The result dict is written with
    joblib next to the source file.

    Args:
        model: embedding model exposing `encode(texts, truncate_dim=...)`
            returning a 2-D, row-per-text array-like.
        file_list: paths of JSON files to process.
        batch_size: number of timestamps aggregated into one encode() call.
        truncate_dim: embedding dimension forwarded to `encode`.
    """
    if not file_list:
        print("No matching files found. Please check FILE_PATTERN and DATA_ROOT_DIR settings.")
        return
    print(f"\nFound {len(file_list)} files. Starting processing...")
    # Progress bar over the whole file list
    for file_path in tqdm(file_list, desc="Overall progress"):
        try:
            # Explicit encoding: JSON files are UTF-8 regardless of the
            # platform default locale encoding.
            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)
        except (json.JSONDecodeError, FileNotFoundError) as e:
            print(f"\nWarning: Skipping unreadable file {file_path}: {e}")
            continue
        timestamps = list(data.keys())
        if not timestamps:
            print(f"\nWarning: Skipping empty file {file_path}")
            continue
        # Parent directory name (the source/station the file belongs to) is
        # prepended to every text so embeddings carry their origin as context.
        context_prefix = os.path.basename(os.path.dirname(file_path))
        emb_dict = {}
        # Batch processing over timestamps within a file
        for i in tqdm(range(0, len(timestamps), batch_size),
                      desc=f"Processing {os.path.basename(file_path)}", leave=False):
            batch_timestamps = timestamps[i : i + batch_size]
            # Flatten all texts of the batch into one list; len_list records
            # the cumulative boundary after each timestamp so the flat
            # embedding matrix can be split back per timestamp afterwards.
            all_texts_in_batch = []
            len_list = [0]
            for timestamp in batch_timestamps:
                if isinstance(data.get(timestamp), dict):
                    all_texts_in_batch.extend(data[timestamp].values())
                # Appended for EVERY timestamp (even non-dict/empty entries)
                # so len_list stays aligned with batch_timestamps below.
                len_list.append(len(all_texts_in_batch))
            if not all_texts_in_batch:
                continue
            # Add contextual prefix to each text
            prefixed_texts = [f'{context_prefix}: {text}' for text in all_texts_in_batch]
            # Generate embeddings without building autograd graphs
            with torch.no_grad():
                embeddings = model.encode(
                    prefixed_texts,
                    truncate_dim=truncate_dim
                )
            # Slice the flat embedding matrix back into per-timestamp chunks
            # (empty slice for timestamps that contributed no texts).
            for j, timestamp in enumerate(batch_timestamps):
                start_index = len_list[j]
                end_index = len_list[j + 1]
                emb_dict[timestamp] = embeddings[start_index:end_index, :]
        # Save generated embeddings next to the input file.
        # NOTE(review): the "forecast" -> "embeddings" substitution is a no-op
        # for files matching fast_general_*.json — confirm the intended output
        # naming; as written the output lands as fast_general_*.pkl.
        output_dir = os.path.dirname(file_path)
        base_name = os.path.basename(file_path)
        output_filename = base_name.replace("forecast", "embeddings").replace(".json", ".pkl")
        output_path = os.path.join(output_dir, output_filename)
        with open(output_path, "wb") as f:
            joblib.dump(emb_dict, f)
def main():
    """Entry point: pick a device, load the model, and embed every matching file."""
    # 1. Select the compute device (GPU when available)
    compute_device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using compute device: {compute_device}")
    # 2. Load the embedding model onto that device
    embedder = load_embedding_model(MODEL_NAME, compute_device)
    # 3. Collect the input files in a deterministic (sorted) order
    print(f"Searching for files matching '{FILE_PATTERN}'...")
    matched_files = sorted(glob.glob(FILE_PATTERN))
    # 4. Run the embedding pipeline over every matched file
    process_and_embed_files(embedder, matched_files, BATCH_SIZE, TRUNCATE_DIM)
    print("\nAll files processed successfully!")
# Script entry guard (stray trailing "|" extraction artifact removed —
# it would have been a syntax error).
if __name__ == "__main__":
    main()