|
|
import os
|
|
|
import glob
|
|
|
import json
|
|
|
import joblib
|
|
|
import torch
|
|
|
from transformers import AutoModel
|
|
|
from tqdm import tqdm
|
|
|
|
|
|
# Route HuggingFace Hub (and any other HTTP/HTTPS) traffic through a local proxy
# so the model download in load_embedding_model() can reach the internet.
# NOTE(review): hard-coded to localhost:1080 and set unconditionally — this
# clobbers any proxy already configured in the environment; confirm the proxy
# is actually running wherever this script is deployed.
os.environ['HTTP_PROXY'] = 'http://localhost:1080'
os.environ['HTTPS_PROXY'] = 'http://localhost:1080'
|
|
|
|
|
|
|
|
|
|
|
|
# --- Embedding model configuration ---

# HuggingFace identifier of the text embedding model.
MODEL_NAME = "jinaai/jina-embeddings-v3"
# The model ships custom modeling code, so from_pretrained() needs
# trust_remote_code=True to load it.
TRUST_REMOTE_CODE = True
# Output embedding dimensionality; passed to model.encode(truncate_dim=...).
TRUNCATE_DIM = 256

# --- Input data configuration ---

# Root directory containing one subdirectory per context (the subdirectory
# name is later used as a text prefix — see process_and_embed_files).
DATA_ROOT_DIR = "./data/California_ISO/weather"
# Glob matching the JSON files to embed, one directory level below the root.
FILE_PATTERN = os.path.join(DATA_ROOT_DIR, "*", "fast_general_*.json")
# Number of timestamps whose texts are embedded per encode() call.
BATCH_SIZE = 5000
|
|
|
|
|
|
def load_embedding_model(model_name, device):
    """Load a text embedding model and move it to the specified device.

    Args:
        model_name: HuggingFace model identifier (e.g. MODEL_NAME).
        device: torch device string, e.g. "cuda" or "cpu".

    Returns:
        The loaded model, switched to eval mode.

    Raises:
        SystemExit: if the model cannot be downloaded or instantiated.
    """
    print(f"Loading model '{model_name}' to device '{device}'...")
    try:
        # Keep the try-body minimal: only the download/instantiation can
        # legitimately fail with the "check your installation / network" advice
        # printed below.
        model = AutoModel.from_pretrained(
            model_name,
            trust_remote_code=TRUST_REMOTE_CODE
        ).to(device)
    except Exception as e:
        print(f"Error: Failed to load model. {e}")
        print("Please ensure 'transformers' and 'torch' libraries are installed, and internet connection is working.")
        # exit() is injected by the `site` module and is not guaranteed to
        # exist (e.g. under `python -S` or in frozen builds); raising
        # SystemExit is the robust equivalent.
        raise SystemExit(1)
    model.eval()  # inference only — disable dropout / training-mode behavior
    print("Model loaded successfully.")
    return model
|
|
|
|
|
|
def process_and_embed_files(model, file_list, batch_size, truncate_dim):
    """Generate embeddings for the texts in each JSON file and save results.

    Each input file maps timestamp -> {key: text}. For every timestamp, all of
    its texts are embedded in one batch (each text prefixed with the parent
    directory name as context) and stored as one array slice; the per-file
    result dict {timestamp: embeddings} is dumped with joblib to a .pkl file
    in the same directory as the input.

    Args:
        model: embedding model exposing encode(texts, truncate_dim=...).
        file_list: paths of JSON files to process; empty list is a no-op.
        batch_size: number of timestamps embedded per encode() call.
        truncate_dim: embedding dimensionality passed through to encode().
    """
    if not file_list:
        print("No matching files found. Please check FILE_PATTERN and DATA_ROOT_DIR settings.")
        return

    print(f"\nFound {len(file_list)} files. Starting processing...")

    for file_path in tqdm(file_list, desc="Overall progress"):
        try:
            # Explicit encoding: don't depend on the platform default for JSON.
            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)
        except (json.JSONDecodeError, FileNotFoundError) as e:
            print(f"\nWarning: Skipping unreadable file {file_path}: {e}")
            continue

        timestamps = list(data.keys())
        if not timestamps:
            print(f"\nWarning: Skipping empty file {file_path}")
            continue

        # The parent directory name serves as a context prefix for every text.
        context_prefix = os.path.basename(os.path.dirname(file_path))

        emb_dict = {}

        for i in tqdm(range(0, len(timestamps), batch_size), desc=f"Processing {os.path.basename(file_path)}", leave=False):
            batch_timestamps = timestamps[i : i + batch_size]

            all_texts_in_batch = []
            valid_timestamps = []  # only timestamps that contributed texts
            len_list = [0]  # cumulative text counts: slice bounds per timestamp
            for timestamp in batch_timestamps:
                if isinstance(data.get(timestamp), dict):
                    all_texts_in_batch.extend(data[timestamp].values())
                    valid_timestamps.append(timestamp)
                    len_list.append(len(all_texts_in_batch))

            if not all_texts_in_batch:
                continue

            prefixed_texts = [f'{context_prefix}: {text}' for text in all_texts_in_batch]

            # Inference only: no autograd bookkeeping needed.
            with torch.no_grad():
                embeddings = model.encode(
                    prefixed_texts,
                    truncate_dim=truncate_dim
                )

            # BUGFIX: slice boundaries must be paired with the timestamps that
            # actually contributed texts. The previous code enumerated the full
            # batch, so a single non-dict entry shifted every later slice and
            # could raise IndexError on len_list[j + 1].
            for j, timestamp in enumerate(valid_timestamps):
                emb_dict[timestamp] = embeddings[len_list[j]:len_list[j + 1], :]

        output_dir = os.path.dirname(file_path)
        base_name = os.path.basename(file_path)
        # NOTE(review): inputs match "fast_general_*.json"; if a base name does
        # not contain "forecast" this replace() is a no-op and the output keeps
        # the input's stem — confirm the intended naming scheme.
        output_filename = base_name.replace("forecast", "embeddings").replace(".json", ".pkl")
        output_path = os.path.join(output_dir, output_filename)

        with open(output_path, "wb") as f:
            joblib.dump(emb_dict, f)
|
|
|
|
|
|
def main():
    """Entry point: pick a device, load the model, find input files, embed them."""
    # Prefer the GPU whenever torch can see one.
    compute_device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using compute device: {compute_device}")

    embedder = load_embedding_model(MODEL_NAME, compute_device)

    # Sort for a deterministic processing order across runs.
    print(f"Searching for files matching '{FILE_PATTERN}'...")
    matched_files = sorted(glob.glob(FILE_PATTERN))

    process_and_embed_files(embedder, matched_files, BATCH_SIZE, TRUNCATE_DIM)

    print("\nAll files processed successfully!")
|
|
|
|
|
|
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()