|
|
|
|
|
|
|
|
import json
|
|
|
import glob
|
|
|
import joblib
|
|
|
import os
|
|
|
import torch
|
|
|
import numpy as np
|
|
|
from transformers import AutoModel
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Root directory containing per-borough weather report subdirectories.
DATA_DIR = "./weather/weather_report"

# Hugging Face model id used to embed the forecast texts.
EMBEDDING_MODEL = "jinaai/jina-embeddings-v3"

# Matryoshka truncation: keep only the first 256 embedding dimensions.
TRUNCATE_DIM = 256

# Number of timestamps whose texts are encoded per model.encode() call.
BATCH_SIZE = 1500

# Data sources ("boroughs") to process; currently a single feed.
BOROUGHS = ['formal_report']

# Borough whose embedding files drive the merge step (filenames and
# timestamp keys are taken from this one and mapped onto the others).
BASE_BOROUGH = 'formal_report'
|
|
|
|
|
|
def initialize_model():
    """Load the sentence-embedding model onto the best available device.

    Returns:
        The ``AutoModel`` instance, moved to the first CUDA device when
        one is available, otherwise left on the CPU.
    """
    print("Initializing embedding model...")

    # Prefer the first GPU; fall back to CPU when CUDA is unavailable.
    if torch.cuda.is_available():
        device = "cuda:0"
    else:
        device = "cpu"
    print(f"Using device: {device}")

    # trust_remote_code is required: jina-embeddings-v3 ships custom model code.
    embedder = AutoModel.from_pretrained(EMBEDDING_MODEL, trust_remote_code=True)
    embedder = embedder.to(device)

    print("Model loaded successfully.")
    return embedder
|
|
|
|
|
|
def process_dynamic_data_for_borough(model, borough):
    """Embed every dynamic forecast JSON file for one borough.

    For each ``wm_messages_??.json`` file under ``DATA_DIR/<borough>``,
    the texts of every timestamp are encoded in batches and a dict mapping
    timestamp -> embedding matrix is saved next to the source file as a
    joblib ``.pkl``.

    Args:
        model: Loaded embedding model exposing
            ``encode(texts, truncate_dim=...)`` that returns a 2-D array
            with one row per input text.
        borough: Sub-directory name under ``DATA_DIR`` to process.
    """
    print(f"\n--- Starting processing for city: {borough} ---")
    borough_path = os.path.join(DATA_DIR, borough)
    json_files = glob.glob(os.path.join(borough_path, "wm_messages_??.json"))

    if not json_files:
        print(f"No dynamic forecast JSON files found for {borough}. Skipping.")
        return

    # Hoisted loop invariant: prefix prepended to every text so the
    # embeddings are city-aware.
    city_prefix = borough.replace("-", " ").title()

    for file_path in json_files:
        print(f"\nProcessing file: {file_path}")
        # Explicit encoding so decoding does not depend on the platform locale.
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        timestamps = list(data.keys())
        if not timestamps:
            print(f"File {file_path} is empty. Skipping.")
            continue
        print(f"Loaded {len(data)} records from {file_path}")

        emb_dict = {}
        num_batches = (len(timestamps) + BATCH_SIZE - 1) // BATCH_SIZE
        for i in range(0, len(timestamps), BATCH_SIZE):
            batch_timestamps = timestamps[i:i + BATCH_SIZE]
            print(f" Processing batch {i // BATCH_SIZE + 1}/{num_batches}")

            # Flatten all texts of the batch into one list; len_list records
            # the running offsets so each timestamp's rows can be sliced back
            # out of the stacked embedding matrix afterwards.
            batch_texts, len_list = [], [0]
            for ts in batch_timestamps:
                texts = [f'{city_prefix}: {text}' for text in data[ts].values()]
                batch_texts.extend(texts)
                len_list.append(len(batch_texts))

            embeddings = model.encode(batch_texts, truncate_dim=TRUNCATE_DIM)

            for j, ts in enumerate(batch_timestamps):
                start_idx, end_idx = len_list[j], len_list[j + 1]
                emb_dict[ts] = embeddings[start_idx:end_idx, :]

        # NOTE(review): the input glob is "wm_messages_??.json", which never
        # contains "forecast", so the first replace is a no-op and the output
        # is "wm_messages_XX.pkl" -- confirm this matches the
        # "fast_general_*_embeddings_*.pkl" pattern merge_borough_embeddings()
        # globs for.
        output_file_name = os.path.basename(file_path).replace("forecast", "embeddings").replace(".json", ".pkl")
        output_path = os.path.join(borough_path, output_file_name)

        with open(output_path, "wb") as f:
            joblib.dump(emb_dict, f)
        print(f"Saved city-specific embeddings to {output_path}")
|
|
|
|
|
|
def merge_borough_embeddings():
    """Merge per-borough embedding files into one file per base file.

    For every embedding file of ``BASE_BOROUGH``, the corresponding file of
    every borough in ``BOROUGHS`` is loaded; for each timestamp present in
    *all* boroughs the embedding matrices are concatenated row-wise and the
    merged dict is written under ``DATA_DIR/weather``.
    """
    print("\n--- Starting merging of city embeddings ---")
    # NOTE(review): this reads from DATA_DIR/weather/<borough>, while
    # process_dynamic_data_for_borough() writes to DATA_DIR/<borough> with a
    # "wm_messages_*.pkl" name -- confirm the paths and glob patterns agree.
    base_path = os.path.join(DATA_DIR, 'weather', BASE_BOROUGH)
    base_embedding_files = glob.glob(os.path.join(base_path, "fast_general_*_embeddings_*.pkl"))

    if not base_embedding_files:
        print("No base embedding files found to merge. Skipping.")
        return

    for base_file in base_embedding_files:
        print(f"\nMerging based on: {base_file}")

        # Load the matching file of every borough; skip this base file when
        # any borough is missing its counterpart.
        data_files = {}
        try:
            for borough in BOROUGHS:
                target_file = base_file.replace(BASE_BOROUGH, borough)
                with open(target_file, "rb") as f:
                    data_files[borough] = joblib.load(f)
        except FileNotFoundError as e:
            print(f"Could not find a corresponding file for {e.filename}. Skipping this year.")
            continue

        merged_embeddings = {}
        for timestamp in data_files[BASE_BOROUGH]:
            # Merge only timestamps present in every borough. Guarding the
            # concatenation here also prevents np.concatenate([]) from
            # raising ValueError when a timestamp is missing somewhere.
            if all(timestamp in data_files[b] for b in BOROUGHS):
                merged_embeddings[timestamp] = np.concatenate(
                    [data_files[b][timestamp] for b in BOROUGHS], axis=0
                )

        output_filename = os.path.basename(base_file)
        final_output_path = os.path.join(DATA_DIR, 'weather', output_filename)
        joblib.dump(merged_embeddings, final_output_path)
        print(f"Saved final merged embeddings to {final_output_path}")
|
|
|
|
|
|
def main():
    """Run the full pipeline: embed each borough, then merge the results."""
    embedder = initialize_model()

    # Step 1: generate the per-city embedding files.
    for city in BOROUGHS:
        process_dynamic_data_for_borough(embedder, city)

    # Step 2: combine them into the final merged files.
    merge_borough_embeddings()

    print("\nAll processing complete.")
|
|
|
|
|
|
# Script entry point: run the embedding + merge pipeline.
if __name__ == "__main__":
    main()