# Extraction artifact from the original hosting page (dataset header, "File size: 6,066 Bytes",
# commit f634eb1, and a line-number gutter) — preserved here as a comment; not part of the program.
#!/usr/bin/env python3
import json
import glob
import joblib
import os
import torch
import numpy as np # Required for array operations
from transformers import AutoModel
# Set proxy (replace with your proxy address and port)
# os.environ['HTTP_PROXY'] = 'http://localhost:1080'
# os.environ['HTTPS_PROXY'] = 'http://localhost:1080'
# --- Configuration ---
# Assuming your data directory structure is as follows:
# ./data/
# ├── san-francisco/
# │ ├── fast_general_..._forecast_2017.json
# │ └── ...
# ├── san-diego/
# │ ├── fast_general_..._forecast_2017.json
# │ └── ...
# └── id_info_imputed.json
DATA_DIR = "./weather/weather_report" # Your main data directory
EMBEDDING_MODEL = "jinaai/jina-embeddings-v3"  # HF model id; loaded with trust_remote_code=True below
TRUNCATE_DIM = 256  # Matryoshka truncation dimension passed to model.encode
BATCH_SIZE = 1500 # Adjust based on your hardware
# --- New Configuration: Define the cities ---
# NOTE(review): despite the "cities"/BOROUGHS naming, the single entry is a report
# category ('formal_report') — confirm the intended directory names under DATA_DIR.
BOROUGHS = ['formal_report']
BASE_BOROUGH = 'formal_report' # We will use this city's files as the baseline for finding corresponding files
def initialize_model():
    """Load the embedding model from EMBEDDING_MODEL, preferring a CUDA device.

    Returns:
        The loaded model, moved to ``cuda:0`` when available, else CPU.
    """
    print("Initializing embedding model...")
    # Pick the compute device up front so the model lands there in one move.
    target_device = "cuda:0" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {target_device}")
    # trust_remote_code is required: this model ships custom modeling code.
    embedder = AutoModel.from_pretrained(EMBEDDING_MODEL, trust_remote_code=True)
    embedder = embedder.to(target_device)
    print("Model loaded successfully.")
    return embedder
def process_dynamic_data_for_borough(model, borough):
    """Generate and save embeddings for every forecast JSON file of one city.

    Reads each ``wm_messages_??.json`` file under ``DATA_DIR/<borough>``,
    embeds every message (prefixed with the city name) in batches of
    BATCH_SIZE timestamps, and dumps a ``{timestamp: ndarray}`` dict next to
    the source file as a ``.pkl``.

    Args:
        model: Embedding model exposing ``encode(texts, truncate_dim=...)``.
        borough: Subdirectory name under DATA_DIR holding the JSON files.
    """
    print(f"\n--- Starting processing for city: {borough} ---")
    borough_path = os.path.join(DATA_DIR, borough)
    json_files = glob.glob(os.path.join(borough_path, "wm_messages_??.json"))
    if not json_files:
        print(f"No dynamic forecast JSON files found for {borough}. Skipping.")
        return
    for file_path in json_files:
        print(f"\nProcessing file: {file_path}")
        with open(file_path, "r") as f:
            # Assumes schema {timestamp: {message_id: text, ...}} — TODO confirm.
            data = json.load(f)
        timestamps = list(data.keys())
        if not timestamps:
            print(f"File {file_path} is empty. Skipping.")
            continue
        print(f"Loaded {len(data)} records from {file_path}")
        emb_dict = {}
        # Ceiling division: number of BATCH_SIZE-sized timestamp batches.
        num_batches = (len(timestamps) + BATCH_SIZE - 1) // BATCH_SIZE
        for i in range(0, len(timestamps), BATCH_SIZE):
            batch_timestamps = timestamps[i:i + BATCH_SIZE]
            print(f"  Processing batch {i // BATCH_SIZE + 1}/{num_batches}")
            # len_list records cumulative offsets so each timestamp's slice of
            # the flat embedding matrix can be recovered after encoding.
            batch_texts, len_list = [], [0]
            for ts in batch_timestamps:
                # Prefix every message with the human-readable city name.
                texts = [f'{borough.replace("-", " ").title()}: {text}' for text in data[ts].values()]
                batch_texts.extend(texts)
                len_list.append(len(batch_texts))
            embeddings = model.encode(batch_texts, truncate_dim=TRUNCATE_DIM)
            for j, ts in enumerate(batch_timestamps):
                start_idx, end_idx = len_list[j], len_list[j + 1]
                emb_dict[ts] = embeddings[start_idx:end_idx, :]
        # NOTE(review): the glob above matches "wm_messages_??.json", which never
        # contains "forecast", so this replace is a no-op and the outputs are
        # named "wm_messages_NN.pkl" — merge_borough_embeddings() globs for
        # "fast_general_*_embeddings_*.pkl" and will never find them. Confirm
        # the intended input/output naming scheme.
        output_file_name = os.path.basename(file_path).replace("forecast", "embeddings").replace(".json", ".pkl")
        output_path = os.path.join(borough_path, output_file_name)
        with open(output_path, "wb") as f:
            joblib.dump(emb_dict, f)
        print(f"Saved city-specific embeddings to {output_path}")
def merge_borough_embeddings():
    """Merge per-city embedding pickles into one file per base pickle.

    For every ``fast_general_*_embeddings_*.pkl`` under the base city's
    directory, loads the corresponding pickle from each city in BOROUGHS,
    keeps only timestamps present in every city, concatenates the per-city
    arrays along axis 0, and writes the merged dict to ``DATA_DIR/weather``.
    """
    print("\n--- Starting merging of city embeddings ---")
    # NOTE(review): process_dynamic_data_for_borough() writes its pickles to
    # DATA_DIR/<borough>, but this reads from DATA_DIR/'weather'/<borough> —
    # confirm which directory layout is intended.
    base_path = os.path.join(DATA_DIR, 'weather', BASE_BOROUGH)
    base_embedding_files = glob.glob(os.path.join(base_path, "fast_general_*_embeddings_*.pkl"))
    if not base_embedding_files:
        print("No base embedding files found to merge. Skipping.")
        return
    for base_file in base_embedding_files:
        print(f"\nMerging based on: {base_file}")
        # Load the corresponding file for every city; skip this base file if
        # any city is missing its counterpart.
        data_files = {}
        try:
            for borough in BOROUGHS:
                # Derive each city's path by substituting the city name into
                # the base path (assumes BASE_BOROUGH appears exactly once).
                target_file = base_file.replace(BASE_BOROUGH, borough)
                with open(target_file, "rb") as f:
                    data_files[borough] = joblib.load(f)
        except FileNotFoundError as e:
            print(f"Could not find a corresponding file for {e.filename}. Skipping this year.")
            continue
        merged_embeddings = {}
        # Only timestamps present in every city are merged; others are dropped.
        for timestamp in data_files[BASE_BOROUGH].keys():
            if all(timestamp in data_files[b] for b in BOROUGHS):
                embeddings_to_merge = [data_files[b][timestamp] for b in BOROUGHS]
                # Stack the per-city message embeddings along the row axis.
                merged_embeddings[timestamp] = np.concatenate(embeddings_to_merge, axis=0)
        output_filename = os.path.basename(base_file)
        final_output_path = os.path.join(DATA_DIR, 'weather', output_filename)
        joblib.dump(merged_embeddings, final_output_path)
        print(f"Saved final merged embeddings to {final_output_path}")
def main():
    """Run the full pipeline: per-city embedding, then cross-city merging."""
    model = initialize_model()
    # Step 1: embed every city's forecast files individually.
    for borough in BOROUGHS:
        process_dynamic_data_for_borough(model, borough)
    # Step 2: combine the per-city pickles into merged files.
    merge_borough_embeddings()
    print("\nAll processing complete.")
# Fix: removed the stray trailing "|" artifact after main() that made the
# file a syntax error.
if __name__ == "__main__":
    main()