# NOTE(review): removed non-code artifacts from a scraped code-viewer page
# (page title, file size, commit hash, line-number gutter) — they were not
# part of the original Python source and broke the file's syntax.
import json
import joblib
import numpy as np
import torch
import os
from transformers import AutoModel
# Route HTTP(S) traffic through a local proxy — presumably so the
# AutoModel.from_pretrained() download below can reach the Hugging Face hub;
# verify against your network setup.
# NOTE(review): hard-coded localhost:1080 is a module-level side effect that
# applies to every process importing this file — replace with your proxy
# address/port, or remove these lines if you have direct internet access.
os.environ['HTTP_PROXY'] = 'http://localhost:1080'
os.environ['HTTPS_PROXY'] = 'http://localhost:1080'
def create_static_embeddings(
    input_path="./data/California_ISO/static_info_except_battery.json",
    output_path="./data/California_ISO/static_info_embeddings_except_battery.pkl",
):
    """Embed the text fields of a static-info JSON file and pickle the result.

    Loads the static-information dictionary from ``input_path``, replaces
    ``general_info``, ``downtime_prompt`` and every text field under
    ``channel_info`` with 256-dimensional embeddings produced by the
    ``jinaai/jina-embeddings-v3`` model, and saves the modified dictionary
    to ``output_path`` via joblib.

    Args:
        input_path (str): Path to the input static_info JSON file. Must
            contain the keys 'general_info', 'downtime_prompt' and
            'channel_info' (a dict of {channel: {field: text}}).
        output_path (str): Path to save the output .pkl file with embeddings.

    Raises:
        FileNotFoundError: If ``input_path`` does not exist.
        KeyError: If a required top-level key is missing from the JSON.
    """
    # --- 1. Initialize model (GPU if available) ---
    print("Initializing embedding model...")
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")
    # trust_remote_code is required: jina-embeddings-v3 ships custom modeling code.
    model = AutoModel.from_pretrained(
        "jinaai/jina-embeddings-v3",
        trust_remote_code=True
    ).to(device=device)

    # --- 2. Load data and flatten every text field into one batch ---
    print(f"Loading data from '{input_path}'...")
    # Text mode with explicit UTF-8: the file is JSON text, not binary.
    with open(input_path, "r", encoding="utf-8") as f:
        static_info = json.load(f)
    texts_to_embed, channel_info_paths = _collect_texts(static_info)
    print(f"Found {len(texts_to_embed)} text snippets to embed.")

    # --- 3. Generate embeddings in a single batched call ---
    print("Generating embeddings...")
    embeddings = model.encode(
        texts_to_embed,
        truncate_dim=256  # Matryoshka truncation to 256 dims, as in the notebook
    )
    print(f"Embeddings generated with shape: {embeddings.shape}")

    # --- 4. Replace text with embeddings (dictionary modified in place) ---
    print("Replacing text data with embeddings...")
    # Slicing (not indexing) keeps each entry as a 2D (1, 256) array.
    static_info['general_info'] = embeddings[0:1, :]
    static_info['downtime_prompt'] = embeddings[1:2, :]
    _assign_channel_embeddings(static_info, channel_info_paths, embeddings)

    # --- 5. Save the result ---
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    print(f"Saving embeddings to '{output_path}'...")
    with open(output_path, "wb") as f:
        joblib.dump(static_info, f)
    print("Process completed successfully!")


def _collect_texts(static_info):
    """Flatten all embeddable text fields of ``static_info`` into one list.

    Returns:
        tuple: ``(texts, paths)`` — ``texts`` starts with the two general
        prompts followed by every channel text; ``paths`` is a parallel list
        of ``(channel_name, sub_key)`` for the channel entries only, e.g.
        [('CFH_HQ', 'location_description'), ('CFH_HQ', 'panel_type'), ...].
    """
    texts = [
        static_info['general_info'],
        static_info['downtime_prompt'],
    ]
    paths = []
    for channel_name, details in static_info['channel_info'].items():
        for sub_key, text_value in details.items():
            texts.append(text_value)
            paths.append((channel_name, sub_key))
    return texts, paths


def _assign_channel_embeddings(static_info, channel_info_paths, embeddings,
                               start_index=2):
    """Group per-field embedding rows by channel and store them in place.

    Rows ``embeddings[start_index:]`` correspond one-to-one with
    ``channel_info_paths`` (the first ``start_index`` rows are the general
    prompts). Each channel's dict of text fields is replaced with a single
    ``(num_fields, 256)`` array.
    """
    grouped = {key: [] for key in static_info['channel_info']}
    for i, (channel_name, _sub_key) in enumerate(channel_info_paths):
        row = start_index + i
        # Slice keeps the row as a 2D (1, 256) array.
        grouped[channel_name].append(embeddings[row:row + 1, :])
    for channel_name, rows in grouped.items():
        # vstack yields (num_fields, 256) directly — replaces the original
        # squeeze-then-reshape dance, with identical results for one field.
        stacked = np.vstack(rows)
        static_info['channel_info'][channel_name] = stacked
        print(f"Channel '{channel_name}' embeddings shape: {stacked.shape}")
# Script entry point — run the full embedding pipeline with default paths.
# (Removed a trailing " |" line-number-gutter artifact that made this line
# a syntax error.)
if __name__ == "__main__":
    create_static_embeddings()