import json
import os

import joblib
import numpy as np
import torch
from transformers import AutoModel

# Route HTTP(S) traffic through a local proxy so the Hugging Face model
# download works behind a firewall. Replace with your proxy address/port,
# or remove these lines if direct access is available.
os.environ['HTTP_PROXY'] = 'http://localhost:1080'
os.environ['HTTPS_PROXY'] = 'http://localhost:1080'


def create_static_embeddings(
    input_path="./data/Bear_room/static_info.json",
    output_path="./data/Bear_room/static_info_embeddings.pkl",
):
    """
    Load static information from a JSON file, generate embeddings for the
    text fields, and save the result as a pickle file (via joblib).

    The output mirrors the structure of the input JSON, with each text value
    replaced by its embedding:
      - 'general_info' and 'downtime_prompt' become (1, 256) arrays;
      - each entry of 'channel_info' becomes an (n_sub_keys, 256) array
        stacking the embeddings of that channel's sub-field texts.

    Args:
        input_path (str): Path to the input static_info.json file.
        output_path (str): Path to save the output .pkl file with embeddings.
    """
    # --- 1. Initialize Model ---
    print("Initializing embedding model...")

    # Set up device (use GPU if available)
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    # trust_remote_code is required: jina-embeddings-v3 ships custom
    # modeling code alongside its weights.
    model = AutoModel.from_pretrained(
        "jinaai/jina-embeddings-v3", trust_remote_code=True
    ).to(device=device)

    # --- 2. Load and Prepare Data ---
    print(f"Loading data from '{input_path}'...")
    # Explicit encoding so the JSON load does not depend on the platform locale.
    with open(input_path, "r", encoding="utf-8") as f:
        static_info = json.load(f)

    # Flat list of texts to embed: the two general snippets first, then
    # every channel sub-field in dict iteration order.
    texts_to_embed = [
        static_info['general_info'],
        static_info['downtime_prompt'],
    ]

    # Remember the path of each channel text so the flat embeddings array
    # can be mapped back into the nested dict later.
    # e.g. [('CFH_HQ', 'location_description'), ('CFH_HQ', 'panel_type'), ...]
    channel_info_paths = []
    for channel_name, details_dict in static_info['channel_info'].items():
        for sub_key, text_value in details_dict.items():
            texts_to_embed.append(text_value)
            channel_info_paths.append((channel_name, sub_key))

    print(f"Found {len(texts_to_embed)} text snippets to embed.")

    # --- 3. Generate Embeddings ---
    print("Generating embeddings...")
    embeddings = model.encode(
        texts_to_embed,
        truncate_dim=256,  # Matryoshka truncation to 256 dimensions
    )
    print(f"Embeddings generated with shape: {embeddings.shape}")

    # --- 4. Replace Text with Embeddings in the Dictionary ---
    print("Replacing text data with embeddings...")
    # Slicing (not indexing) keeps each embedding as a 2D (1, 256) array.
    # The original static_info dictionary is modified in place.
    static_info['general_info'] = embeddings[0:1, :]
    static_info['downtime_prompt'] = embeddings[1:2, :]

    # Channel embeddings start after the two general snippets.
    channel_embeddings_start_index = 2
    channel_embeddings_dict = {key: [] for key in static_info['channel_info']}
    for i, (channel_name, sub_key) in enumerate(channel_info_paths):
        # Map position i in the flat embeddings array back to its channel.
        embedding_index = channel_embeddings_start_index + i
        channel_embeddings_dict[channel_name].append(
            embeddings[embedding_index:embedding_index + 1, :]
        )

    for channel_name, embeddings_list in channel_embeddings_dict.items():
        # Stack the per-sub-key (1, 256) rows into one (n, 256) array.
        # squeeze(axis=1) removes only the singleton row axis, so a channel
        # with a single sub-key still yields (1, 256); a bare np.squeeze
        # would collapse that case to a 1-D (256,) array, producing an
        # inconsistent shape across channels.
        stacked_embedding = np.squeeze(np.stack(embeddings_list, axis=0), axis=1)
        static_info['channel_info'][channel_name] = stacked_embedding
        print(f"Channel '{channel_name}' embeddings shape: {stacked_embedding.shape}")

    # --- 5. Save the Result ---
    # Ensure the output directory exists; dirname is '' for a bare filename,
    # and os.makedirs('') would raise, so guard against that.
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    print(f"Saving embeddings to '{output_path}'...")
    with open(output_path, "wb") as f:
        joblib.dump(static_info, f)

    print("Process completed successfully!")


if __name__ == "__main__":
    create_static_embeddings()