# Room-occupancy report embedding script (source snapshot: commit 40fa79e, 5,433 bytes).
import glob
import json
import os
import sys

import joblib
import torch
from tqdm import tqdm
from transformers import AutoModel
# --- Configuration Section ---
# Model related configurations
MODEL_NAME = "jinaai/jina-embeddings-v3"
TRUST_REMOTE_CODE = True
# Embeddings are truncated to this many dimensions when encoding.
TRUNCATE_DIM = 256
# Data processing configurations
# The script will search for occupy.json files in all room subdirectories,
# i.e. one directory level below DATA_ROOT_DIR (e.g. .../room123/occupy.json).
DATA_ROOT_DIR = "./data/Bear_room/hetero/room/room_report/formal_report"
# NOTE: the original pattern used '***', which glob matches exactly like '*'
# (it is never recursive); a single '*' states the intent without the
# misleading pseudo-recursive form.
FILE_PATTERN = os.path.join(DATA_ROOT_DIR, "*/occupy.json")
BATCH_SIZE = 2000  # Timestamps encoded per model call; tune to your GPU memory
def load_embedding_model(model_name, device):
    """
    Load the text embedding model and move it to the specified device.

    Args:
        model_name: Hugging Face model identifier to load.
        device: Torch device string (e.g. 'cuda:0' or 'cpu').

    Returns:
        The loaded model, in evaluation mode, placed on ``device``.

    Raises:
        SystemExit: With exit code 1 if the model cannot be loaded.
    """
    print(f"Loading model '{model_name}' to device '{device}'...")
    try:
        model = AutoModel.from_pretrained(
            model_name,
            trust_remote_code=TRUST_REMOTE_CODE
        ).to(device)
        model.eval()  # Inference only: disables dropout etc.
        print("Model loaded successfully.")
        return model
    except Exception as e:
        # Loading can fail for many reasons (network, missing deps, bad model
        # name); report the cause and abort rather than continue without a model.
        print(f"Error: Failed to load model. {e}")
        print("Please ensure 'transformers' and 'torch' libraries are installed and network connection is working.")
        # Bare exit() is a site-module convenience and is not guaranteed to
        # exist in every run mode; sys.exit() is the reliable form.
        sys.exit(1)
def process_and_embed_files(model, file_list, batch_size, truncate_dim):
    """
    Generate embeddings for the texts in each occupancy JSON file and save
    one ``.pkl`` file of per-timestamp embedding arrays per input file.

    Each input file maps timestamps to a dict of {key: text}. All texts for a
    batch of timestamps are flattened into one list and encoded in a single
    model call, then split back into per-timestamp slices via cumulative
    offsets.

    Args:
        model: Embedding model exposing ``encode(texts, truncate_dim=...)``.
        file_list: Paths of ``occupy.json`` files to process.
        batch_size: Number of timestamps encoded per model call.
        truncate_dim: Dimension the embeddings are truncated to.

    Returns:
        None. Results are written under the output base directory, mirroring
        the input directory layout.
    """
    if not file_list:
        print("No matching files found. Please check FILE_PATTERN and DATA_ROOT_DIR configurations.")
        return
    print(f"\nFound {len(file_list)} files, starting processing...")
    # Use tqdm to create a progress bar for file processing
    for file_path in tqdm(file_list, desc="Overall progress"):
        try:
            # Explicit encoding: JSON is UTF-8; do not rely on the platform default.
            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)
        except (json.JSONDecodeError, FileNotFoundError) as e:
            print(f"\nWarning: Skipping file {file_path} as it couldn't be read or parsed: {e}")
            continue
        timestamps = list(data.keys())
        if not timestamps:
            print(f"\nWarning: Skipping empty file {file_path}")
            continue
        # Extract room number from file path as text context
        # Example: './data/Bear_room/.../room123/occupy.json' -> 'room123'
        room_name = os.path.basename(os.path.dirname(file_path))
        context_prefix = room_name
        print(f"Add prefix in context: {context_prefix}")
        emb_dict = {}
        # Process all timestamps in a file in batches
        for i in tqdm(range(0, len(timestamps), batch_size), desc=f"Processing {os.path.basename(file_path)}", leave=False):
            batch_timestamps = timestamps[i : i + batch_size]
            # Combine all daily texts in the batch into one big list.
            all_texts_in_batch = []
            # len_list records cumulative end offsets per timestamp so the
            # embedding matrix can be split back afterwards.
            len_list = [0]
            for timestamp in batch_timestamps:
                # Non-dict entries contribute zero texts (and get an empty
                # slice when the embeddings are split below).
                if isinstance(data.get(timestamp), dict):
                    daily_texts = list(data[timestamp].values())
                    all_texts_in_batch.extend(daily_texts)
                len_list.append(len(all_texts_in_batch))
            if not all_texts_in_batch:
                continue
            # Add the room-name context prefix to each text
            prefixed_texts = [f'{context_prefix}: {text}' for text in all_texts_in_batch]
            # Encode using the model; no gradients needed for inference.
            with torch.no_grad():
                embeddings = model.encode(
                    prefixed_texts,
                    truncate_dim=truncate_dim
                )
            # Split the large embedding block back into per-timestamp arrays.
            for j, timestamp in enumerate(batch_timestamps):
                start_index = len_list[j]
                end_index = len_list[j + 1]
                emb_dict[timestamp] = embeddings[start_index:end_index, :]
        # Save embedding results, mirroring the input layout under the output root.
        output_base_dir = "./data/Bear_room/hetero/room/report_embedding/formal_report"
        relative_path = os.path.relpath(os.path.dirname(file_path), DATA_ROOT_DIR)
        output_dir = os.path.join(output_base_dir, relative_path)
        os.makedirs(output_dir, exist_ok=True)
        base_name = os.path.basename(file_path)
        # NOTE(review): 'forecast' never occurs in 'occupy.json', so the first
        # replace is a no-op here — presumably kept for forecast-report inputs;
        # verify against the other pipeline scripts.
        output_filename = base_name.replace("forecast", "embeddings").replace(".json", ".pkl")
        output_path = os.path.join(output_dir, output_filename)
        with open(output_path, "wb") as f:
            joblib.dump(emb_dict, f)
def main():
    """Entry point: pick a device, load the model, glob the input files,
    and run the embedding-and-save pipeline over them."""
    # Step 1: choose the computing device.
    # NOTE(review): GPU index 1 is hard-coded — confirm the host has >= 2 GPUs.
    if torch.cuda.is_available():
        device = "cuda:1"
    else:
        device = "cpu"
    print(f"Using computing device: {device}")

    # Step 2: load the embedding model onto that device.
    embedder = load_embedding_model(MODEL_NAME, device)

    # Step 3: collect the input files in a deterministic order.
    print(f"Searching for files matching '{FILE_PATTERN}'...")
    matched_files = glob.glob(FILE_PATTERN)
    matched_files.sort()

    # Step 4: embed every file and persist the results.
    process_and_embed_files(embedder, matched_files, BATCH_SIZE, TRUNCATE_DIM)
    print("\nAll files processed successfully!")


if __name__ == "__main__":
    main()