|
|
import os |
|
|
import glob |
|
|
import json |
|
|
import joblib |
|
|
import torch |
|
|
from transformers import AutoModel |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face model id of the text-embedding model.
MODEL_NAME = "jinaai/jina-embeddings-v3"

# jina-embeddings-v3 ships custom modeling code, so remote code must be trusted.
TRUST_REMOTE_CODE = True

# Matryoshka truncation: keep only the first 256 dimensions of each embedding.
TRUNCATE_DIM = 256

# Root directory holding the per-room report JSON files.
DATA_ROOT_DIR = "./data/Bear_room/hetero/room/room_report/formal_report"

# Glob pattern for the input files.
# NOTE(review): "***" is not a special glob token — since glob.glob() is
# called without recursive=True, it matches exactly one path component,
# like "*". If a recursive search was intended, this should be
# "**/occupy.json" with glob(..., recursive=True) — confirm with the data layout.
FILE_PATTERN = os.path.join(DATA_ROOT_DIR, "***/occupy.json")

# Number of timestamps embedded per model.encode() call.
BATCH_SIZE = 2000
|
|
|
|
|
def load_embedding_model(model_name, device):
    """Load the text embedding model and move it to the specified device.

    Args:
        model_name: Hugging Face model identifier to download/load.
        device: torch device string (e.g. "cuda:0" or "cpu") to place the model on.

    Returns:
        The loaded model, in eval mode, on ``device``.

    Raises:
        SystemExit: if the model cannot be loaded; a load failure is fatal
            for this batch script.
    """
    print(f"Loading model '{model_name}' to device '{device}'...")
    try:
        # Keep the try body minimal: only the load/move can reasonably fail here.
        model = AutoModel.from_pretrained(
            model_name,
            trust_remote_code=TRUST_REMOTE_CODE,
        ).to(device)
    except Exception as e:
        # Broad catch is deliberate: any failure (network, missing deps, bad
        # weights) should abort the script with a readable message.
        print(f"Error: Failed to load model. {e}")
        print("Please ensure 'transformers' and 'torch' libraries are installed and network connection is working.")
        # BUGFIX: raise SystemExit instead of calling exit(), which is a
        # site-module convenience and not guaranteed to exist (e.g. python -S).
        raise SystemExit(1)
    model.eval()  # inference only: disables dropout/batch-norm updates
    print("Model loaded successfully.")
    return model
|
|
|
|
|
def process_and_embed_files(model, file_list, batch_size, truncate_dim):
    """Embed the texts of each JSON report file and save per-file results.

    Each input file maps timestamp -> {key: text}. Every text is prefixed
    with the room name (the file's parent directory) for context, embedded
    in batches of ``batch_size`` timestamps, and the per-timestamp embedding
    matrices are joblib-dumped to a mirrored directory tree under the
    output root.

    Args:
        model: embedding model exposing ``encode(texts, truncate_dim=...)``.
        file_list: paths of the JSON files to process.
        batch_size: number of timestamps embedded per ``encode`` call.
        truncate_dim: output embedding dimensionality (Matryoshka truncation).
    """
    if not file_list:
        print("No matching files found. Please check FILE_PATTERN and DATA_ROOT_DIR configurations.")
        return

    print(f"\nFound {len(file_list)} files, starting processing...")

    for file_path in tqdm(file_list, desc="Overall progress"):
        try:
            with open(file_path, "r") as f:
                data = json.load(f)
        except (json.JSONDecodeError, FileNotFoundError) as e:
            print(f"\nWarning: Skipping file {file_path} as it couldn't be read or parsed: {e}")
            continue

        timestamps = list(data.keys())
        if not timestamps:
            print(f"\nWarning: Skipping empty file {file_path}")
            continue

        # The room name (parent directory of the file) is prepended to every
        # text so the embedding carries the room context.
        room_name = os.path.basename(os.path.dirname(file_path))
        context_prefix = room_name
        print(f"Add prefix in context: {context_prefix}")

        emb_dict = {}

        for i in tqdm(range(0, len(timestamps), batch_size), desc=f"Processing {os.path.basename(file_path)}", leave=False):
            batch_timestamps = timestamps[i : i + batch_size]

            # Flatten the batch's texts, remembering for each timestamp that
            # actually holds a dict which slice of rows belongs to it.
            all_texts_in_batch = []
            offsets = [0]           # cumulative text counts, one entry per valid timestamp
            valid_timestamps = []   # timestamps whose value is a dict
            for timestamp in batch_timestamps:
                entry = data.get(timestamp)
                if isinstance(entry, dict):
                    all_texts_in_batch.extend(entry.values())
                    offsets.append(len(all_texts_in_batch))
                    valid_timestamps.append(timestamp)

            if not all_texts_in_batch:
                continue

            prefixed_texts = [f'{context_prefix}: {text}' for text in all_texts_in_batch]

            with torch.no_grad():
                embeddings = model.encode(
                    prefixed_texts,
                    truncate_dim=truncate_dim
                )

            # BUGFIX: map each *valid* timestamp back to its rows. The
            # original zipped the offsets against all batch_timestamps, so a
            # single non-dict value misaligned every later slice (or raised
            # IndexError on the offsets list).
            for j, timestamp in enumerate(valid_timestamps):
                emb_dict[timestamp] = embeddings[offsets[j]:offsets[j + 1], :]

        # Mirror the input directory layout under the output root.
        output_base_dir = "./data/Bear_room/hetero/room/report_embedding/formal_report"
        relative_path = os.path.relpath(os.path.dirname(file_path), DATA_ROOT_DIR)
        output_dir = os.path.join(output_base_dir, relative_path)
        os.makedirs(output_dir, exist_ok=True)

        base_name = os.path.basename(file_path)
        # NOTE(review): inputs match "occupy.json", so the "forecast" ->
        # "embeddings" rename never fires and outputs are named "occupy.pkl".
        # Confirm whether the file pattern or this rename is the stale one.
        output_filename = base_name.replace("forecast", "embeddings").replace(".json", ".pkl")
        output_path = os.path.join(output_dir, output_filename)

        with open(output_path, "wb") as f:
            joblib.dump(emb_dict, f)
|
|
|
|
|
def select_device():
    """Return the compute device string for this run.

    Prefers the second GPU ("cuda:1", the script's historical target) when
    more than one GPU exists, otherwise the first GPU, otherwise the CPU.
    """
    if torch.cuda.is_available():
        # BUGFIX: is_available() does not guarantee a second GPU exists;
        # requesting "cuda:1" on a single-GPU machine fails at first use.
        return "cuda:1" if torch.cuda.device_count() > 1 else "cuda:0"
    return "cpu"


def main():
    """Main execution function: pick a device, load the model, find the
    input files, and embed them."""
    device = select_device()
    print(f"Using computing device: {device}")

    model = load_embedding_model(MODEL_NAME, device)

    print(f"Searching for files matching '{FILE_PATTERN}'...")
    # sorted() makes the processing order deterministic across runs.
    files_to_process = sorted(glob.glob(FILE_PATTERN))

    process_and_embed_files(model, files_to_process, BATCH_SIZE, TRUNCATE_DIM)

    print("\nAll files processed successfully!")
|
|
|
|
|
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()