"""One-off script: embed egocentric spatial-reasoning prompts with jina-clip-v2.

NOTE(review): this file previously carried two large commented-out one-off
scripts (an ARKitScenes color/depth/point-map -> .safetensors converter, and a
JSONL annotation rewrite that replaced 'image_path'/'point_map_path' keys with
a 'safetensors_path'). They were dead code and have been deleted; recover them
from version control if they are ever needed again.
"""

from transformers import AutoModel, AutoTokenizer

# Single source of truth for the checkpoint id (was repeated inline twice).
MODEL_ID = 'jinaai/jina-clip-v2'

# trust_remote_code=True is required because jina-clip-v2 ships custom modeling
# code on the Hugging Face Hub. SECURITY NOTE(review): this downloads and
# executes remote Python — pin a specific `revision=` before running this
# anywhere untrusted.
model = AutoModel.from_pretrained(MODEL_ID, trust_remote_code=True)
# Tokenizer is loaded but unused below; kept because a later edit of this
# scratch script may call model/tokenizer pairs — TODO confirm or drop.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)

# Egocentric "situation + spatial question" prompts. Several prompts lack a
# space after the sentence-final period; they are preserved verbatim so the
# embeddings match whatever downstream expects — do not "fix" the text.
sentences = [
    'I am standing at the corner of a table to grab the blanket on it.What is under the bed in front of me?',
    'I opened up the door in front of me and to my right was the sink.Which direction should I go if I want to use mirror?',
    'I am standing next the green chair on the left side as I write with my left hand on the whiteboard.From my current position, can I be able to water the plant without moving?',
    'I am lying on the bed facing the dresser.Which direction should I go if I want to exit the room?',
    'I am about to sit on the toilet in front of the washing machine.Can I see myself in the mirror?',
    'I just got up from the toilet and grabbed something from the cabinet in front of me.Which the door to my left or right?',
    'I step away from the chair behind me and grab another chair that is in front of me.How many chairs are behind me?',
    'I am facing a cabinet, standing in between a trash can and an office chair.What color is the table?',
    'I am standing by the nightstand on my left facing the armchair.What shape is the footrest in front of me?',
    "I am grabbing paper towel from the paper towel dispenser after emptying the trash can to my immediate right.Is the trashcan in front of me the same color as the sink in my six o'clock direction?",
    'I am sitting on the chair thinking about a huge mess I made by placing clothes on the floor to my left.What direction would I have to face, to look out the window?',
    'I am baking cookies.How many doors does the fridge have in front of me?',
]

# encode_text embeds the whole batch; presumably returns an array of shape
# (num_sentences, embed_dim) — the print below only inspects that shape.
text_embeddings = model.encode_text(sentences)
print(text_embeddings.shape)