diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..c7974f6758914bb6dc39856240324070b1a0aa8c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+imgs/.ipynb_checkpoints/AffordanceNet-checkpoint.jpg filter=lfs diff=lfs merge=lfs -text
+imgs/AffordanceNet.jpg filter=lfs diff=lfs merge=lfs -text
+imgs/AffordanceNet.png filter=lfs diff=lfs merge=lfs -text
+vis_output/.ipynb_checkpoints/my_workspace-checkpoint.JPG filter=lfs diff=lfs merge=lfs -text
+vis_output/.ipynb_checkpoints/my_workspace_masked_img_0-checkpoint.jpg filter=lfs diff=lfs merge=lfs -text
+vis_output/my_workspace.JPG filter=lfs diff=lfs merge=lfs -text
+vis_output/my_workspace_masked_img_0.jpg filter=lfs diff=lfs merge=lfs -text
diff --git a/.ipynb_checkpoints/batch_generate-checkpoint.py b/.ipynb_checkpoints/batch_generate-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fe8790f1f71715367f73d861ce7fd410019ff47
--- /dev/null
+++ b/.ipynb_checkpoints/batch_generate-checkpoint.py
@@ -0,0 +1,401 @@
+"""
+Batch affordance mask generation for per-step datasets.
+
+Reads a per-step dataset (converted by convert_lerobot_to_perstep.py) and
+generates affordance masks for every image_primary.jpg and image_wrist.jpg
+using AffordanceVLM.
+
+Input structure:
+ {data_dir}/
+ ├── meta_info.h5
+ └── episodes/
+ └── {episode_id:06d}/
+ └── steps/
+ └── {step_id:04d}/
+ ├── other.h5 # language_instruction
+ ├── image_primary.jpg
+ └── image_wrist.jpg
+
+Output structure:
+ {save_dir}/
+    └── episodes/
+        └── {episode_id:06d}/
+            └── steps/
+                └── {step_id:04d}/
+                    ├── image_primary_mask.png    # binary 0/255
+                    └── image_wrist_mask.png
+
+Usage:
+ python batch_generate.py \
+ --data_dir /path/to/perstep_dataset \
+ --save_dir /path/to/mask_output \
+ --start_episode 0 --end_episode 10
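+
+Loading a generated mask downstream (a minimal sketch; the path shown is illustrative):
+    import cv2
+    mask = cv2.imread(
+        "/path/to/mask_output/episodes/000000/steps/0000/image_primary_mask.png",
+        cv2.IMREAD_GRAYSCALE,
+    )
+    mask_bool = mask > 127  # masks are stored as binary 0/255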
+"""
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+import cv2
+import h5py
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(
+ description="Batch affordance mask generation for per-step datasets"
+ )
+ # Model arguments (same as chat.py)
+ parser.add_argument("--version", default="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B")
+ parser.add_argument(
+ "--precision", default="bf16", type=str,
+ choices=["fp32", "bf16", "fp16"],
+ )
+ parser.add_argument("--image_size", default=1024, type=int)
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument("--vision-tower", default="openai/clip-vit-large-patch14", type=str)
+ parser.add_argument("--local-rank", default=0, type=int)
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type", default="llava_v1", type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+
+ # Batch processing arguments
+ parser.add_argument("--data_dir", type=str, required=True,
+ help="Root of per-step dataset (contains episodes/)")
+ parser.add_argument("--save_dir", type=str, required=True,
+ help="Output directory for masks")
+ parser.add_argument("--prompt_template", type=str,
+ default="{}",
+ help="Template wrapping language_instruction. Use {} as placeholder.")
+ parser.add_argument("--start_episode", type=int, default=None,
+ help="First episode index to process (inclusive)")
+ parser.add_argument("--end_episode", type=int, default=None,
+ help="Last episode index to process (exclusive)")
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ x = (x - pixel_mean) / pixel_std
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+
+def load_model(args):
+ """Load tokenizer and model, identical to chat.py."""
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+ kwargs = {"torch_dtype": torch_dtype}
+ if args.load_in_4bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ })
+ elif args.load_in_8bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ })
+
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version,
+ low_cpu_mem_usage=True,
+ vision_tower=args.vision_tower,
+ seg_token_idx=args.seg_token_idx,
+ aff_token_idx=args.aff_token_idx,
+ **kwargs,
+ )
+
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+
+ if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+ elif args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+ elif args.precision == "fp32":
+ model = model.float().cuda()
+
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(device=args.local_rank)
+
+ clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+ transform = ResizeLongestSide(args.image_size)
+
+ model.eval()
+ return model, tokenizer, clip_image_processor, transform
+
+
+def build_prompt(text: str, args) -> str:
+ """Build the full conversation prompt from a text query."""
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + text
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "")
+ return conv.get_prompt()
+
+
+def infer_single_image(
+ image_path: str,
+ prompt_str: str,
+ model,
+ tokenizer,
+ clip_image_processor,
+ transform,
+ args,
+) -> "np.ndarray | None":
+ """Run inference on a single image. Returns binary mask (H, W) uint8 0/255 or None."""
+ image_np = cv2.imread(image_path)
+ if image_np is None:
+ print(f" [WARNING] Cannot read image: {image_path}")
+ return None
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+
+ # CLIP preprocessing
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")["pixel_values"][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ # SAM preprocessing
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ # Tokenize
+ input_ids = tokenizer_image_token(prompt_str, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+
+ # Inference
+ with torch.no_grad():
+ output_ids, pred_masks = model.evaluate(
+ image_clip,
+ image,
+ input_ids,
+ resize_list,
+ original_size_list,
+ max_new_tokens=512,
+ tokenizer=tokenizer,
+ )
+
+ # Merge all predicted masks via union (logical OR)
+ h, w = original_size_list[0]
+ merged = np.zeros((h, w), dtype=bool)
+ has_mask = False
+ for pred_mask in pred_masks:
+ if pred_mask.shape[0] == 0:
+ continue
+ mask_np = pred_mask.detach().cpu().numpy()[0] # (H, W)
+ merged |= (mask_np > 0)
+ has_mask = True
+
+ if not has_mask:
+ return None
+
+ return (merged.astype(np.uint8) * 255)
+
+
+def read_language_instruction(h5_path: str) -> str:
+ """Read language_instruction from other.h5."""
+ with h5py.File(h5_path, "r") as f:
+ instr = f["language_instruction"][()]
+ if isinstance(instr, bytes):
+ instr = instr.decode("utf-8")
+ return str(instr)
+
+
+def main(args):
+ args = parse_args(args)
+ data_dir = Path(args.data_dir)
+ save_dir = Path(args.save_dir)
+
+ episodes_dir = data_dir / "episodes"
+ if not episodes_dir.is_dir():
+ print(f"Error: episodes directory not found at {episodes_dir}")
+ sys.exit(1)
+
+ # Collect and sort episode directories
+ episode_dirs = sorted(
+ [d for d in episodes_dir.iterdir() if d.is_dir()],
+ key=lambda p: p.name,
+ )
+
+ # Filter by episode range
+ if args.start_episode is not None or args.end_episode is not None:
+ start = args.start_episode if args.start_episode is not None else 0
+ end = args.end_episode if args.end_episode is not None else len(episode_dirs)
+ episode_dirs = [
+ d for d in episode_dirs
+ if start <= int(d.name) < end
+ ]
+
+ print(f"Data dir : {data_dir}")
+ print(f"Save dir : {save_dir}")
+ print(f"Episodes : {len(episode_dirs)}")
+ print(f"Prompt : {args.prompt_template}")
+ print()
+
+ # Load model
+ print("Loading model...")
+ model, tokenizer, clip_image_processor, transform = load_model(args)
+ print("Model loaded.\n")
+
+ total_steps = 0
+ empty_mask_count = 0
+
+ for ep_dir in episode_dirs:
+ episode_id = ep_dir.name # e.g. "000000"
+ steps_dir = ep_dir / "steps"
+ if not steps_dir.is_dir():
+ print(f" [WARNING] No steps/ in {ep_dir}, skipping.")
+ continue
+
+ step_dirs = sorted(
+ [d for d in steps_dir.iterdir() if d.is_dir()],
+ key=lambda p: p.name,
+ )
+
+ for step_dir in step_dirs:
+ step_id = step_dir.name # e.g. "0000"
+
+ # Read language instruction
+ other_h5 = step_dir / "other.h5"
+ if not other_h5.exists():
+ print(f" [WARNING] Missing other.h5 in {step_dir}, skipping.")
+ continue
+ language_instruction = read_language_instruction(str(other_h5))
+ # debug
+ # print(language_instruction)
+
+ # Build prompt
+ query_text = args.prompt_template.format(language_instruction)
+ prompt_str = build_prompt(query_text, args)
+
+ # Output directory (same structure as input: episodes/{episode_id}/steps/{step_id}/)
+ out_dir = save_dir / "episodes" / episode_id / "steps" / step_id
+ out_dir.mkdir(parents=True, exist_ok=True)
+
+ # Process both cameras
+ for cam_name in ("image_primary", "image_wrist"):
+ img_path = step_dir / f"{cam_name}.jpg"
+ mask_path = out_dir / f"{cam_name}_mask.png"
+
+ if not img_path.exists():
+ print(f" [WARNING] Missing {img_path}, skipping.")
+ continue
+
+ mask = infer_single_image(
+ str(img_path), prompt_str,
+ model, tokenizer, clip_image_processor, transform, args,
+ )
+
+ if mask is None:
+                    # No mask predicted: save a blank mask (counted in the final summary)
+ h, w = cv2.imread(str(img_path)).shape[:2]
+ mask = np.zeros((h, w), dtype=np.uint8)
+ empty_mask_count += 1
+
+ cv2.imwrite(str(mask_path), mask)
+
+ total_steps += 1
+ if total_steps % 50 == 0:
+ print(f" Processed {total_steps} steps (episode {episode_id}, step {step_id})")
+
+ print(f"Episode {episode_id} done ({len(step_dirs)} steps)")
+
+ print(f"\nFinished. {total_steps} steps processed, {empty_mask_count} empty masks.")
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/.ipynb_checkpoints/batch_generate-checkpoint.sh b/.ipynb_checkpoints/batch_generate-checkpoint.sh
new file mode 100644
index 0000000000000000000000000000000000000000..458f89eacdf098ab25f30f246ed2f6496f4c4e7b
--- /dev/null
+++ b/.ipynb_checkpoints/batch_generate-checkpoint.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Batch generate affordance masks for all four LIBERO subsets sequentially.
+
+SRC_ROOT="/gemini/space/wrz/libero_per_frame"
+TGT_ROOT="/gemini/space/wrz/ragnet_results"
+
+for ds in libero_object libero_goal libero_spatial libero_10; do
+ echo "========== Processing ${ds} =========="
+ CUDA_VISIBLE_DEVICES=0 python batch_generate.py \
+ --data_dir "${SRC_ROOT}/${ds}_converted" \
+ --save_dir "${TGT_ROOT}/${ds}"
+ echo "========== ${ds} done =========="
+ echo
+done
diff --git a/.ipynb_checkpoints/batch_generate_prefill_accelerate-checkpoint.py b/.ipynb_checkpoints/batch_generate_prefill_accelerate-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..849e9699a456afb7b4cc34d75ea0fe3ce7bcf09a
--- /dev/null
+++ b/.ipynb_checkpoints/batch_generate_prefill_accelerate-checkpoint.py
@@ -0,0 +1,418 @@
+"""
+Batch affordance mask generation for per-step datasets (prefill mode).
+
+Reads a per-step dataset (converted by convert_lerobot_to_perstep.py) and
+generates affordance masks for every image_primary.jpg and image_wrist.jpg
+using AffordanceVLM. Unlike batch_generate.py, the assistant response "[AFF]."
+is pre-filled in the prompt, so each image needs only a single forward pass
+instead of autoregressive generation.
+
+Input structure:
+ {data_dir}/
+ ├── meta_info.h5
+ └── episodes/
+ └── {episode_id:06d}/
+ └── steps/
+ └── {step_id:04d}/
+ ├── other.h5 # language_instruction
+ ├── image_primary.jpg
+ └── image_wrist.jpg
+
+Output structure:
+ {save_dir}/
+ └── episodes/
+ └── {episode_id:06d}/
+ └── steps/
+ └── {step_id:04d}/
+ ├── image_primary_mask.png # binary 0/255
+ └── image_wrist_mask.png
+
+Usage:
+ CUDA_VISIBLE_DEVICES=1 python batch_generate_prefill_accelerate.py \
+ --data_dir /gemini/space/wrz/libero_per_frame/libero_spatial_converted \
+ --save_dir /gemini/space/wrz/ragnet_results/libero_spatial
+"""
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+import cv2
+import h5py
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(
+        description="Batch affordance mask generation for per-step datasets (prefill mode)"
+ )
+ # Model arguments (same as chat.py)
+ parser.add_argument("--version", default="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B")
+ parser.add_argument(
+ "--precision", default="bf16", type=str,
+ choices=["fp32", "bf16", "fp16"],
+ )
+ parser.add_argument("--image_size", default=1024, type=int)
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument("--vision-tower", default="openai/clip-vit-large-patch14", type=str)
+ parser.add_argument("--local-rank", default=0, type=int)
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type", default="llava_v1", type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+
+ # Batch processing arguments
+ parser.add_argument("--data_dir", type=str, required=True,
+ help="Root of per-step dataset (contains episodes/)")
+ parser.add_argument("--save_dir", type=str, required=True,
+ help="Output directory for masks")
+ parser.add_argument("--prompt_template", type=str,
+ default="{}",
+ help="Template wrapping language_instruction. Use {} as placeholder.")
+ # "{}"
+ # Segment the most suitable manipulation region on the single target object for the task '{}'.
+ # Segment the affordance map for the task '{}' in this image.
+ # Segment the affordance map of the single target object for the task '{}' in this image.
+ # Given the task instruction '{}', what is the affordance map of the target object in this image? Please output segmentation mask.
+ # Given the task instruction '{}', what is the affordance map of the single target object in this image? There is only one target object. Please output segmentation mask.
+ parser.add_argument("--start_episode", type=int, default=None,
+ help="First episode index to process (inclusive)")
+ parser.add_argument("--end_episode", type=int, default=None,
+ help="Last episode index to process (exclusive)")
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ x = (x - pixel_mean) / pixel_std
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+
+def load_model(args):
+ """Load tokenizer and model, identical to chat.py."""
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+ kwargs = {"torch_dtype": torch_dtype}
+ if args.load_in_4bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ })
+ elif args.load_in_8bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ })
+
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version,
+ low_cpu_mem_usage=True,
+ vision_tower=args.vision_tower,
+ seg_token_idx=args.seg_token_idx,
+ aff_token_idx=args.aff_token_idx,
+ **kwargs,
+ )
+
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+
+ if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+ elif args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+ elif args.precision == "fp32":
+ model = model.float().cuda()
+
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(device=args.local_rank)
+
+ clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+ transform = ResizeLongestSide(args.image_size)
+
+ model.eval()
+ return model, tokenizer, clip_image_processor, transform
+
+
+def build_prompt(text: str, args) -> str:
+ """Build the full conversation prompt from a text query."""
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + text
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "[AFF].")
+ return conv.get_prompt()
+
+
+def infer_single_image(
+ image_path: str,
+ prompt_str: str,
+ model,
+ tokenizer,
+ clip_image_processor,
+ transform,
+ args,
+) -> "np.ndarray | None":
+ """Run inference on a single image. Returns binary mask (H, W) uint8 0/255 or None."""
+ image_np = cv2.imread(image_path)
+ if image_np is None:
+ print(f" [WARNING] Cannot read image: {image_path}")
+ return None
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+
+ # CLIP preprocessing
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")["pixel_values"][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ # SAM preprocessing
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ # Tokenize
+ input_ids = tokenizer_image_token(prompt_str, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+ attention_masks = input_ids.ne(tokenizer.pad_token_id)
+
+ # Prefill inference (single forward pass instead of autoregressive generation)
+ h, w = original_size_list[0]
+ labels = input_ids.clone()
+ offset = torch.LongTensor([0, 1]).cuda()
+ masks_list = [torch.zeros(1, h, w).float().cuda()]
+ label_list = [torch.zeros(h, w).long().cuda()]
+
+ with torch.no_grad():
+ output_dict = model(
+ images=image,
+ images_clip=image_clip,
+ input_ids=input_ids,
+ labels=labels,
+ attention_masks=attention_masks,
+ offset=offset,
+ masks_list=masks_list,
+ label_list=label_list,
+ resize_list=resize_list,
+ inference=True,
+ )
+
+ pred_masks = output_dict["pred_masks"]
+
+ # Merge all predicted masks via union (logical OR)
+ merged = np.zeros((h, w), dtype=bool)
+ has_mask = False
+ for pred_mask in pred_masks:
+ if pred_mask.shape[0] == 0:
+ continue
+ mask_np = pred_mask.detach().cpu().numpy()[0] # (H, W)
+ merged |= (mask_np > 0)
+ has_mask = True
+
+ if not has_mask:
+ return None
+
+ return (merged.astype(np.uint8) * 255)
+
+
+def read_language_instruction(h5_path: str) -> str:
+ """Read language_instruction from other.h5."""
+ with h5py.File(h5_path, "r") as f:
+ instr = f["language_instruction"][()]
+ if isinstance(instr, bytes):
+ instr = instr.decode("utf-8")
+ return str(instr)
+
+
+def main(args):
+ args = parse_args(args)
+ data_dir = Path(args.data_dir)
+ save_dir = Path(args.save_dir)
+
+ episodes_dir = data_dir / "episodes"
+ if not episodes_dir.is_dir():
+ print(f"Error: episodes directory not found at {episodes_dir}")
+ sys.exit(1)
+
+ # Collect and sort episode directories
+ episode_dirs = sorted(
+ [d for d in episodes_dir.iterdir() if d.is_dir()],
+ key=lambda p: p.name,
+ )
+
+ # Filter by episode range
+ if args.start_episode is not None or args.end_episode is not None:
+ start = args.start_episode if args.start_episode is not None else 0
+ end = args.end_episode if args.end_episode is not None else len(episode_dirs)
+ episode_dirs = [
+ d for d in episode_dirs
+ if start <= int(d.name) < end
+ ]
+
+ print(f"Data dir : {data_dir}")
+ print(f"Save dir : {save_dir}")
+ print(f"Episodes : {len(episode_dirs)}")
+ print(f"Prompt : {args.prompt_template}")
+ print()
+
+ # Load model
+ print("Loading model...")
+ model, tokenizer, clip_image_processor, transform = load_model(args)
+ print("Model loaded.\n")
+
+ total_steps = 0
+ empty_mask_count = 0
+
+ for ep_dir in episode_dirs:
+ episode_id = ep_dir.name # e.g. "000000"
+ steps_dir = ep_dir / "steps"
+ if not steps_dir.is_dir():
+ print(f" [WARNING] No steps/ in {ep_dir}, skipping.")
+ continue
+
+ step_dirs = sorted(
+ [d for d in steps_dir.iterdir() if d.is_dir()],
+ key=lambda p: p.name,
+ )
+
+ for step_dir in step_dirs:
+ step_id = step_dir.name # e.g. "0000"
+
+ # Read language instruction
+ other_h5 = step_dir / "other.h5"
+ if not other_h5.exists():
+ print(f" [WARNING] Missing other.h5 in {step_dir}, skipping.")
+ continue
+ language_instruction = read_language_instruction(str(other_h5))
+ # debug
+ # print(language_instruction)
+
+ # Build prompt
+ query_text = args.prompt_template.format(language_instruction)
+ prompt_str = build_prompt(query_text, args)
+
+ # Output directory (same structure as input: episodes/{episode_id}/steps/{step_id}/)
+ out_dir = save_dir / "episodes" / episode_id / "steps" / step_id
+ out_dir.mkdir(parents=True, exist_ok=True)
+
+ # Process both cameras
+ for cam_name in ("image_primary", "image_wrist"):
+ img_path = step_dir / f"{cam_name}.jpg"
+ mask_path = out_dir / f"{cam_name}_mask.png"
+
+ if not img_path.exists():
+ print(f" [WARNING] Missing {img_path}, skipping.")
+ continue
+
+ mask = infer_single_image(
+ str(img_path), prompt_str,
+ model, tokenizer, clip_image_processor, transform, args,
+ )
+
+ if mask is None:
+                    # No mask predicted: save a blank mask (counted in the final summary)
+ h, w = cv2.imread(str(img_path)).shape[:2]
+ mask = np.zeros((h, w), dtype=np.uint8)
+ empty_mask_count += 1
+
+ cv2.imwrite(str(mask_path), mask)
+
+ total_steps += 1
+ if total_steps % 50 == 0:
+ print(f" Processed {total_steps} steps (episode {episode_id}, step {step_id})")
+
+ print(f"Episode {episode_id} done ({len(step_dirs)} steps)")
+
+ print(f"\nFinished. {total_steps} steps processed, {empty_mask_count} empty masks.")
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/.ipynb_checkpoints/chat-checkpoint.py b/.ipynb_checkpoints/chat-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..62d3a4efb378236d4d6ed4b1e917ea32f3731318
--- /dev/null
+++ b/.ipynb_checkpoints/chat-checkpoint.py
@@ -0,0 +1,255 @@
+import argparse
+import os
+import sys
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+
+def parse_args(args):
+    parser = argparse.ArgumentParser(description="AffordanceVLM chat")
+ parser.add_argument("--version", default="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B")
+ parser.add_argument("--vis_save_path", default="./vis_output", type=str)
+ parser.add_argument(
+ "--precision",
+ default="bf16",
+ type=str,
+ choices=["fp32", "bf16", "fp16"],
+ help="precision for inference",
+ )
+ parser.add_argument("--image_size", default=1024, type=int, help="image size")
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument(
+ "--vision-tower", default="openai/clip-vit-large-patch14", type=str
+ )
+ parser.add_argument("--local-rank", default=0, type=int, help="node rank")
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type",
+ default="llava_v1",
+ type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - pixel_mean) / pixel_std
+ # Pad
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+
+def main(args):
+ args = parse_args(args)
+ os.makedirs(args.vis_save_path, exist_ok=True)
+
+ # Create model
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ num_added_tokens = tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ num_added_tokens = tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+ kwargs = {"torch_dtype": torch_dtype}
+ if args.load_in_4bit:
+ kwargs.update(
+ {
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ }
+ )
+ elif args.load_in_8bit:
+ kwargs.update(
+ {
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ }
+ )
+
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version, low_cpu_mem_usage=True, vision_tower=args.vision_tower, seg_token_idx=args.seg_token_idx, aff_token_idx=args.aff_token_idx, **kwargs
+ )
+
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+
+ if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+ elif (
+ args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit)
+ ):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+ elif args.precision == "fp32":
+ model = model.float().cuda()
+
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(device=args.local_rank)
+
+ clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+ transform = ResizeLongestSide(args.image_size)
+
+ model.eval()
+
+ while True:
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = input("Please input your prompt: ")
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + prompt
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "")
+ prompt = conv.get_prompt()
+
+ image_path = input("Please input the image path: ")
+ if not os.path.exists(image_path):
+ print("File not found in {}".format(image_path))
+ continue
+
+ image_np = cv2.imread(image_path)
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+
+ output_ids, pred_masks = model.evaluate(
+ image_clip,
+ image,
+ input_ids,
+ resize_list,
+ original_size_list,
+ max_new_tokens=512,
+ tokenizer=tokenizer,
+ )
+ output_ids = output_ids[0][output_ids[0] != IMAGE_TOKEN_INDEX]
+
+ text_output = tokenizer.decode(output_ids, skip_special_tokens=False)
+        text_output = text_output.replace("\n", "").replace("  ", " ")
+ print("text_output: ", text_output)
+
+ for i, pred_mask in enumerate(pred_masks):
+ if pred_mask.shape[0] == 0:
+ continue
+
+ pred_mask = pred_mask.detach().cpu().numpy()[0]
+ pred_mask = pred_mask > 0
+
+ save_path = "{}/{}_mask_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ cv2.imwrite(save_path, pred_mask * 100)
+ print("{} has been saved.".format(save_path))
+
+ save_path = "{}/{}_masked_img_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ save_img = image_np.copy()
+ save_img[pred_mask] = (
+ image_np * 0.5
+ + pred_mask[:, :, None].astype(np.uint8) * np.array([255, 0, 0]) * 0.5
+ )[pred_mask]
+ save_img = cv2.cvtColor(save_img, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(save_path, save_img)
+ print("{} has been saved.".format(save_path))
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/.ipynb_checkpoints/chat_prefill-checkpoint.py b/.ipynb_checkpoints/chat_prefill-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..323af6bba8cb189584503f4da4780f91292a102c
--- /dev/null
+++ b/.ipynb_checkpoints/chat_prefill-checkpoint.py
@@ -0,0 +1,282 @@
+"""
+Interactive affordance mask generation using prefill mode (single forward pass).
+
+Same interactive workflow as chat.py, but uses prefill inference instead of
+autoregressive generation. The assistant response "[AFF]." is pre-filled in the
+prompt, so the model only does one forward pass to extract mask embeddings.
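+
+Schematically, the two call patterns (a sketch based on chat.py and this file):
+
+    # autoregressive (chat.py)
+    conv.append_message(conv.roles[1], "")
+    output_ids, pred_masks = model.evaluate(..., max_new_tokens=512, tokenizer=tokenizer)
+
+    # prefill (this script)
+    conv.append_message(conv.roles[1], "[AFF].")
+    output_dict = model(..., inference=True)  # single forward pass
+    pred_masks = output_dict["pred_masks"]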
+"""
+
+import argparse
+import os
+import sys
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(description="AffordanceVLM chat (prefill mode)")
+ parser.add_argument("--version", default="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B")
+ parser.add_argument("--vis_save_path", default="./vis_output_prefill", type=str)
+ parser.add_argument(
+ "--precision", default="bf16", type=str,
+ choices=["fp32", "bf16", "fp16"],
+ )
+ parser.add_argument("--image_size", default=1024, type=int)
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument("--vision-tower", default="openai/clip-vit-large-patch14", type=str)
+ parser.add_argument("--local-rank", default=0, type=int)
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type", default="llava_v1", type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ parser.add_argument("--prompt_template", type=str,
+ default="Segment the most suitable manipulation region on the single target object for the task '{}'.",
+ help="Template wrapping language_instruction. Use {} as placeholder.")
+ # Segment the most suitable manipulation region on the single target object for the task '{}'.
+ # Segment the affordance map for the task '{}' in this image.
+ # Segment the affordance map of the single target object for the task '{}' in this image.
+ # Given the task instruction '{}', what is the affordance map of the target object in this image? Please output segmentation mask.
+ # Given the task instruction '{}', what is the affordance map of the single target object in this image? There is only one target object. Please output segmentation mask.
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ x = (x - pixel_mean) / pixel_std
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+
+def main(args):
+ args = parse_args(args)
+ os.makedirs(args.vis_save_path, exist_ok=True)
+
+ # Create model
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+ kwargs = {"torch_dtype": torch_dtype}
+ if args.load_in_4bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ })
+ elif args.load_in_8bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ })
+
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version,
+ low_cpu_mem_usage=True,
+ vision_tower=args.vision_tower,
+ seg_token_idx=args.seg_token_idx,
+ aff_token_idx=args.aff_token_idx,
+ **kwargs,
+ )
+
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+
+ if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+ elif args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+ elif args.precision == "fp32":
+ model = model.float().cuda()
+
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(device=args.local_rank)
+
+ clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+ transform = ResizeLongestSide(args.image_size)
+
+ model.eval()
+
+    # debug: alternative prompt template kept for reference (unused; args.prompt_template is applied below)
+ template = "Given the task instruction '{}', what is the affordance map of the target object in this image? Please output segmentation mask."
+
+ while True:
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = input("Please input your prompt: ")
+        # Apply the prompt template
+ prompt = args.prompt_template.format(prompt)
+
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + prompt
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "[AFF].")
+ prompt = conv.get_prompt()
+
+ image_path = input("Please input the image path: ")
+ if not os.path.exists(image_path):
+ print("File not found in {}".format(image_path))
+ continue
+
+ image_np = cv2.imread(image_path)
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+ h, w = original_size_list[0]
+
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+ attention_masks = input_ids.ne(tokenizer.pad_token_id)
+
+ # Print the full prompt text (prefill mode has no generated text)
+ # debug
+ text_ids = input_ids[0][input_ids[0] != IMAGE_TOKEN_INDEX]
+ text_output = tokenizer.decode(text_ids, skip_special_tokens=False)
+        text_output = text_output.replace("\n", "").replace("  ", " ")
+ print("text_output: ", text_output)
+
+ # Prefill inference
+ labels = input_ids.clone()
+ offset = torch.LongTensor([0, 1]).cuda()
+ masks_list = [torch.zeros(1, h, w).float().cuda()]
+ label_list = [torch.zeros(h, w).long().cuda()]
+
+ with torch.no_grad():
+ output_dict = model(
+ images=image,
+ images_clip=image_clip,
+ input_ids=input_ids,
+ labels=labels,
+ attention_masks=attention_masks,
+ offset=offset,
+ masks_list=masks_list,
+ label_list=label_list,
+ resize_list=resize_list,
+ inference=True,
+ )
+
+ pred_masks = output_dict["pred_masks"]
+
+ for i, pred_mask in enumerate(pred_masks):
+ if pred_mask.shape[0] == 0:
+ continue
+
+ pred_mask = pred_mask.detach().cpu().numpy()[0]
+ pred_mask = pred_mask > 0
+
+ save_path = "{}/{}_mask_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ cv2.imwrite(save_path, pred_mask * 100)
+ print("{} has been saved.".format(save_path))
+
+ save_path = "{}/{}_masked_img_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ save_img = image_np.copy()
+ save_img[pred_mask] = (
+ image_np * 0.5
+ + pred_mask[:, :, None].astype(np.uint8) * np.array([255, 0, 0]) * 0.5
+ )[pred_mask]
+ save_img = cv2.cvtColor(save_img, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(save_path, save_img)
+ print("{} has been saved.".format(save_path))
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/.ipynb_checkpoints/train_aff-checkpoint.py b/.ipynb_checkpoints/train_aff-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..87f22173252054d43f44bf78cd8bdecf479e5b1b
--- /dev/null
+++ b/.ipynb_checkpoints/train_aff-checkpoint.py
@@ -0,0 +1,620 @@
+import argparse
+import os
+import shutil
+import sys
+import time
+from functools import partial
+
+import deepspeed
+import numpy as np
+import torch
+import tqdm
+import transformers
+from peft import LoraConfig, get_peft_model
+from torch.utils.tensorboard import SummaryWriter
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from utils.dataset import HybridDataset, ValDataset, collate_fn
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ AverageMeter, ProgressMeter, Summary, dict_to_cuda,
+ intersectionAndUnionGPU)
+
+from utils.aff_seg_dataset import AffValDataset
+from utils.reason_aff_dataset import ReasonAffValDataset
+
+
+def parse_args(args):
+    parser = argparse.ArgumentParser(description="AffordanceVLM Model Training")
+ parser.add_argument("--local_rank", default=0, type=int, help="node rank")
+ parser.add_argument(
+ "--version", default="liuhaotian/llava-llama-2-13b-chat-lightning-preview"
+ )
+ parser.add_argument("--vis_save_path", default="./vis_output", type=str)
+ parser.add_argument(
+ "--precision",
+ default="bf16",
+ type=str,
+ choices=["fp32", "bf16", "fp16"],
+ help="precision for inference",
+ )
+ parser.add_argument("--image_size", default=1024, type=int, help="image size")
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument(
+ "--vision-tower", default="openai/clip-vit-large-patch14", type=str
+ )
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+
+ parser.add_argument(
+ "--dataset", default="sem_seg||refer_seg||vqa||reason_seg", type=str
+ )
+ parser.add_argument("--sample_rates", default="9,3,3,1", type=str)
+ parser.add_argument(
+ "--sem_seg_data",
+ default="ade20k||cocostuff||pascal_part||paco_lvis||mapillary",
+ type=str,
+ )
+ parser.add_argument(
+ "--refer_seg_data", default="refclef||refcoco||refcoco+||refcocog", type=str
+ )
+ parser.add_argument("--vqa_data", default="llava_instruct_150k", type=str)
+ parser.add_argument("--reason_seg_data", default="ReasonSeg|train", type=str)
+ parser.add_argument("--aff_seg_data", default="handal", type=str)
+ parser.add_argument("--aff_sample_rates", default="1", type=str)
+ parser.add_argument("--reason_aff_data", default="handal_hard_reasoning", type=str)
+ parser.add_argument("--reason_aff_sample_rates", default="1", type=str)
+ parser.add_argument("--val_dataset", default="ReasonSeg|val", type=str)
+ parser.add_argument("--dataset_dir", default="./dataset", type=str)
+ parser.add_argument("--log_base_dir", default="./runs", type=str)
+ parser.add_argument("--exp_name", default="lisa", type=str)
+ parser.add_argument("--epochs", default=10, type=int)
+ parser.add_argument("--steps_per_epoch", default=500, type=int)
+ parser.add_argument(
+ "--batch_size", default=2, type=int, help="batch size per device per step"
+ )
+ parser.add_argument(
+ "--grad_accumulation_steps",
+ default=10,
+ type=int,
+ )
+ parser.add_argument("--val_batch_size", default=1, type=int)
+ parser.add_argument("--workers", default=4, type=int)
+ parser.add_argument("--lr", default=0.0003, type=float)
+ parser.add_argument("--ce_loss_weight", default=1.0, type=float)
+ parser.add_argument("--dice_loss_weight", default=0.5, type=float)
+ parser.add_argument("--bce_loss_weight", default=2.0, type=float)
+ parser.add_argument("--lora_alpha", default=16, type=int)
+ parser.add_argument("--lora_dropout", default=0.05, type=float)
+ parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str)
+ parser.add_argument("--explanatory", default=0.1, type=float)
+ parser.add_argument("--beta1", default=0.9, type=float)
+ parser.add_argument("--beta2", default=0.95, type=float)
+ parser.add_argument("--num_classes_per_sample", default=3, type=int)
+ parser.add_argument("--exclude_val", action="store_true", default=False)
+ parser.add_argument("--no_eval", action="store_true", default=False)
+ parser.add_argument("--eval_only", action="store_true", default=False)
+ parser.add_argument("--eval_affordance", action="store_true", default=False)
+ parser.add_argument("--eval_reason_aff", action="store_true", default=False)
+ parser.add_argument("--vision_pretrained", default="PATH_TO_SAM_ViT-H", type=str)
+ parser.add_argument("--out_dim", default=256, type=int)
+ parser.add_argument("--resume", default="", type=str)
+ parser.add_argument("--print_freq", default=1, type=int)
+ parser.add_argument("--start_epoch", default=0, type=int)
+ parser.add_argument("--gradient_checkpointing", action="store_true", default=True)
+ parser.add_argument("--train_mask_decoder", action="store_true", default=True)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument("--auto_resume", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type",
+ default="llava_v1",
+ type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ return parser.parse_args(args)
+
+
+def main(args):
+ args = parse_args(args)
+ args.log_dir = os.path.join(args.log_base_dir, args.exp_name)
+ if args.local_rank == 0:
+ os.makedirs(args.log_dir, exist_ok=True)
+ writer = SummaryWriter(args.log_dir)
+ else:
+ writer = None
+
+ # Create model
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ num_added_tokens = tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ num_added_tokens = tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ if args.use_mm_start_end:
+ tokenizer.add_tokens(
+ [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
+ )
+
+ model_args = {
+ "train_mask_decoder": args.train_mask_decoder,
+ "out_dim": args.out_dim,
+ "ce_loss_weight": args.ce_loss_weight,
+ "dice_loss_weight": args.dice_loss_weight,
+ "bce_loss_weight": args.bce_loss_weight,
+ "seg_token_idx": args.seg_token_idx,
+ "aff_token_idx": args.aff_token_idx,
+ "vision_pretrained": args.vision_pretrained,
+ "vision_tower": args.vision_tower,
+ "use_mm_start_end": args.use_mm_start_end,
+ }
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version, torch_dtype=torch_dtype, low_cpu_mem_usage=True, **model_args
+ )
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.enable_input_require_grads()
+ model.gradient_checkpointing_enable()
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype, device=args.local_rank)
+ if not args.eval_only:
+ model.get_model().initialize_lisa_modules(model.get_model().config)
+
+ for p in vision_tower.parameters():
+ p.requires_grad = False
+ for p in model.get_model().mm_projector.parameters():
+ p.requires_grad = False
+
+ conversation_lib.default_conversation = conversation_lib.conv_templates[
+ args.conv_type
+ ]
+
+ lora_r = args.lora_r
+ if lora_r > 0:
+
+ def find_linear_layers(model, lora_target_modules):
+ cls = torch.nn.Linear
+ lora_module_names = set()
+ for name, module in model.named_modules():
+ if (
+ isinstance(module, cls)
+ and all(
+ [
+ x not in name
+ for x in [
+ "visual_model",
+ "vision_tower",
+ "mm_projector",
+ "text_hidden_fcs",
+ ]
+ ]
+ )
+ and any([x in name for x in lora_target_modules])
+ ):
+ lora_module_names.add(name)
+ return sorted(list(lora_module_names))
+
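+        # e.g. with the default --lora_target_modules "q_proj,v_proj" and a LLaMA-style
+        # backbone, find_linear_layers yields names like "model.layers.0.self_attn.q_proj",
+        # while "visual_model" (SAM), the CLIP vision_tower, mm_projector and
+        # text_hidden_fcs stay excluded from LoRA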
+ lora_alpha = args.lora_alpha
+ lora_dropout = args.lora_dropout
+ lora_target_modules = find_linear_layers(
+ model, args.lora_target_modules.split(",")
+ )
+ lora_config = LoraConfig(
+ r=lora_r,
+ lora_alpha=lora_alpha,
+ target_modules=lora_target_modules,
+ lora_dropout=lora_dropout,
+ bias="none",
+ task_type="CAUSAL_LM",
+ )
+ model = get_peft_model(model, lora_config)
+ model.print_trainable_parameters()
+
+ model.resize_token_embeddings(len(tokenizer))
+
+ # make text_hidden_fcs, mask_decoder, lm_head, embed_tokens trainable
+ for n, p in model.named_parameters():
+ if any(
+ [
+ x in n
+ for x in ["lm_head", "embed_tokens", "mask_decoder", "text_hidden_fcs"]
+ ]
+ ):
+ print("n: ", n, "p.shape: ", p.shape)
+ p.requires_grad = True
+
+ world_size = torch.cuda.device_count()
+ args.distributed = world_size > 1
+ train_dataset = HybridDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ samples_per_epoch=args.batch_size
+ * args.grad_accumulation_steps
+ * args.steps_per_epoch
+ * world_size,
+ precision=args.precision,
+ image_size=args.image_size,
+ num_classes_per_sample=args.num_classes_per_sample,
+ exclude_val=args.exclude_val,
+ dataset=args.dataset,
+ sample_rate=[float(x) for x in args.sample_rates.split(",")],
+ sem_seg_data=args.sem_seg_data,
+ refer_seg_data=args.refer_seg_data,
+ vqa_data=args.vqa_data,
+ reason_seg_data=args.reason_seg_data,
+ aff_seg_data=args.aff_seg_data,
+ aff_sample_rate=[float(x) for x in args.aff_sample_rates.split(",")],
+ reason_aff_data=args.reason_aff_data,
+ reason_aff_sample_rate=[float(x) for x in args.reason_aff_sample_rates.split(",")],
+ explanatory=args.explanatory,
+ )
+
+    if not args.no_eval:
+ if args.eval_affordance:
+ val_dataset = AffValDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ args.val_dataset,
+ args.image_size,
+ )
+ elif args.eval_reason_aff:
+ val_dataset = ReasonAffValDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ args.val_dataset,
+ args.image_size,
+ )
+ else:
+ val_dataset = ValDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ args.val_dataset,
+ args.image_size,
+ )
+ print(
+ f"Training with {len(train_dataset)} examples and validating with {len(val_dataset)} examples."
+ )
+ else:
+ val_dataset = None
+ print(f"Training with {len(train_dataset)} examples.")
+
+ ds_config = {
+ "train_micro_batch_size_per_gpu": args.batch_size,
+ "gradient_accumulation_steps": args.grad_accumulation_steps,
+ "optimizer": {
+ "type": "AdamW",
+ "params": {
+ "lr": args.lr,
+ "weight_decay": 0.0,
+ "betas": (args.beta1, args.beta2),
+ },
+ },
+ "scheduler": {
+ "type": "WarmupDecayLR",
+ "params": {
+ "total_num_steps": args.epochs * args.steps_per_epoch,
+ "warmup_min_lr": 0,
+ "warmup_max_lr": args.lr,
+ "warmup_num_steps": 100,
+ "warmup_type": "linear",
+ },
+ },
+ "fp16": {
+ "enabled": args.precision == "fp16",
+ },
+ "bf16": {
+ "enabled": args.precision == "bf16",
+ },
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": 2,
+ "contiguous_gradients": True,
+ "overlap_comm": True,
+ "reduce_scatter": True,
+ "reduce_bucket_size": 5e8,
+ "allgather_bucket_size": 5e8,
+ },
+ }
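+    # Effective global batch size = train_micro_batch_size_per_gpu *
+    # gradient_accumulation_steps * num_gpus, consistent with the
+    # samples_per_epoch sizing of HybridDataset above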
+ model_engine, optimizer, train_loader, scheduler = deepspeed.initialize(
+ model=model,
+ model_parameters=model.parameters(),
+ training_data=train_dataset,
+ collate_fn=partial(
+ collate_fn,
+ tokenizer=tokenizer,
+ conv_type=args.conv_type,
+ use_mm_start_end=args.use_mm_start_end,
+ local_rank=args.local_rank,
+ ),
+ config=ds_config,
+ )
+
+ # resume deepspeed checkpoint
+ if args.auto_resume and len(args.resume) == 0:
+ resume = os.path.join(args.log_dir, "ckpt_model")
+ if os.path.exists(resume):
+ args.resume = resume
+
+ if args.resume:
+ load_path, client_state = model_engine.load_checkpoint(args.resume)
+ with open(os.path.join(args.resume, "latest"), "r") as f:
+ ckpt_dir = f.readlines()[0].strip()
+ args.start_epoch = (
+ int(ckpt_dir.replace("global_step", "")) // args.steps_per_epoch
+ )
+ print(
+ "resume training from {}, start from epoch {}".format(
+ args.resume, args.start_epoch
+ )
+ )
+
+ # validation dataset
+ if val_dataset is not None:
+ assert args.val_batch_size == 1
+ val_sampler = torch.utils.data.distributed.DistributedSampler(
+ val_dataset, shuffle=False, drop_last=False
+ )
+ val_loader = torch.utils.data.DataLoader(
+ val_dataset,
+ batch_size=args.val_batch_size,
+ shuffle=False,
+ num_workers=args.workers,
+ pin_memory=False,
+ sampler=val_sampler,
+ collate_fn=partial(
+ collate_fn,
+ tokenizer=tokenizer,
+ conv_type=args.conv_type,
+ use_mm_start_end=args.use_mm_start_end,
+ local_rank=args.local_rank,
+ ),
+ )
+
+ train_iter = iter(train_loader)
+ best_score, cur_ciou = 0.0, 0.0
+
+ if args.eval_only:
+ giou, ciou = validate(val_loader, model_engine, 0, writer, args)
+ if args.local_rank == 0:
+ with open(os.path.join(args.version, "eval_result.txt"), "a") as f:
+ f.write(f"dataset: {args.val_dataset}, giou: {giou}, ciou: {ciou} \n")
+ exit()
+
+ for epoch in range(args.start_epoch, args.epochs):
+ # train for one epoch
+ train_iter = train(
+ train_loader,
+ model_engine,
+ epoch,
+ scheduler,
+ writer,
+ train_iter,
+ args,
+ )
+
+        if not args.no_eval:
+ giou, ciou = validate(val_loader, model_engine, epoch, writer, args)
+ is_best = giou > best_score
+ best_score = max(giou, best_score)
+ cur_ciou = ciou if is_best else cur_ciou
+
+ if args.no_eval or is_best:
+ save_dir = os.path.join(args.log_dir, "ckpt_model")
+ if args.local_rank == 0:
+ torch.save(
+ {"epoch": epoch},
+ os.path.join(
+ args.log_dir,
+ "meta_log_giou{:.3f}_ciou{:.3f}.pth".format(
+ best_score, cur_ciou
+ ),
+ ),
+ )
+ if os.path.exists(save_dir):
+ shutil.rmtree(save_dir)
+ torch.distributed.barrier()
+ model_engine.save_checkpoint(save_dir)
+
+
+def train(
+ train_loader,
+ model,
+ epoch,
+ scheduler,
+ writer,
+ train_iter,
+ args,
+):
+ """Main training loop."""
+ batch_time = AverageMeter("Time", ":6.3f")
+ data_time = AverageMeter("Data", ":6.3f")
+ losses = AverageMeter("Loss", ":.4f")
+ ce_losses = AverageMeter("CeLoss", ":.4f")
+ mask_bce_losses = AverageMeter("MaskBCELoss", ":.4f")
+ mask_dice_losses = AverageMeter("MaskDICELoss", ":.4f")
+ mask_losses = AverageMeter("MaskLoss", ":.4f")
+
+ progress = ProgressMeter(
+ args.steps_per_epoch,
+ [
+ batch_time,
+ losses,
+ ce_losses,
+ mask_losses,
+ mask_bce_losses,
+ mask_dice_losses,
+ ],
+ prefix="Epoch: [{}]".format(epoch),
+ )
+
+ # switch to train mode
+ model.train()
+ end = time.time()
+ for global_step in range(args.steps_per_epoch):
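+        # DeepSpeed's model.step() applies the optimizer update only at gradient
+        # accumulation boundaries, so one outer iteration here corresponds to an
+        # effective batch of batch_size * grad_accumulation_steps * world_size samples.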
+ for i in range(args.grad_accumulation_steps):
+ try:
+ input_dict = next(train_iter)
+            except StopIteration:
+ train_iter = iter(train_loader)
+ input_dict = next(train_iter)
+
+ data_time.update(time.time() - end)
+ input_dict = dict_to_cuda(input_dict)
+
+ if args.precision == "fp16":
+ input_dict["images"] = input_dict["images"].half()
+ input_dict["images_clip"] = input_dict["images_clip"].half()
+ elif args.precision == "bf16":
+ input_dict["images"] = input_dict["images"].bfloat16()
+ input_dict["images_clip"] = input_dict["images_clip"].bfloat16()
+ else:
+ input_dict["images"] = input_dict["images"].float()
+ input_dict["images_clip"] = input_dict["images_clip"].float()
+
+ output_dict = model(**input_dict)
+
+ loss = output_dict["loss"]
+ ce_loss = output_dict["ce_loss"]
+ mask_bce_loss = output_dict["mask_bce_loss"]
+ mask_dice_loss = output_dict["mask_dice_loss"]
+ mask_loss = output_dict["mask_loss"]
+
+ losses.update(loss.item(), input_dict["images"].size(0))
+ ce_losses.update(ce_loss.item(), input_dict["images"].size(0))
+ mask_bce_losses.update(mask_bce_loss.item(), input_dict["images"].size(0))
+ mask_dice_losses.update(mask_dice_loss.item(), input_dict["images"].size(0))
+ mask_losses.update(mask_loss.item(), input_dict["images"].size(0))
+ model.backward(loss)
+ model.step()
+
+ # measure elapsed time
+ batch_time.update(time.time() - end)
+ end = time.time()
+
+ if global_step % args.print_freq == 0:
+ if args.distributed:
+ batch_time.all_reduce()
+ data_time.all_reduce()
+
+ losses.all_reduce()
+ ce_losses.all_reduce()
+ mask_bce_losses.all_reduce()
+ mask_dice_losses.all_reduce()
+ mask_losses.all_reduce()
+
+ if args.local_rank == 0:
+ progress.display(global_step + 1)
+ writer.add_scalar("train/loss", losses.avg, global_step)
+ writer.add_scalar("train/ce_loss", ce_losses.avg, global_step)
+ writer.add_scalar(
+ "train/mask_bce_loss", mask_bce_losses.avg, global_step
+ )
+ writer.add_scalar(
+ "train/mask_dice_loss", mask_dice_losses.avg, global_step
+ )
+ writer.add_scalar("train/mask_loss", mask_losses.avg, global_step)
+ writer.add_scalar(
+ "metrics/total_secs_per_batch", batch_time.avg, global_step
+ )
+ writer.add_scalar(
+ "metrics/data_secs_per_batch", data_time.avg, global_step
+ )
+
+ batch_time.reset()
+ data_time.reset()
+ losses.reset()
+ ce_losses.reset()
+ mask_bce_losses.reset()
+ mask_dice_losses.reset()
+ mask_losses.reset()
+
+ if global_step != 0:
+ curr_lr = scheduler.get_last_lr()
+ if args.local_rank == 0:
+ writer.add_scalar("train/lr", curr_lr[0], global_step)
+
+ return train_iter
+
+
+def validate(val_loader, model_engine, epoch, writer, args):
+ intersection_meter = AverageMeter("Intersec", ":6.3f", Summary.SUM)
+ union_meter = AverageMeter("Union", ":6.3f", Summary.SUM)
+ acc_iou_meter = AverageMeter("gIoU", ":6.3f", Summary.SUM)
+
+ model_engine.eval()
+
+ for input_dict in tqdm.tqdm(val_loader):
+ torch.cuda.empty_cache()
+
+ input_dict = dict_to_cuda(input_dict)
+ if args.precision == "fp16":
+ input_dict["images"] = input_dict["images"].half()
+ input_dict["images_clip"] = input_dict["images_clip"].half()
+ elif args.precision == "bf16":
+ input_dict["images"] = input_dict["images"].bfloat16()
+ input_dict["images_clip"] = input_dict["images_clip"].bfloat16()
+ else:
+ input_dict["images"] = input_dict["images"].float()
+ input_dict["images_clip"] = input_dict["images_clip"].float()
+
+ with torch.no_grad():
+ output_dict = model_engine(**input_dict)
+
+ pred_masks = output_dict["pred_masks"]
+ masks_list = output_dict["gt_masks"][0].int()
+ output_list = (pred_masks[0] > 0).int()
+ assert len(pred_masks) == 1
+
+ intersection, union, acc_iou = 0.0, 0.0, 0.0
+ for mask_i, output_i in zip(masks_list, output_list):
+ intersection_i, union_i, _ = intersectionAndUnionGPU(
+ output_i.contiguous().clone(), mask_i.contiguous(), 2, ignore_index=255
+ )
+ intersection += intersection_i
+ union += union_i
+ acc_iou += intersection_i / (union_i + 1e-5)
+ acc_iou[union_i == 0] += 1.0 # no-object target
+ intersection, union = intersection.cpu().numpy(), union.cpu().numpy()
+ acc_iou = acc_iou.cpu().numpy() / masks_list.shape[0]
+        intersection_meter.update(intersection)
+        union_meter.update(union)
+        acc_iou_meter.update(acc_iou, n=masks_list.shape[0])
+
+ intersection_meter.all_reduce()
+ union_meter.all_reduce()
+ acc_iou_meter.all_reduce()
+
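+    # cIoU: cumulative IoU over the whole split (sum of intersections / sum of unions);
+    # gIoU: mean of the per-sample IoUs. Index 1 selects the foreground class.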
+ iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
+ ciou = iou_class[1]
+ giou = acc_iou_meter.avg[1]
+
+ if args.local_rank == 0:
+ writer.add_scalar("val/giou", giou, epoch)
+ writer.add_scalar("val/ciou", ciou, epoch)
+ print("giou: {:.4f}, ciou: {:.4f}".format(giou, ciou))
+
+ return giou, ciou
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/README.md b/README.md
index 7b95401dc46245ac339fc25059d4a56d90b4cde5..dcb6bee478ea1818f71ee3de5aa1bc6f904bd22a 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,79 @@
----
-license: apache-2.0
----
+# RAGNet: Large-scale Reasoning-based Affordance Segmentation Benchmark towards General Grasping
+
+| [**📑 Paper**](https://arxiv.org/abs/2507.23734) | [**🤗 Model**](https://huggingface.co/Dongming97/AffordanceVLM) | [**🤗 Dataset**](https://huggingface.co/datasets/Dongming97/RAGNet) | [**🖥️ Website**](https://wudongming97.github.io/RAGNet/) |
+
+> **[RAGNet: Large-scale Reasoning-based Affordance Segmentation Benchmark towards General Grasping](https://arxiv.org/abs/2507.23734)**
+>
+> Dongming Wu, Yanping Fu, Saike Huang, Yingfei Liu, Fan Jia, Nian Liu, Feng Dai, Tiancai Wang, Rao Muhammad Anwer, Fahad Shahbaz Khan, Jianbing Shen
+
+## 📝 TL;DR
+- To push forward general robotic grasping, we introduce a large-scale reasoning-based affordance segmentation benchmark, **RAGNet**. It contains 273k images, 180 categories, and 26k reasoning instructions.
+- Furthermore, we propose a comprehensive affordance-based grasping framework, named AffordanceNet, which consists of a VLM (named AffordanceVLM) pre-trained on our massive affordance data and a grasping network conditioned on the affordance map to grasp the target.
+
+---
+
+## 📰 News
+- [2025.08] Paper is released at [arXiv](https://arxiv.org/abs/2507.23734).
+- [2025.07] Inference code and the [AffordanceVLM](https://huggingface.co/Dongming97/AffordanceVLM) model are released. Welcome to try it!
+- [2025.06] Paper is accepted by ICCV 2025!
+
+---
+
+## 🚀 Getting Started
+
+* [Installation](docs/installation.md)
+* [Download dataset](docs/dataset.md)
+* [Training and evaluation](docs/training_and_evaluation.md)
+* To deploy using Gradio, run the following command:
+
+ ```bash
+ python app.py --version='./exps/AffordanceVLM-7B'
+ ```
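+
+* For command-line inference without the web UI, you can also use `chat.py` (interactive) or `chat_prefill.py` (single forward pass); the checkpoint path below is just an example:
+
+  ```bash
+  python chat.py --version='./exps/AffordanceVLM-7B' --precision='bf16'
+  ```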
+
+## 📊 Main Results
+### 🔹 Affordance Segmentation
+| Method | HANDAL gIoU | HANDAL cIoU | HANDAL† gIoU | HANDAL† cIoU | GraspNet seen gIoU | GraspNet seen cIoU | GraspNet novel gIoU | GraspNet novel cIoU | 3DOI gIoU | 3DOI cIoU |
+|--------------------------------------|-------------|-------------|---------------|---------------|----------------------|----------------------|------------------------|------------------------|------------|------------|
+| AffordanceNet | 60.3| 60.8 |60.5|60.3|63.3 |64.0| 45.6 |33.2 | 37.4| 37.4 |
+
+### 🔸 Reasoning-Based Affordance Segmentation
+
+| Method | HANDAL (easy) gIoU | HANDAL (easy) cIoU | HANDAL (hard) gIoU | HANDAL (hard) cIoU | 3DOI gIoU | 3DOI cIoU |
+|---------|---------------------|---------------------|---------------------|---------------------|-----------|-----------|
+| AffordanceNet| 58.3| 58.1 | 58.2| 57.8 | 38.1 | 39.4|
+
+
+## 📚 Citation
+If you find our work useful, please consider citing:
+
+```bibtex
+@inproceedings{wu2025ragnet,
+ title={RAGNet: Large-scale Reasoning-based Affordance Segmentation Benchmark towards General Grasping},
+ author={Wu, Dongming and Fu, Yanping and Huang, Saike and Liu, Yingfei and Jia, Fan and Liu, Nian and Dai, Feng and Wang, Tiancai and Anwer, Rao Muhammad and Khan, Fahad Shahbaz and others},
+ booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
+ pages={11980--11990},
+ year={2025}
+}
+```
+
+## 🙏 Acknowledgements
+We thank the authors of the following open-source projects.
+- [LISA](https://github.com/dvlab-research/LISA)
+- [LLaVA](https://github.com/haotian-liu/LLaVA)
+- [SAM](https://github.com/facebookresearch/segment-anything)
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..d744703be8057c8596e210469bed56a96f8d8d4e
--- /dev/null
+++ b/app.py
@@ -0,0 +1,329 @@
+import argparse
+import os
+import re
+import sys
+
+import bleach
+import cv2
+import gradio as gr
+import numpy as np
+import torch
+import torch.nn.functional as F
+from PIL import Image
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+from datetime import datetime
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(description="AffordanceVLM chat")
+ parser.add_argument("--version", default="./exps/AffordanceVLM-7B")
+ parser.add_argument("--vis_save_path", default="./vis_output", type=str)
+ parser.add_argument(
+ "--precision",
+ default="bf16",
+ type=str,
+ choices=["fp32", "bf16", "fp16"],
+ help="precision for inference",
+ )
+ parser.add_argument("--image_size", default=1024, type=int, help="image size")
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument(
+ "--vision-tower", default="openai/clip-vit-large-patch14", type=str
+ )
+ parser.add_argument("--local-rank", default=0, type=int, help="node rank")
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type",
+ default="llava_v1",
+ type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - pixel_mean) / pixel_std
+ # Pad
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+args = parse_args(sys.argv[1:])
+os.makedirs(args.vis_save_path, exist_ok=True)
+
+# Create model
+tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+)
+tokenizer.pad_token = tokenizer.unk_token
+args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+torch_dtype = torch.float32
+if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+kwargs = {"torch_dtype": torch_dtype}
+if args.load_in_4bit:
+ kwargs.update(
+ {
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ }
+ )
+elif args.load_in_8bit:
+ kwargs.update(
+ {
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ }
+ )
+
+model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version, low_cpu_mem_usage=True, vision_tower=args.vision_tower, seg_token_idx=args.seg_token_idx, aff_token_idx=args.aff_token_idx, **kwargs
+)
+
+model.config.eos_token_id = tokenizer.eos_token_id
+model.config.bos_token_id = tokenizer.bos_token_id
+model.config.pad_token_id = tokenizer.pad_token_id
+
+model.get_model().initialize_vision_modules(model.get_model().config)
+vision_tower = model.get_model().get_vision_tower()
+vision_tower.to(dtype=torch_dtype)
+
+if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+elif (
+ args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit)
+):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+elif args.precision == "fp32":
+ model = model.float().cuda()
+
+vision_tower = model.get_model().get_vision_tower()
+vision_tower.to(device=args.local_rank)
+
+clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+transform = ResizeLongestSide(args.image_size)
+
+model.eval()
+
+
+# Gradio
+examples = [
+ [
+ "Please segment the affordance map of mug in this image.",
+ "/data/AffordanceNet/vis_output/my_workspace.JPG",
+ ],
+]
+output_labels = ["Segmentation Output"]
+
+title = "RAGNet: Large-scale Reasoning-based Affordance Segmentation Benchmark towards General Grasping"
+
+description = """
+
+This is the online demo of AffordanceVLM. \n
+**Note**: **Different prompts can lead to significantly varied results**. \n
+**Note**: Please try to **standardize** your input text prompts to **avoid ambiguity**, and also make sure the **punctuation** of the input is correct. \n
+**Note**: The current model is **AffordanceVLM-7B**. \n
+**Usage**:
+To let AffordanceVLM **segment something**, input a prompt like: "Can you segment the affordance map of xxx in this image?" or "What is the affordance map of xxx in this image?".
+
+"""
+
+article = """
+
+
+Preprint Paper
+
+\n
+
+ Github Repo
+"""
+
+
+## Gradio inference callback
+def inference(input_str, input_image):
+ ## filter out special chars
+ input_str = bleach.clean(input_str)
+
+ print("input_str: ", input_str, "input_image: ", input_image)
+
+ ## input valid check
+ if not re.match(r"^[A-Za-z ,.!?\'\"]+$", input_str) or len(input_str) < 1:
+ output_str = "[Error] Invalid input: ", input_str
+ # output_image = np.zeros((128, 128, 3))
+ ## error happened
+ output_image = cv2.imread("./resources/error_happened.png")[:, :, ::-1]
+ return output_image, output_str
+
+ # Model Inference
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = input_str
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + prompt
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "")
+ prompt = conv.get_prompt()
+
+ image_np = cv2.imread(input_image)
+
+ # save the input image
+ SAVE_DIR = "./gradio_images/"
+ os.makedirs(SAVE_DIR, exist_ok=True)
+
+ # generate a timestamped filename
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ filename = f"{timestamp}.png"
+ save_path = os.path.join(SAVE_DIR, filename)
+
+ # save the image
+ cv2.imwrite(save_path, image_np)
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+
+ output_ids, pred_masks = model.evaluate(
+ image_clip,
+ image,
+ input_ids,
+ resize_list,
+ original_size_list,
+ max_new_tokens=512,
+ tokenizer=tokenizer,
+ )
+ output_ids = output_ids[0][output_ids[0] != IMAGE_TOKEN_INDEX]
+
+ text_output = tokenizer.decode(output_ids, skip_special_tokens=False)
+    text_output = text_output.replace("\n", "").replace("  ", " ")
+    text_output = text_output.split("ASSISTANT: ")[-1].replace('</s>', '')
+
+ print("text_output: ", text_output)
+ save_img = None
+ for i, pred_mask in enumerate(pred_masks):
+ if pred_mask.shape[0] == 0:
+ continue
+
+ pred_mask = pred_mask.detach().cpu().numpy()[0]
+ pred_mask = pred_mask > 0
+
+ save_img = image_np.copy()
+ save_img[pred_mask] = (
+ image_np * 0.5
+ + pred_mask[:, :, None].astype(np.uint8) * np.array([255, 0, 0]) * 0.5
+ )[pred_mask]
+
+ output_str = "ASSITANT: " + text_output # input_str
+ if save_img is not None:
+ output_image = save_img # input_image
+ else:
+ ## no seg output
+ output_image = cv2.imread("./resources/no_seg_out.png")[:, :, ::-1]
+ return output_image, output_str
+
+
+demo = gr.Interface(
+ inference,
+ inputs=[
+ gr.Textbox(lines=1, placeholder=None, label="Text Instruction"),
+ gr.Image(type="filepath", label="Input Image"),
+ ],
+ outputs=[
+ gr.Image(type="pil", label="Affordance Output"),
+ gr.Textbox(lines=1, placeholder=None, label="Text Output"),
+ ],
+ title=title,
+ description=description,
+ article=article,
+ examples=examples,
+ allow_flagging="auto",
+)
+
+demo.queue()
+# demo.launch()
+demo.launch(server_name="0.0.0.0", server_port=3200)
\ No newline at end of file
diff --git a/batch_generate.sh b/batch_generate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..458f89eacdf098ab25f30f246ed2f6496f4c4e7b
--- /dev/null
+++ b/batch_generate.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Batch generate affordance masks for all four LIBERO subsets sequentially.
+
+SRC_ROOT="/gemini/space/wrz/libero_per_frame"
+TGT_ROOT="/gemini/space/wrz/ragnet_results"
+
+for ds in libero_object libero_goal libero_spatial libero_10; do
+ echo "========== Processing ${ds} =========="
+ CUDA_VISIBLE_DEVICES=0 python batch_generate.py \
+ --data_dir "${SRC_ROOT}/${ds}_converted" \
+ --save_dir "${TGT_ROOT}/${ds}"
+ echo "========== ${ds} done =========="
+ echo
+done
diff --git a/batch_generate_prefill_accelerate.py b/batch_generate_prefill_accelerate.py
new file mode 100644
index 0000000000000000000000000000000000000000..849e9699a456afb7b4cc34d75ea0fe3ce7bcf09a
--- /dev/null
+++ b/batch_generate_prefill_accelerate.py
@@ -0,0 +1,418 @@
+"""
+Batch affordance mask generation for per-step datasets.
+
+Reads a per-step dataset (converted by convert_lerobot_to_perstep.py) and
+generates affordance masks for every image_primary.jpg and image_wrist.jpg
+using AffordanceVLM.
+
+Input structure:
+ {data_dir}/
+ ├── meta_info.h5
+ └── episodes/
+ └── {episode_id:06d}/
+ └── steps/
+ └── {step_id:04d}/
+ ├── other.h5 # language_instruction
+ ├── image_primary.jpg
+ └── image_wrist.jpg
+
+Output structure:
+ {save_dir}/
+ └── episodes/
+ └── {episode_id:06d}/
+ └── steps/
+ └── {step_id:04d}/
+ ├── image_primary_mask.png # binary 0/255
+ └── image_wrist_mask.png
+
+Usage:
+ CUDA_VISIBLE_DEVICES=1 python batch_generate_prefill_accelerate.py \
+ --data_dir /gemini/space/wrz/libero_per_frame/libero_spatial_converted \
+ --save_dir /gemini/space/wrz/ragnet_results/libero_spatial
+"""
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+import cv2
+import h5py
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(
+ description="Batch affordance mask generation for per-step datasets"
+ )
+ # Model arguments (same as chat.py)
+ parser.add_argument("--version", default="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B")
+ parser.add_argument(
+ "--precision", default="bf16", type=str,
+ choices=["fp32", "bf16", "fp16"],
+ )
+ parser.add_argument("--image_size", default=1024, type=int)
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument("--vision-tower", default="openai/clip-vit-large-patch14", type=str)
+ parser.add_argument("--local-rank", default=0, type=int)
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type", default="llava_v1", type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+
+ # Batch processing arguments
+ parser.add_argument("--data_dir", type=str, required=True,
+ help="Root of per-step dataset (contains episodes/)")
+ parser.add_argument("--save_dir", type=str, required=True,
+ help="Output directory for masks")
+ parser.add_argument("--prompt_template", type=str,
+ default="{}",
+ help="Template wrapping language_instruction. Use {} as placeholder.")
+ # "{}"
+ # Segment the most suitable manipulation region on the single target object for the task '{}'.
+ # Segment the affordance map for the task '{}' in this image.
+ # Segment the affordance map of the single target object for the task '{}' in this image.
+ # Given the task instruction '{}', what is the affordance map of the target object in this image? Please output segmentation mask.
+ # Given the task instruction '{}', what is the affordance map of the single target object in this image? There is only one target object. Please output segmentation mask.
+ parser.add_argument("--start_episode", type=int, default=None,
+ help="First episode index to process (inclusive)")
+ parser.add_argument("--end_episode", type=int, default=None,
+ help="Last episode index to process (exclusive)")
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ x = (x - pixel_mean) / pixel_std
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+
+def load_model(args):
+ """Load tokenizer and model, identical to chat.py."""
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+ kwargs = {"torch_dtype": torch_dtype}
+ if args.load_in_4bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ })
+ elif args.load_in_8bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ })
+
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version,
+ low_cpu_mem_usage=True,
+ vision_tower=args.vision_tower,
+ seg_token_idx=args.seg_token_idx,
+ aff_token_idx=args.aff_token_idx,
+ **kwargs,
+ )
+
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+
+ if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+ elif args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+ elif args.precision == "fp32":
+ model = model.float().cuda()
+
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(device=args.local_rank)
+
+ clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+ transform = ResizeLongestSide(args.image_size)
+
+ model.eval()
+ return model, tokenizer, clip_image_processor, transform
+
+
+def build_prompt(text: str, args) -> str:
+ """Build the full conversation prompt from a text query."""
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + text
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
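+    # Prefill the assistant turn with "[AFF]." so the [AFF] token is already present
+    # in the sequence and the mask embedding can be extracted in a single forward
+    # pass, without autoregressive generation.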
+ conv.append_message(conv.roles[1], "[AFF].")
+ return conv.get_prompt()
+
+
+def infer_single_image(
+ image_path: str,
+ prompt_str: str,
+ model,
+ tokenizer,
+ clip_image_processor,
+ transform,
+ args,
+) -> "np.ndarray | None":
+ """Run inference on a single image. Returns binary mask (H, W) uint8 0/255 or None."""
+ image_np = cv2.imread(image_path)
+ if image_np is None:
+ print(f" [WARNING] Cannot read image: {image_path}")
+ return None
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+
+ # CLIP preprocessing
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")["pixel_values"][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ # SAM preprocessing
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ # Tokenize
+ input_ids = tokenizer_image_token(prompt_str, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+ attention_masks = input_ids.ne(tokenizer.pad_token_id)
+
+ # Prefill inference (single forward pass instead of autoregressive generation)
+ h, w = original_size_list[0]
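+    # labels / masks_list / label_list below are dummy inputs (zeros or copies) used
+    # only to satisfy the training-style forward() signature; with inference=True the
+    # model is expected to ignore them and simply return pred_masks (assumption based
+    # on how this script constructs them).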
+ labels = input_ids.clone()
+ offset = torch.LongTensor([0, 1]).cuda()
+ masks_list = [torch.zeros(1, h, w).float().cuda()]
+ label_list = [torch.zeros(h, w).long().cuda()]
+
+ with torch.no_grad():
+ output_dict = model(
+ images=image,
+ images_clip=image_clip,
+ input_ids=input_ids,
+ labels=labels,
+ attention_masks=attention_masks,
+ offset=offset,
+ masks_list=masks_list,
+ label_list=label_list,
+ resize_list=resize_list,
+ inference=True,
+ )
+
+ pred_masks = output_dict["pred_masks"]
+
+ # Merge all predicted masks via union (logical OR)
+ merged = np.zeros((h, w), dtype=bool)
+ has_mask = False
+ for pred_mask in pred_masks:
+ if pred_mask.shape[0] == 0:
+ continue
+ mask_np = pred_mask.detach().cpu().numpy()[0] # (H, W)
+ merged |= (mask_np > 0)
+ has_mask = True
+
+ if not has_mask:
+ return None
+
+ return (merged.astype(np.uint8) * 255)
+
+
+def read_language_instruction(h5_path: str) -> str:
+ """Read language_instruction from other.h5."""
+ with h5py.File(h5_path, "r") as f:
+ instr = f["language_instruction"][()]
+ if isinstance(instr, bytes):
+ instr = instr.decode("utf-8")
+ return str(instr)
+
+
+def main(args):
+ args = parse_args(args)
+ data_dir = Path(args.data_dir)
+ save_dir = Path(args.save_dir)
+
+ episodes_dir = data_dir / "episodes"
+ if not episodes_dir.is_dir():
+ print(f"Error: episodes directory not found at {episodes_dir}")
+ sys.exit(1)
+
+ # Collect and sort episode directories
+ episode_dirs = sorted(
+ [d for d in episodes_dir.iterdir() if d.is_dir()],
+ key=lambda p: p.name,
+ )
+
+ # Filter by episode range
+ if args.start_episode is not None or args.end_episode is not None:
+ start = args.start_episode if args.start_episode is not None else 0
+ end = args.end_episode if args.end_episode is not None else len(episode_dirs)
+ episode_dirs = [
+ d for d in episode_dirs
+ if start <= int(d.name) < end
+ ]
+
+ print(f"Data dir : {data_dir}")
+ print(f"Save dir : {save_dir}")
+ print(f"Episodes : {len(episode_dirs)}")
+ print(f"Prompt : {args.prompt_template}")
+ print()
+
+ # Load model
+ print("Loading model...")
+ model, tokenizer, clip_image_processor, transform = load_model(args)
+ print("Model loaded.\n")
+
+ total_steps = 0
+ empty_mask_count = 0
+
+ for ep_dir in episode_dirs:
+ episode_id = ep_dir.name # e.g. "000000"
+ steps_dir = ep_dir / "steps"
+ if not steps_dir.is_dir():
+ print(f" [WARNING] No steps/ in {ep_dir}, skipping.")
+ continue
+
+ step_dirs = sorted(
+ [d for d in steps_dir.iterdir() if d.is_dir()],
+ key=lambda p: p.name,
+ )
+
+ for step_dir in step_dirs:
+ step_id = step_dir.name # e.g. "0000"
+
+ # Read language instruction
+ other_h5 = step_dir / "other.h5"
+ if not other_h5.exists():
+ print(f" [WARNING] Missing other.h5 in {step_dir}, skipping.")
+ continue
+ language_instruction = read_language_instruction(str(other_h5))
+ # debug
+ # print(language_instruction)
+
+ # Build prompt
+ query_text = args.prompt_template.format(language_instruction)
+ prompt_str = build_prompt(query_text, args)
+
+ # Output directory (same structure as input: episodes/{episode_id}/steps/{step_id}/)
+ out_dir = save_dir / "episodes" / episode_id / "steps" / step_id
+ out_dir.mkdir(parents=True, exist_ok=True)
+
+ # Process both cameras
+ for cam_name in ("image_primary", "image_wrist"):
+ img_path = step_dir / f"{cam_name}.jpg"
+ mask_path = out_dir / f"{cam_name}_mask.png"
+
+ if not img_path.exists():
+ print(f" [WARNING] Missing {img_path}, skipping.")
+ continue
+
+ mask = infer_single_image(
+ str(img_path), prompt_str,
+ model, tokenizer, clip_image_processor, transform, args,
+ )
+
+ if mask is None:
+                    # No mask predicted: save a blank mask and count it as empty
+ h, w = cv2.imread(str(img_path)).shape[:2]
+ mask = np.zeros((h, w), dtype=np.uint8)
+ empty_mask_count += 1
+
+ cv2.imwrite(str(mask_path), mask)
+
+ total_steps += 1
+ if total_steps % 50 == 0:
+ print(f" Processed {total_steps} steps (episode {episode_id}, step {step_id})")
+
+ print(f"Episode {episode_id} done ({len(step_dirs)} steps)")
+
+ print(f"\nFinished. {total_steps} steps processed, {empty_mask_count} empty masks.")
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/chat.py b/chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..62d3a4efb378236d4d6ed4b1e917ea32f3731318
--- /dev/null
+++ b/chat.py
@@ -0,0 +1,255 @@
+import argparse
+import os
+import sys
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(description="LISA chat")
+ parser.add_argument("--version", default="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B")
+ parser.add_argument("--vis_save_path", default="./vis_output", type=str)
+ parser.add_argument(
+ "--precision",
+ default="bf16",
+ type=str,
+ choices=["fp32", "bf16", "fp16"],
+ help="precision for inference",
+ )
+ parser.add_argument("--image_size", default=1024, type=int, help="image size")
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument(
+ "--vision-tower", default="openai/clip-vit-large-patch14", type=str
+ )
+ parser.add_argument("--local-rank", default=0, type=int, help="node rank")
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type",
+ default="llava_v1",
+ type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - pixel_mean) / pixel_std
+ # Pad
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+
+def main(args):
+ args = parse_args(args)
+ os.makedirs(args.vis_save_path, exist_ok=True)
+
+ # Create model
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ num_added_tokens = tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ num_added_tokens = tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+ kwargs = {"torch_dtype": torch_dtype}
+ if args.load_in_4bit:
+ kwargs.update(
+ {
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ }
+ )
+ elif args.load_in_8bit:
+ kwargs.update(
+ {
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ }
+ )
+
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version, low_cpu_mem_usage=True, vision_tower=args.vision_tower, seg_token_idx=args.seg_token_idx, aff_token_idx=args.aff_token_idx, **kwargs
+ )
+
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+
+ if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+ elif (
+ args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit)
+ ):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+ elif args.precision == "fp32":
+ model = model.float().cuda()
+
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(device=args.local_rank)
+
+ clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+ transform = ResizeLongestSide(args.image_size)
+
+ model.eval()
+
+ while True:
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = input("Please input your prompt: ")
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + prompt
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "")
+ prompt = conv.get_prompt()
+
+ image_path = input("Please input the image path: ")
+ if not os.path.exists(image_path):
+ print("File not found in {}".format(image_path))
+ continue
+
+ image_np = cv2.imread(image_path)
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+
+ output_ids, pred_masks = model.evaluate(
+ image_clip,
+ image,
+ input_ids,
+ resize_list,
+ original_size_list,
+ max_new_tokens=512,
+ tokenizer=tokenizer,
+ )
+ output_ids = output_ids[0][output_ids[0] != IMAGE_TOKEN_INDEX]
+
+ text_output = tokenizer.decode(output_ids, skip_special_tokens=False)
+        text_output = text_output.replace("\n", "").replace("  ", " ")
+ print("text_output: ", text_output)
+
+ for i, pred_mask in enumerate(pred_masks):
+ if pred_mask.shape[0] == 0:
+ continue
+
+ pred_mask = pred_mask.detach().cpu().numpy()[0]
+ pred_mask = pred_mask > 0
+
+ save_path = "{}/{}_mask_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ cv2.imwrite(save_path, pred_mask * 100)
+ print("{} has been saved.".format(save_path))
+
+ save_path = "{}/{}_masked_img_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ save_img = image_np.copy()
+ save_img[pred_mask] = (
+ image_np * 0.5
+ + pred_mask[:, :, None].astype(np.uint8) * np.array([255, 0, 0]) * 0.5
+ )[pred_mask]
+ save_img = cv2.cvtColor(save_img, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(save_path, save_img)
+ print("{} has been saved.".format(save_path))
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/chat_prefill.py b/chat_prefill.py
new file mode 100644
index 0000000000000000000000000000000000000000..323af6bba8cb189584503f4da4780f91292a102c
--- /dev/null
+++ b/chat_prefill.py
@@ -0,0 +1,282 @@
+"""
+Interactive affordance mask generation using prefill mode (single forward pass).
+
+Same interactive workflow as chat.py, but uses prefill inference instead of
+autoregressive generation. The assistant response "[AFF]." is pre-filled in the
+prompt, so the model only does one forward pass to extract mask embeddings.
+"""
+
+import argparse
+import os
+import sys
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(description="AffordanceVLM chat (prefill mode)")
+ parser.add_argument("--version", default="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B")
+ parser.add_argument("--vis_save_path", default="./vis_output_prefill", type=str)
+ parser.add_argument(
+ "--precision", default="bf16", type=str,
+ choices=["fp32", "bf16", "fp16"],
+ )
+ parser.add_argument("--image_size", default=1024, type=int)
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument("--vision-tower", default="openai/clip-vit-large-patch14", type=str)
+ parser.add_argument("--local-rank", default=0, type=int)
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type", default="llava_v1", type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ parser.add_argument("--prompt_template", type=str,
+ default="Segment the most suitable manipulation region on the single target object for the task '{}'.",
+ help="Template wrapping language_instruction. Use {} as placeholder.")
+ # Segment the most suitable manipulation region on the single target object for the task '{}'.
+ # Segment the affordance map for the task '{}' in this image.
+ # Segment the affordance map of the single target object for the task '{}' in this image.
+ # Given the task instruction '{}', what is the affordance map of the target object in this image? Please output segmentation mask.
+ # Given the task instruction '{}', what is the affordance map of the single target object in this image? There is only one target object. Please output segmentation mask.
+ return parser.parse_args(args)
+
+
+def preprocess(
+ x,
+ pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024,
+) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ x = (x - pixel_mean) / pixel_std
+ h, w = x.shape[-2:]
+ padh = img_size - h
+ padw = img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+
+def main(args):
+ args = parse_args(args)
+ os.makedirs(args.vis_save_path, exist_ok=True)
+
+ # Create model
+ tokenizer = AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+
+ kwargs = {"torch_dtype": torch_dtype}
+ if args.load_in_4bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ })
+ elif args.load_in_8bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ llm_int8_skip_modules=["visual_model"],
+ load_in_8bit=True,
+ ),
+ })
+
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version,
+ low_cpu_mem_usage=True,
+ vision_tower=args.vision_tower,
+ seg_token_idx=args.seg_token_idx,
+ aff_token_idx=args.aff_token_idx,
+ **kwargs,
+ )
+
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+
+ if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+ elif args.precision == "fp16" and (not args.load_in_4bit) and (not args.load_in_8bit):
+ vision_tower = model.get_model().get_vision_tower()
+ model.model.vision_tower = None
+ import deepspeed
+ model_engine = deepspeed.init_inference(
+ model=model,
+ dtype=torch.half,
+ replace_with_kernel_inject=True,
+ replace_method="auto",
+ )
+ model = model_engine.module
+ model.model.vision_tower = vision_tower.half().cuda()
+ elif args.precision == "fp32":
+ model = model.float().cuda()
+
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(device=args.local_rank)
+
+ clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+ transform = ResizeLongestSide(args.image_size)
+
+ model.eval()
+
+    # Alternative prompt template, kept for reference (unused; args.prompt_template is applied below)
+    template = "Given the task instruction '{}', what is the affordance map of the target object in this image? Please output segmentation mask."
+
+ while True:
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = input("Please input your prompt: ")
+        # apply the prompt template
+ prompt = args.prompt_template.format(prompt)
+
+ prompt = DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + prompt
+ if args.use_mm_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
+ )
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "[AFF].")
+ prompt = conv.get_prompt()
+
+ image_path = input("Please input the image path: ")
+ if not os.path.exists(image_path):
+ print("File not found in {}".format(image_path))
+ continue
+
+ image_np = cv2.imread(image_path)
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+ h, w = original_size_list[0]
+
+ image_clip = (
+ clip_image_processor.preprocess(image_np, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image_clip = image_clip.bfloat16()
+ elif args.precision == "fp16":
+ image_clip = image_clip.half()
+ else:
+ image_clip = image_clip.float()
+
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+
+ image = (
+ preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+ .unsqueeze(0)
+ .cuda()
+ )
+ if args.precision == "bf16":
+ image = image.bfloat16()
+ elif args.precision == "fp16":
+ image = image.half()
+ else:
+ image = image.float()
+
+ input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ input_ids = input_ids.unsqueeze(0).cuda()
+ attention_masks = input_ids.ne(tokenizer.pad_token_id)
+
+ # Print the full prompt text (prefill mode has no generated text)
+ # debug
+ text_ids = input_ids[0][input_ids[0] != IMAGE_TOKEN_INDEX]
+ text_output = tokenizer.decode(text_ids, skip_special_tokens=False)
+        text_output = text_output.replace("\n", "").replace("  ", " ")
+ print("text_output: ", text_output)
+
+ # Prefill inference
+ labels = input_ids.clone()
+ offset = torch.LongTensor([0, 1]).cuda()
+ masks_list = [torch.zeros(1, h, w).float().cuda()]
+ label_list = [torch.zeros(h, w).long().cuda()]
+
+ with torch.no_grad():
+ output_dict = model(
+ images=image,
+ images_clip=image_clip,
+ input_ids=input_ids,
+ labels=labels,
+ attention_masks=attention_masks,
+ offset=offset,
+ masks_list=masks_list,
+ label_list=label_list,
+ resize_list=resize_list,
+ inference=True,
+ )
+
+ pred_masks = output_dict["pred_masks"]
+
+ for i, pred_mask in enumerate(pred_masks):
+ if pred_mask.shape[0] == 0:
+ continue
+
+ pred_mask = pred_mask.detach().cpu().numpy()[0]
+ pred_mask = pred_mask > 0
+
+ save_path = "{}/{}_mask_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ cv2.imwrite(save_path, pred_mask * 100)
+ print("{} has been saved.".format(save_path))
+
+ save_path = "{}/{}_masked_img_{}.jpg".format(
+ args.vis_save_path, image_path.split("/")[-1].split(".")[0], i
+ )
+ save_img = image_np.copy()
+ save_img[pred_mask] = (
+ image_np * 0.5
+ + pred_mask[:, :, None].astype(np.uint8) * np.array([255, 0, 0]) * 0.5
+ )[pred_mask]
+ save_img = cv2.cvtColor(save_img, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(save_path, save_img)
+ print("{} has been saved.".format(save_path))
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/ckpts/AffordanceVLM-7B/.gitattributes b/ckpts/AffordanceVLM-7B/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/AffordanceVLM-7B/README.md b/ckpts/AffordanceVLM-7B/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7b95401dc46245ac339fc25059d4a56d90b4cde5
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/README.md
@@ -0,0 +1,3 @@
+---
+license: apache-2.0
+---
diff --git a/ckpts/AffordanceVLM-7B/added_tokens.json b/ckpts/AffordanceVLM-7B/added_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..d16bb6d101a075586d13191eed081bc6990ed047
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/added_tokens.json
@@ -0,0 +1,7 @@
+{
+ "<im_end>": 32002,
+ "<im_patch>": 32000,
+ "<im_start>": 32001,
+ "[AFF]": 32004,
+ "[SEG]": 32003
+}
diff --git a/ckpts/AffordanceVLM-7B/config.json b/ckpts/AffordanceVLM-7B/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a3f94412905a6f77849a66693ce8403b4d2f6bcd
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/config.json
@@ -0,0 +1,42 @@
+{
+ "_name_or_path": "./LLaVA/LLaVA-Lightning-7B-v1-1",
+ "architectures": [
+ "AffordanceVLMForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": true,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "image_aspect_ratio": "square",
+ "image_grid_pinpoints": null,
+ "initializer_range": 0.02,
+ "intermediate_size": 11008,
+ "max_position_embeddings": 2048,
+ "max_sequence_length": 2048,
+ "mm_hidden_size": 1024,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": true,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14",
+ "model_type": "llava",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 32,
+ "out_dim": 256,
+ "pad_token_id": 0,
+ "pretrain_mm_mlp_adapter": null,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "train_mask_decoder": true,
+ "transformers_version": "4.31.0",
+ "tune_mm_mlp_adapter": false,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vision_tower": "openai/clip-vit-large-patch14",
+ "vocab_size": 32005
+}
diff --git a/ckpts/AffordanceVLM-7B/eval_result.txt b/ckpts/AffordanceVLM-7B/eval_result.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7f8cb2ee89d33118e65c66e5640f321f09cbf346
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/eval_result.txt
@@ -0,0 +1 @@
+dataset: handal_all, giou: 0.60872483253479, ciou: 0.6054294109344482
diff --git a/ckpts/AffordanceVLM-7B/generation_config.json b/ckpts/AffordanceVLM-7B/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4fd1bf1fd73d6d754da651772288c7222cee63d5
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/generation_config.json
@@ -0,0 +1,7 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "eos_token_id": 1,
+ "pad_token_id": 0,
+ "transformers_version": "4.31.0"
+}
diff --git a/ckpts/AffordanceVLM-7B/pytorch_model-00001-of-00002.bin b/ckpts/AffordanceVLM-7B/pytorch_model-00001-of-00002.bin
new file mode 100644
index 0000000000000000000000000000000000000000..544b6b1da5beb50167cdbe8b4fa056b2954a56dd
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/pytorch_model-00001-of-00002.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efdb3ff9accdd733412d083c770ba34ae1c6745b28e2bae07d3546dc9356bfec
+size 9976675518
diff --git a/ckpts/AffordanceVLM-7B/pytorch_model-00002-of-00002.bin b/ckpts/AffordanceVLM-7B/pytorch_model-00002-of-00002.bin
new file mode 100644
index 0000000000000000000000000000000000000000..a591618052b214ab625d18a49abc5ca855f8df7e
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/pytorch_model-00002-of-00002.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7259eabdd3c03be21d45a328177ac3e46e1385cbc5ff2d757cd8bb70dec81ae9
+size 6144654233
diff --git a/ckpts/AffordanceVLM-7B/pytorch_model.bin.index.json b/ckpts/AffordanceVLM-7B/pytorch_model.bin.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..617625805204fb39198fd0e037e94c4d20ddd793
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/pytorch_model.bin.index.json
@@ -0,0 +1,930 @@
+{
+ "metadata": {
+ "total_size": 16121002176
+ },
+ "weight_map": {
+ "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+ "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.mm_projector.bias": "pytorch_model-00002-of-00002.bin",
+ "model.mm_projector.weight": "pytorch_model-00002-of-00002.bin",
+ "model.norm.weight": "pytorch_model-00002-of-00002.bin",
+ "model.text_hidden_fcs.0.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.text_hidden_fcs.0.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.text_hidden_fcs.0.2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.text_hidden_fcs.0.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.0.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.1.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.10.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.11.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.12.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.13.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.14.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.15.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.16.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.17.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.18.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.19.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.2.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.20.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.21.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.22.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.23.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.24.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.25.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.26.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.27.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.28.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.29.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.3.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.30.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.31.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.4.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.5.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.6.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.7.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.8.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.attn.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.attn.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.attn.qkv.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.attn.qkv.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.attn.rel_pos_h": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.attn.rel_pos_w": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.blocks.9.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.neck.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.neck.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.neck.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.neck.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.neck.3.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.neck.3.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.patch_embed.proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.patch_embed.proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.image_encoder.pos_embed": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.iou_prediction_head.layers.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.iou_prediction_head.layers.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.iou_prediction_head.layers.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.iou_prediction_head.layers.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.iou_prediction_head.layers.2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.iou_prediction_head.layers.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.iou_token.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.mask_tokens.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.0.layers.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.0.layers.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.0.layers.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.0.layers.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.0.layers.2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.0.layers.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.1.layers.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.1.layers.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.1.layers.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.1.layers.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.1.layers.2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.1.layers.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.2.layers.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.2.layers.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.2.layers.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.2.layers.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.2.layers.2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.2.layers.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.3.layers.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.3.layers.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.3.layers.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.3.layers.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.3.layers.2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_hypernetworks_mlps.3.layers.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_upscaling.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_upscaling.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_upscaling.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_upscaling.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_upscaling.3.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.output_upscaling.3.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.final_attn_token_to_image.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_image_to_token.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.cross_attn_token_to_image.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm3.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm3.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm4.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.norm4.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.0.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_image_to_token.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.cross_attn_token_to_image.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.mlp.lin1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.mlp.lin1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.mlp.lin2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.mlp.lin2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm2.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm3.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm3.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm4.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.norm4.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.layers.1.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.norm_final_attn.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.mask_decoder.transformer.norm_final_attn.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.0.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.1.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.3.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.3.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.4.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.4.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.6.bias": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.mask_downscaling.6.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.no_mask_embed.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.not_a_point_embed.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.pe_layer.positional_encoding_gaussian_matrix": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.point_embeddings.0.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.point_embeddings.1.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.point_embeddings.2.weight": "pytorch_model-00002-of-00002.bin",
+ "model.visual_model.prompt_encoder.point_embeddings.3.weight": "pytorch_model-00002-of-00002.bin"
+ }
+}
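The weight_map above only records which shard file holds each parameter; a minimal sketch for inspecting it offline (assuming the standard Hugging Face index name pytorch_model.bin.index.json inside the checkpoint directory):

    import json
    from collections import Counter

    # Load the sharded-checkpoint index and look up where a parameter lives.
    with open("ckpts/AffordanceVLM-7B/pytorch_model.bin.index.json") as f:
        weight_map = json.load(f)["weight_map"]

    print(weight_map["model.visual_model.mask_decoder.iou_token.weight"])
    # -> pytorch_model-00002-of-00002.bin

    # Count how many parameters each shard holds; the SAM visual_model keys
    # listed above all resolve to the second shard.
    print(Counter(weight_map.values()))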
diff --git a/ckpts/AffordanceVLM-7B/special_tokens_map.json b/ckpts/AffordanceVLM-7B/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..f928b2409a393d47ce0d9fe519f17e048a471eca
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/ckpts/AffordanceVLM-7B/tokenizer.model b/ckpts/AffordanceVLM-7B/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/ckpts/AffordanceVLM-7B/tokenizer_config.json b/ckpts/AffordanceVLM-7B/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5410e42d470550c1292a32f630a49b0285ff6432
--- /dev/null
+++ b/ckpts/AffordanceVLM-7B/tokenizer_config.json
@@ -0,0 +1,35 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": true,
+ "model_max_length": 512,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+}
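For reference, the tokenizer files above load with the stock (slow) LlamaTokenizer; a minimal loading sketch consistent with the model_max_length=512 setting used in this repo (use_fast=False is an assumption, matching the LlamaTokenizer class declared in tokenizer_config.json):

    from transformers import AutoTokenizer

    # SentencePiece LlamaTokenizer from the checkpoint directory above.
    tokenizer = AutoTokenizer.from_pretrained(
        "ckpts/AffordanceVLM-7B",
        model_max_length=512,
        padding_side="right",
        use_fast=False,
    )
    print(tokenizer.bos_token, tokenizer.eos_token, len(tokenizer))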
diff --git a/ckpts/sam_vit_h_4b8939.pth b/ckpts/sam_vit_h_4b8939.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8523acce9ddab1cf7e355628a08b1aab8ce08a72
--- /dev/null
+++ b/ckpts/sam_vit_h_4b8939.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7bf3b02f3ebf1267aba913ff637d9a2d5c33d3173bb679e46d9f338c26f262e
+size 2564550879
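sam_vit_h_4b8939.pth is the standard SAM ViT-H checkpoint; a minimal loading sketch, assuming the segment_anything package vendored under model/ mirrors the upstream sam_model_registry API:

    import torch
    from model.segment_anything import sam_model_registry

    # Build the ViT-H SAM model from the LFS-tracked checkpoint.
    sam = sam_model_registry["vit_h"](checkpoint="ckpts/sam_vit_h_4b8939.pth")
    sam = sam.to("cuda" if torch.cuda.is_available() else "cpu").eval()
    print(f"{sum(p.numel() for p in sam.parameters()) / 1e6:.0f}M parameters")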
diff --git a/client.py b/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..1734a723ca12a2f57e2ee2c527c76394ec428a74
--- /dev/null
+++ b/client.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Client script to send an image and prompt to a Flask-based vision-language segmentation server.
+
+from __future__ import absolute_import, print_function, division
+import requests
+import cv2
+import base64
+import numpy as np
+
+# ---------------------------
+# Encode image to base64 string
+# ---------------------------
+def img2b64(img):
+ retval, buffer = cv2.imencode('.bmp', img) # Encode as BMP
+ pic_str = base64.b64encode(buffer).decode() # Convert to base64 string
+ return pic_str
+
+# ---------------------------
+# Decode base64 string back to image
+# ---------------------------
+def b642img(pic_str):
+ img_data = base64.b64decode(pic_str)
+ nparr = np.frombuffer(img_data, np.uint8)
+ img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+ return img_np
+
+# ---------------------------
+# Send image and prompt to server, receive result and save
+# ---------------------------
+def post_files():
+ path = 'vis_output/my_workspace.JPG' # Input image path
+ img = cv2.imread(path)
+ if img is None:
+ print(f"Failed to read image at {path}")
+ return
+
+ pic_str = img2b64(img)
+ data = {
+ 'img': pic_str,
+ 'prompt': 'Please segment the affordance map of mug in this image.'
+ }
+
+ # Send POST request to Flask server
+ r = requests.post('http://localhost:3200/img_mask', json=data)
+
+ if r.status_code == 200:
+ print('Success. Received response from server.')
+ result = r.json()
+ result_b64 = result.get('img', None)
+
+ if result_b64:
+ result_img = b642img(result_b64)
+ save_path = 'affordance_mask_result.jpg'
+ cv2.imwrite(save_path, result_img)
+ print(f"Result saved to {save_path}")
+ else:
+ print("No image returned in the response.")
+ else:
+ print(f"Request failed with status code {r.status_code}")
+
+# ---------------------------
+# Main entry
+# ---------------------------
+if __name__ == '__main__':
+ post_files()
+
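client.py expects a server exposing POST /img_mask on port 3200 that accepts {'img': <base64 image>, 'prompt': <str>} and returns {'img': <base64 mask>}. The server script is not shown here, so the following is only a hypothetical minimal counterpart documenting the expected contract; the zero mask is a placeholder for the real AffordanceVLM inference:

    # Hypothetical stand-in server matching client.py's request/response contract.
    import base64

    import cv2
    import numpy as np
    from flask import Flask, jsonify, request

    app = Flask(__name__)

    @app.route('/img_mask', methods=['POST'])
    def img_mask():
        data = request.get_json()
        img = cv2.imdecode(
            np.frombuffer(base64.b64decode(data['img']), np.uint8), cv2.IMREAD_COLOR
        )
        prompt = data['prompt']  # would be passed to the model in the real server

        # Placeholder: the real server would run AffordanceVLM on (img, prompt).
        mask = np.zeros(img.shape[:2], dtype=np.uint8)

        _, buf = cv2.imencode('.bmp', mask)
        return jsonify({'img': base64.b64encode(buf).decode()})

    if __name__ == '__main__':
        app.run(host='0.0.0.0', port=3200)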
diff --git a/data_curation/.ipynb_checkpoints/check_dataset-checkpoint.py b/data_curation/.ipynb_checkpoints/check_dataset-checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..951b6de0d36f5458875502d173627919523c3e21
--- /dev/null
+++ b/data_curation/.ipynb_checkpoints/check_dataset-checkpoint.py
@@ -0,0 +1,100 @@
+import os
+import pickle as pkl
+
+DATA_DIR = '/gemini/space/wrz/AffordanceNet/data'
+
+# Helper to resolve relative dataset paths to absolute paths
+def resolve_path(path):
+ """
+    If the path is relative (e.g. ./data/...), convert it to an absolute path.
+ """
+ if path.startswith('./data/'):
+        # Strip the './data/' prefix (7 characters) and join the rest onto the real DATA_DIR
+ return os.path.join(DATA_DIR, path[7:])
+ elif path.startswith('./'):
+        # Fall back for other relative paths
+ return os.path.join(os.path.dirname(DATA_DIR), path[2:])
+ return path
+
+
+def get_data_paths():
+ """Retrieve train/val/reasoning/non-reasoning pkl file paths."""
+ all_files = os.listdir(DATA_DIR)
+ train_paths = [os.path.join(DATA_DIR, f) for f in all_files if f.endswith('train.pkl')]
+ val_paths = [os.path.join(DATA_DIR, f) for f in all_files if f.endswith('val.pkl')]
+ reasoning_paths = [os.path.join(DATA_DIR, f) for f in all_files if f.endswith('reasoning_val.pkl')]
+ non_reasoning_paths = [vp for vp in val_paths if vp not in reasoning_paths]
+
+ return train_paths, reasoning_paths, non_reasoning_paths
+
+
+def check_file_exists(file_path, description=""):
+ """Assert that the file exists, otherwise raise an error."""
+ assert os.path.exists(file_path), f"{description} does not exist: {file_path}"
+
+
+def check_train_data(train_path):
+ """Check frame and mask paths for each sample in training data."""
+ print(f"[Train] Checking: {train_path}")
+ with open(train_path, "rb") as f:
+ data = pkl.load(f)
+
+ for item in data:
+        # Resolve the paths before checking them
+ real_frame_path = resolve_path(item["frame_path"])
+ real_mask_path = resolve_path(item["mask_path"])
+
+ check_file_exists(real_frame_path, "Frame path")
+ check_file_exists(real_mask_path, "Mask path")
+
+ print(f"[Train] ✅ Checked {train_path}. Samples: {len(data)}")
+
+
+def check_val_data(val_path, reasoning=False):
+ """Check validation data paths depending on reasoning mode."""
+ tag = "Reasoning Val" if reasoning else "Non-Reasoning Val"
+ print(f"[{tag}] Checking: {val_path}")
+
+ with open(val_path, "rb") as f:
+ data = pkl.load(f)
+
+ if reasoning:
+ for item in data:
+            # Resolve the paths before checking them
+ real_frame_path = resolve_path(item["frame_path"])
+ real_mask_path = resolve_path(item["mask_path"])
+
+ check_file_exists(real_frame_path, "Frame path")
+ check_file_exists(real_mask_path, "Mask path")
+ print(f"[{tag}] ✅ Checked {val_path}. Samples: {len(data)}")
+ else:
+ total_images = 0
+ for class_name, image_list in data.get('images', {}).items():
+ for image_path in image_list:
+                # Resolve the path before checking it
+ check_file_exists(resolve_path(image_path), "Image path")
+ total_images += len(image_list)
+
+ for class_name, label_list in data.get('labels', {}).items():
+ for label_path in label_list:
+                # Resolve the path before checking it
+ check_file_exists(resolve_path(label_path), "Label path")
+
+ print(f"[{tag}] ✅ Checked {val_path}. Samples: {total_images}")
+
+
+def main():
+ train_paths, reasoning_paths, non_reasoning_paths = get_data_paths()
+
+ for train_path in train_paths:
+ check_train_data(train_path)
+
+ for val_path in non_reasoning_paths:
+ check_val_data(val_path, reasoning=False)
+
+ for val_path in reasoning_paths:
+ check_val_data(val_path, reasoning=True)
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/data_curation/build_vlpart.py b/data_curation/build_vlpart.py
new file mode 100644
index 0000000000000000000000000000000000000000..0976f5cfd469f3602204227edbdf0d92c9eaed98
--- /dev/null
+++ b/data_curation/build_vlpart.py
@@ -0,0 +1,105 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import argparse
+import glob
+import multiprocessing as mp
+import numpy as np
+import os
+import tempfile
+import time
+import warnings
+import cv2
+import tqdm
+
+from detectron2.config import get_cfg
+from detectron2.data.detection_utils import read_image
+from detectron2.utils.logger import setup_logger
+
+import sys
+sys.path.append('.')
+from VLPart.vlpart.config import add_vlpart_config
+
+from VLPart.demo.predictor import VisualizationDemo
+
+
+# constants
+WINDOW_NAME = "image demo"
+
+
+def setup_cfg(args):
+ # load config from file and command-line arguments
+ cfg = get_cfg()
+ add_vlpart_config(cfg)
+ cfg.merge_from_file(args.config_file)
+ cfg.merge_from_list(args.opts)
+ # Set score_threshold for builtin models
+ cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
+ cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
+ cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
+ cfg.freeze()
+ return cfg
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
+ parser.add_argument(
+ "--config-file",
+ default="VLPart/configs/joint/swinbase_cascade_lvis_paco_pascalpart_partimagenet.yaml",
+ metavar="FILE",
+ help="path to config file",
+ )
+ parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
+ parser.add_argument("--video-input", help="Path to video file.")
+ parser.add_argument(
+ "--input",
+ nargs="+",
+ default='',
+ help="A list of space separated input images; "
+ "or a single glob pattern such as 'directory/*.jpg'",
+ )
+ parser.add_argument(
+ "--output",
+ default='',
+ help="A file or directory to save output visualizations. "
+ "If not given, will show output in an OpenCV window.",
+ )
+ parser.add_argument(
+ "--vocabulary",
+ default="custom",
+ choices=['pascal_part', 'partimagenet', 'paco',
+ 'voc', 'coco', 'lvis',
+ 'pascal_part_voc', 'lvis_paco', 'custom'],
+ help="",
+ )
+ parser.add_argument(
+ "--custom_vocabulary",
+ default="",
+ help="",
+ )
+ parser.add_argument(
+ "--confidence-threshold",
+ type=float,
+ default=0.7,
+ help="Minimum score for instance predictions to be shown",
+ )
+
+ parser.add_argument(
+ "--opts",
+ help="Modify config options using the command-line 'KEY VALUE' pairs",
+ default=['MODEL.WEIGHTS', "/data/VLPart/ckpts/swinbase_cascade_lvis_paco_pascalpart_partimagenet.pth", "VIS.BOX", False],
+ nargs=argparse.REMAINDER,
+ )
+ return parser
+
+def build_vlpart_model(custom_vocabulary):
+
+ mp.set_start_method("spawn", force=True)
+ args = get_parser().parse_args()
+ args.custom_vocabulary = custom_vocabulary
+ setup_logger(name="fvcore")
+ logger = setup_logger()
+ logger.info("Arguments: " + str(args))
+
+ cfg = setup_cfg(args)
+ model = VisualizationDemo(cfg, args)
+
+ return model
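A short usage sketch for build_vlpart_model; the import path follows vlpart_sam2_tracking.py below, which expects this module under VLPart/, and the image path and part name are illustrative only:

    from detectron2.data.detection_utils import read_image
    from VLPart.build_vlpart import build_vlpart_model

    # Detect a named part with VLPart and read out the predicted boxes.
    vlpart = build_vlpart_model("mug handle")
    img = read_image("vis_output/my_workspace.JPG", format="BGR")
    predictions, _ = vlpart.run_on_image(img)
    print(predictions["instances"].pred_boxes.tensor.cpu().numpy())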
diff --git a/data_curation/check_dataset.py b/data_curation/check_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..951b6de0d36f5458875502d173627919523c3e21
--- /dev/null
+++ b/data_curation/check_dataset.py
@@ -0,0 +1,100 @@
+import os
+import pickle as pkl
+
+DATA_DIR = '/gemini/space/wrz/AffordanceNet/data'
+
+# Helper to resolve relative dataset paths to absolute paths
+def resolve_path(path):
+ """
+    If the path is relative (e.g. ./data/...), convert it to an absolute path.
+ """
+ if path.startswith('./data/'):
+        # Strip the './data/' prefix (7 characters) and join the rest onto the real DATA_DIR
+ return os.path.join(DATA_DIR, path[7:])
+ elif path.startswith('./'):
+        # Fall back for other relative paths
+ return os.path.join(os.path.dirname(DATA_DIR), path[2:])
+ return path
+
+
+def get_data_paths():
+ """Retrieve train/val/reasoning/non-reasoning pkl file paths."""
+ all_files = os.listdir(DATA_DIR)
+ train_paths = [os.path.join(DATA_DIR, f) for f in all_files if f.endswith('train.pkl')]
+ val_paths = [os.path.join(DATA_DIR, f) for f in all_files if f.endswith('val.pkl')]
+ reasoning_paths = [os.path.join(DATA_DIR, f) for f in all_files if f.endswith('reasoning_val.pkl')]
+ non_reasoning_paths = [vp for vp in val_paths if vp not in reasoning_paths]
+
+ return train_paths, reasoning_paths, non_reasoning_paths
+
+
+def check_file_exists(file_path, description=""):
+ """Assert that the file exists, otherwise raise an error."""
+ assert os.path.exists(file_path), f"{description} does not exist: {file_path}"
+
+
+def check_train_data(train_path):
+ """Check frame and mask paths for each sample in training data."""
+ print(f"[Train] Checking: {train_path}")
+ with open(train_path, "rb") as f:
+ data = pkl.load(f)
+
+ for item in data:
+        # Resolve the paths before checking them
+ real_frame_path = resolve_path(item["frame_path"])
+ real_mask_path = resolve_path(item["mask_path"])
+
+ check_file_exists(real_frame_path, "Frame path")
+ check_file_exists(real_mask_path, "Mask path")
+
+ print(f"[Train] ✅ Checked {train_path}. Samples: {len(data)}")
+
+
+def check_val_data(val_path, reasoning=False):
+ """Check validation data paths depending on reasoning mode."""
+ tag = "Reasoning Val" if reasoning else "Non-Reasoning Val"
+ print(f"[{tag}] Checking: {val_path}")
+
+ with open(val_path, "rb") as f:
+ data = pkl.load(f)
+
+ if reasoning:
+ for item in data:
+            # Resolve the paths before checking them
+ real_frame_path = resolve_path(item["frame_path"])
+ real_mask_path = resolve_path(item["mask_path"])
+
+ check_file_exists(real_frame_path, "Frame path")
+ check_file_exists(real_mask_path, "Mask path")
+ print(f"[{tag}] ✅ Checked {val_path}. Samples: {len(data)}")
+ else:
+ total_images = 0
+ for class_name, image_list in data.get('images', {}).items():
+ for image_path in image_list:
+                # Resolve the path before checking it
+ check_file_exists(resolve_path(image_path), "Image path")
+ total_images += len(image_list)
+
+ for class_name, label_list in data.get('labels', {}).items():
+ for label_path in label_list:
+                # Resolve the path before checking it
+ check_file_exists(resolve_path(label_path), "Label path")
+
+ print(f"[{tag}] ✅ Checked {val_path}. Samples: {total_images}")
+
+
+def main():
+ train_paths, reasoning_paths, non_reasoning_paths = get_data_paths()
+
+ for train_path in train_paths:
+ check_train_data(train_path)
+
+ for val_path in non_reasoning_paths:
+ check_val_data(val_path, reasoning=False)
+
+ for val_path in reasoning_paths:
+ check_val_data(val_path, reasoning=True)
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
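For reference, resolve_path simply remaps the './data/' prefix onto DATA_DIR and other './' paths onto its parent directory; two illustrative calls (file names are hypothetical):

    # resolve_path('./data/handal/images/0001.jpg')
    #   -> '/gemini/space/wrz/AffordanceNet/data/handal/images/0001.jpg'
    # resolve_path('./masks/0001.png')
    #   -> '/gemini/space/wrz/AffordanceNet/masks/0001.png'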
diff --git a/data_curation/prompt_generation_handal_easy_reasoning.py b/data_curation/prompt_generation_handal_easy_reasoning.py
new file mode 100644
index 0000000000000000000000000000000000000000..771e76defc66251d6c6afd39674cfaffe7f57ba5
--- /dev/null
+++ b/data_curation/prompt_generation_handal_easy_reasoning.py
@@ -0,0 +1,126 @@
+import os
+import json
+import pickle
+import requests
+from concurrent.futures import ThreadPoolExecutor
+
+# Dataset name
+DATASET = 'handal'
+
+# Handle-equipped objects to filter
+OBJECTS_WITH_HANDLE = [
+ 'strainers', 'fixed joint pliers', 'hammers', 'ladles', 'whisks', 'measuring cups',
+ 'locking pliers', 'power drills', 'adjustable wrenches', 'mugs', 'ratchets', 'utensils',
+ 'combinational wrenches', 'pots pans', 'spatulas', 'screwdrivers', 'slip joint pliers'
+]
+
+# OpenAI API settings (update key!)
+API_URL = 'https://api.openai.com/v1/chat/completions'
+HEADERS = {
+ 'Content-Type': 'application/json',
+ 'Authorization': 'Bearer YOUR-API-KEY' # Replace with your real key
+}
+
+
+def read_pkl_file(pkl_path):
+ """Reads pkl file and filters entries for objects with handles."""
+ with open(pkl_path, 'rb') as f:
+ val_data = pickle.load(f)
+
+ filtered_data = []
+ for class_name, image_list in val_data['images'].items():
+ if class_name in OBJECTS_WITH_HANDLE:
+ for idx, img in enumerate(image_list):
+ class_label = val_data['class_names'][class_name][idx]
+ save_path = os.path.join(
+ f'./reason_affordance/{DATASET}_easy_reasoning',
+ class_label,
+ os.path.splitext(os.path.basename(img))[0] + ".json"
+ )
+ if not os.path.exists(save_path):
+ filtered_data.append({'img_name': img, 'class_name': class_label})
+ return filtered_data
+
+
+def process_sentence(class_name):
+ """Send prompt to OpenAI and return generated sentence."""
+ prompt = [
+ {'role': 'system', 'content': 'You are a helpful assistant.'},
+ {'role': 'system',
+ 'content': (
+ 'Based on several words where the first is category name, '
+ 'please design an instruction <1> and instruction <2> in embodied scenes. '
+ 'The instruction <1> must include object category name itself. '
+ 'The instruction <2> must include the object category name itself. '
+ 'The instruction <2> must belong to embodied manipulation and give action if instruction <1> provides. '
+ 'The instruction <2> does not exceed 50 words.'
+ )},
+ {'role': 'user', 'content': 'mug'},
+ {'role': 'assistant',
+ 'content': '<1> I need a drink. Please find a mug to fill water. <2> The mug has a handle as affordance map. So the robot can hold its handle.'},
+ {'role': 'user', 'content': 'knife'},
+ {'role': 'assistant',
+ 'content': '<1> Please give me a knife to cut apple. <2> The knife has a handle, and you can use its handle to cut apple.'},
+ {'role': 'user', 'content': 'hammers'},
+ {'role': 'assistant',
+ 'content': '<1> What is the proper way to hold the hammers? <2> The correct method is to hold the hammer by its handle.'},
+ {'role': 'user', 'content': 'fork'},
+ {'role': 'assistant',
+ 'content': '<1> Kindly pick up the fork. <2> You will be holding the fork handle.'},
+ {'role': 'user', 'content': 'screwdrivers'},
+ {'role': 'assistant',
+ 'content': '<1> I need a tool to tighten or loosen screws. <2> The screwdriver is here, hold its handle to turn and control screws.'},
+ {'role': 'user', 'content': class_name}
+ ]
+
+ response = requests.post(API_URL, headers=HEADERS, json={'model': 'gpt-4', 'messages': prompt})
+ if response.status_code == 200:
+ return response.json()['choices'][0]['message']['content']
+ else:
+ print(f"API Error for {class_name}:", response.text)
+ return None
+
+
+def process_json(data):
+ """Process a single data entry and save result to JSON file."""
+ class_name = data["class_name"]
+
+ # Retry up to 5 times
+ for _ in range(5):
+ result = process_sentence(class_name)
+ if not result or '<1>' not in result or '<2>' not in result:
+ continue
+ break
+ else:
+ print(f"Failed to process: {class_name}")
+ return
+
+ print("Processed:", result)
+
+ try:
+ question = result.split('<2>')[0].split('<1>')[-1].strip()
+ answer = result.split('<2>')[-1].strip()
+
+ save_dir = os.path.join(f'./reason_affordance/{DATASET}_easy_reasoning', class_name)
+ os.makedirs(save_dir, exist_ok=True)
+
+ save_path = os.path.join(save_dir, os.path.splitext(os.path.basename(data["img_name"]))[0] + ".json")
+ output = {'img_name': data["img_name"], 'class_name': class_name, 'question': question, 'answer': answer}
+
+ with open(save_path, 'w') as f:
+ json.dump(output, f, indent=4)
+
+ except Exception as e:
+ print(f"Error saving file for {class_name}:", e)
+
+
+def main():
+ pkl_file = f'./data/{DATASET}_val.pkl'
+ data_list = read_pkl_file(pkl_file)
+
+ with ThreadPoolExecutor(max_workers=2) as executor:
+ executor.map(process_json, data_list)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/data_curation/prompt_generation_handal_hard_reasoning.py b/data_curation/prompt_generation_handal_hard_reasoning.py
new file mode 100644
index 0000000000000000000000000000000000000000..caf9c29fa211489f242a48e0ce0e81972f6558dc
--- /dev/null
+++ b/data_curation/prompt_generation_handal_hard_reasoning.py
@@ -0,0 +1,136 @@
+import os
+import json
+import pickle
+import requests
+from concurrent.futures import ThreadPoolExecutor
+
+# Dataset configuration
+DATASET = 'handal'
+
+# Object categories with handle
+OBJECTS_WITH_HANDLE = [
+ 'strainers', 'fixed joint pliers', 'hammers', 'ladles', 'whisks', 'measuring cups',
+ 'locking pliers', 'power drills', 'adjustable wrenches', 'mugs', 'ratchets', 'utensils',
+ 'combinational wrenches', 'pots pans', 'spatulas', 'screwdrivers', 'slip joint pliers'
+]
+
+# OpenAI API settings (update key!)
+API_URL = 'https://api.openai.com/v1/chat/completions'
+HEADERS = {
+ 'Content-Type': 'application/json',
+ 'Authorization': 'Bearer YOUR-API-KEY' # Replace with your real key
+}
+
+
+def read_pkl_file(pkl_path):
+ """
+ Load a pickle file and extract data entries containing objects with handles,
+ skipping already processed samples.
+ """
+ with open(pkl_path, 'rb') as f:
+ val_data = pickle.load(f)
+
+ filtered_data = []
+ for class_name, img_list in val_data['images'].items():
+ if class_name not in OBJECTS_WITH_HANDLE:
+ continue
+ for i, img_path in enumerate(img_list):
+ class_label = val_data['class_names'][class_name][i]
+ save_path = os.path.join(
+ f'./reason_affordance/{DATASET}_hard_reasoning',
+ class_label,
+ os.path.splitext(os.path.basename(img_path))[0] + ".json"
+ )
+ if not os.path.exists(save_path):
+ filtered_data.append({'img_name': img_path, 'class_name': class_label})
+
+ return filtered_data
+
+
+def process_sentence(category):
+ """
+ Generate reasoning instructions (<1>, <2>) from category name using GPT.
+ """
+ payload = {
+ 'model': 'gpt-4',
+ 'messages': [
+ {'role': 'system', 'content': 'You are a helpful assistant.'},
+ {'role': 'system',
+ 'content': (
+ 'Based on several words where the first is category name, please design an instruction <1> and instruction <2> in embodied scenes. '
+ 'The instruction <1> must not include object category name itself. '
+ 'The instruction <2> must include the object category name itself. '
+ 'The instruction <2> must belong to embodied manipulation and give action if instruction <1> provides. '
+ 'The instruction <2> does not exceed 50 words.'
+ )},
+ {'role': 'user', 'content': 'microwave, open'},
+ {'role': 'assistant', 'content': '<1> Heat up food quickly. <2> The microwave is closed, so it can be open to access the food inside.'},
+ {'role': 'user', 'content': 'knife'},
+ {'role': 'assistant', 'content': '<1> I want to cut a bread. <2> The knife has a handle, you can use its handle to cut bread.'},
+ {'role': 'user', 'content': 'computer mouse'},
+ {'role': 'assistant', 'content': '<1> Give me a tool to control the cursor on the screen. <2> The computer mouse is here. It has no handle, so you can grasp its whole body.'},
+ {'role': 'user', 'content': 'fork'},
+ {'role': 'assistant', 'content': '<1> Use to pierce and lift food. <2> The fork is here, and its handle can be grasped.'},
+ {'role': 'user', 'content': 'screwdrivers'},
+ {'role': 'assistant', 'content': '<1> I need a tool to tighten or loosen screws. <2> The screwdriver is here, hold its handle to turn and control screws.'},
+ {'role': 'user', 'content': category}
+ ]
+ }
+
+ response = requests.post(API_URL, headers=HEADERS, json=payload)
+ if response.status_code == 200:
+ return response.json()['choices'][0]['message']['content']
+ else:
+ print(f"[API Error] {category}: {response.status_code} - {response.text}")
+ return None
+
+
+def process_json(entry):
+ """
+ Process a single image/class entry by generating reasoning and saving result to file.
+ """
+ class_name = entry['class_name']
+
+ for _ in range(5):
+ result = process_sentence(class_name)
+ if result and '<1>' in result and '<2>' in result:
+ break
+ else:
+ print(f"[Retry Failed] {class_name}")
+ return
+
+ try:
+ question = result.split('<2>')[0].split('<1>')[-1].strip()
+ answer = result.split('<2>')[-1].strip()
+
+ save_dir = os.path.join(f'./reason_affordance/{DATASET}_hard_reasoning', class_name)
+ os.makedirs(save_dir, exist_ok=True)
+
+ save_path = os.path.join(save_dir, os.path.splitext(os.path.basename(entry['img_name']))[0] + ".json")
+ output = {
+ 'img_name': entry['img_name'],
+ 'class_name': class_name,
+ 'question': question,
+ 'answer': answer
+ }
+
+ with open(save_path, 'w') as f:
+ json.dump(output, f, indent=4)
+ print(f"[Saved] {save_path}")
+ except Exception as e:
+ print(f"[Error] Failed to save {class_name}: {e}")
+
+
+def main():
+ """
+ Main execution: loads data, then processes in parallel.
+ """
+ pkl_path = f'./data/{DATASET}_val.pkl'
+ entries = read_pkl_file(pkl_path)
+
+ with ThreadPoolExecutor(max_workers=2) as executor:
+ executor.map(process_json, entries)
+
+
+if __name__ == "__main__":
+ main()
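Both prompt-generation scripts write one JSON file per image under ./reason_affordance/; an illustrative, entirely hypothetical example of the saved structure:

    # Example of one generated file (all values made up):
    example = {
        "img_name": "mugs_000123.jpg",
        "class_name": "mug handle",
        "question": "I need something to drink my coffee from.",
        "answer": "The mug is here; you can grasp it by its handle.",
    }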
diff --git a/data_curation/vlpart_sam2_tracking.py b/data_curation/vlpart_sam2_tracking.py
new file mode 100644
index 0000000000000000000000000000000000000000..dceae8ea8c54c700b6acb3784534b4daf7bb5f24
--- /dev/null
+++ b/data_curation/vlpart_sam2_tracking.py
@@ -0,0 +1,187 @@
+import os
+import cv2
+import torch
+import pickle
+import argparse
+import numpy as np
+import warnings
+from tqdm import tqdm
+from pathlib import Path
+from PIL import Image
+
+from detectron2.data.detection_utils import read_image
+from supervision import Detections, BoxAnnotator, MaskAnnotator, LabelAnnotator, mask_to_xyxy
+
+from sam2.build_sam import build_sam2_video_predictor
+from VLPart.build_vlpart import build_vlpart_model
+
+
+warnings.filterwarnings('ignore')
+
+# Constants
+SAM2_CONFIG = "sam2_hiera_l.yaml"
+SAM2_CHECKPOINT = "./checkpoints/sam2_hiera_large.pt"
+OUTPUT_ROOT = "/data/robot-merlin/mask_vlpart+sam2_tracking"
+OUTPUT_ROOT_IMG = "/data/robot-merlin/mask_vlpart+sam2_tracking_with_image"
+
+# Set up torch environment
+torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
+if torch.cuda.get_device_properties(0).major >= 8:
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+
+def load_affordance_data(pkl_path):
+ """
+ Load affordance data from a pickle file and organize it by video directory.
+ Args:
+ pkl_path (str): Path to the pickle file containing affordance data.
+ Returns:
+ dict: A dictionary where keys are video directory paths and values are lists of data entries.
+ """
+ with open(pkl_path, 'rb') as f:
+ datas = pickle.load(f)
+
+ data_dict = {}
+ for data in datas:
+ vid_path = os.path.dirname(data['frame_path'])
+ data_dict.setdefault(vid_path, []).append(data)
+ return data_dict
+
+
+def init_vlpart_once(text, prev_text, vlpart_model):
+ """
+ Initialize VLPart model if the text has changed.
+ """
+ if text != prev_text:
+ if vlpart_model is not None:
+ del vlpart_model
+ vlpart_model = build_vlpart_model(text)
+ return vlpart_model, text
+
+
+def run_vlpart_on_first_frame(vlpart_model, image_path):
+ """
+ Run VLPart model on the first frame to get bounding boxes.
+ """
+ img = read_image(image_path, format="BGR")
+ predictions, _ = vlpart_model.run_on_image(img)
+ if len(predictions["instances"]) != 1:
+ return None
+ return predictions["instances"].pred_boxes.tensor.cpu().numpy()
+
+
+def run_sam2_tracking(video_dir, frame_names, sam2_predictor, boxes):
+ """
+ Run SAM2 tracking on the video frames using the provided bounding boxes.
+ """
+ inference_state = sam2_predictor.init_state(video_path=video_dir)
+ sam2_predictor.reset_state(inference_state)
+
+ _, obj_ids, mask_logits = sam2_predictor.add_new_points_or_box(
+ inference_state=inference_state,
+ frame_idx=0,
+ obj_id=1,
+ box=boxes,
+ )
+
+ results = {}
+ for frame_idx, out_ids, out_logits in sam2_predictor.propagate_in_video(inference_state):
+ results[frame_idx] = {
+ oid: (out_logits[i] > 0).cpu().numpy()
+ for i, oid in enumerate(out_ids)
+ }
+ return results
+
+
+def save_tracking_results(video_dir, frame_names, video_segments, object_name, output_base, vid):
+ """
+ Save the tracking results to the specified output directory.
+ """
+ objects = [object_name]
+ id_to_objects = {i: obj for i, obj in enumerate(objects, start=1)}
+
+ output_dir = Path(f"{output_base}/{vid:06d}")
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ output_dir_img = Path(f"{OUTPUT_ROOT_IMG}/{vid:06d}")
+ output_dir_img.mkdir(parents=True, exist_ok=True)
+
+ box_annotator = BoxAnnotator()
+ label_annotator = LabelAnnotator()
+ mask_annotator = MaskAnnotator()
+
+ for idx, masks in video_segments.items():
+ frame_path = os.path.join(video_dir, frame_names[idx])
+ frame = cv2.imread(frame_path)
+
+ obj_ids = list(masks.keys())
+ mask_arr = np.concatenate(list(masks.values()), axis=0)
+
+ detections = Detections(
+ xyxy=mask_to_xyxy(mask_arr),
+ mask=mask_arr,
+ class_id=np.array(obj_ids, dtype=np.int32),
+ )
+
+ annotated = box_annotator.annotate(frame.copy(), detections)
+ annotated = label_annotator.annotate(annotated, detections, [id_to_objects[i] for i in obj_ids])
+ annotated = mask_annotator.annotate(annotated, detections)
+
+ cv2.imwrite(str(output_dir_img / frame_names[idx]), annotated)
+        cv2.imwrite(str(output_dir / frame_names[idx]), mask_arr[0].astype(np.uint8) * 255)
+
+
+def get_sorted_frame_names(video_dir):
+ return sorted([
+ f for f in os.listdir(video_dir)
+ if f.lower().endswith(('.jpg', '.jpeg'))
+ ], key=lambda name: int(os.path.splitext(name)[0]))
+
+
+def main(openx_data, text_override=None):
+ # You can reorganize the data loading logic as needed
+ data_dict = load_affordance_data(f'./data/{openx_data}_for_affordance.pkl')
+
+ # Initialize SAM2 predictor
+ sam2_predictor = build_sam2_video_predictor(SAM2_CONFIG, SAM2_CHECKPOINT, device=device)
+
+ prev_text = ''
+ vlpart_model = None
+
+ for video_dir, data_list in tqdm(data_dict.items()):
+ first_sample = data_list[0]
+ frame_path = first_sample['frame_path']
+ task_class = first_sample['task_object_class']
+
+ # Only process specific classes
+ if not any(k in task_class for k in ['door', 'drawer', 'knife']):
+ continue
+
+ # Initialize VLPart model with the task class
+ input_text = f"{task_class} handle" if not text_override else text_override
+ vlpart_model, prev_text = init_vlpart_once(input_text, prev_text, vlpart_model)
+
+ # Process the first frame to get bounding boxes
+ boxes = run_vlpart_on_first_frame(vlpart_model, frame_path)
+ if boxes is None:
+ continue
+
+ # Run SAM2 tracking on the video frames
+ frame_names = get_sorted_frame_names(video_dir)
+ segments = run_sam2_tracking(video_dir, frame_names, sam2_predictor, boxes)
+ save_tracking_results(video_dir, frame_names, segments, input_text,
+ f"{OUTPUT_ROOT}/", first_sample['vid'])
+ print(f"[Done] {frame_path} | {task_class}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser("VLPart + SAM2 Tracking Demo")
+ parser.add_argument("--pipeline", type=str, default="referring_expression_segmentation", help="Pipeline task")
+ parser.add_argument("--text_input", type=str, default=None, help="Optional override for input text")
+ parser.add_argument("--dataset", type=str, default="bridge", help="Dataset name (e.g., bridge)")
+ args = parser.parse_args()
+
+    main(args.dataset, args.text_input)
diff --git a/docs/dataset.md b/docs/dataset.md
new file mode 100644
index 0000000000000000000000000000000000000000..d1268e914456843e48c46f61526a1773cd27a64d
--- /dev/null
+++ b/docs/dataset.md
@@ -0,0 +1,93 @@
+## Dataset
+
+To train our affordance segmentation model, we use two types of data:
+* **General Segmentation Data**: This follows [LISA](https://github.com/dvlab-research/LISA).
+* **Affordance Segmentation Data**: This is a large-scale dataset that we collect.
+
+### General Segmentation Data
+This data is organized as follows:
+```
+./data/
+├── lisa_data
+│ ├── ade20k
+│ ├── coco
+│ ├── cocostuff
+│ ├── llava_dataset
+│ ├── mapillary
+│ ├── reason_seg
+│ ├── refer_seg
+│ ├── vlpart
+```
+
+### Affordance Segmentation Data
+
+We employ images from HANDAL, Open-X, GraspNet, EgoObjects, and RLBench in our affordance segmentation task.
+
+The HANDAL data is downloaded and organized according to its official [repo](https://github.com/NVlabs/HANDAL).
+Other data can be downloaded from [Hugging Face](https://huggingface.co/datasets/Dongming97/RAGNet).
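+
+For example, one way to fetch the Hugging Face release (assuming the `huggingface_hub` CLI is installed) is:
+```bash
+huggingface-cli download Dongming97/RAGNet --repo-type dataset --local-dir ./data
+```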
+
+The training data is organized as follows:
+```
+./data/
+├── openx_train.pkl
+├── graspnet_train.pkl
+├── egoobjects_train.pkl
+├── rlbench_train.pkl
+├── handal_hard_reasoning_train.pkl
+├── egoobjects_easy_reasoning_train.pkl
+├── egoobjects_hard_reasoning_train.pkl
+├── HANDAL
+│ ├── without_depth
+│ ├── handal_dataset_adjustable_wrenches
+│ ├── handal_dataset_combinational_wrenches
+│ ├── handal_dataset_fixed_joint_pliers
+│ ├── ...
+├── openx
+│ ├── images
+│ ├── fractal20220817_data
+│ ├── bridge
+│ ├── masks
+│ ├── fractal20220817_data
+│ ├── bridge
+├── graspnet
+│ ├── images
+│ ├── masks
+│ ├── test_seen
+│ ├── test_novel
+├── egoobjects
+│ ├── images
+│ ├── masks
+├── rlbench
+│ ├── images
+│ ├── masks
+├── 3doi
+│ ├── images
+│ ├── masks
+```
+
+The evaluation data lives in the same directory, but uses `*_val.pkl` files instead of `*_train.pkl`:
+
+```
+./data/
+├── handal_mini_val.pkl
+├── graspnet_test_seen_val.pkl
+├── graspnet_test_novel_val.pkl
+├── 3doi_val.pkl
+├── handal_easy_reasoning_val.pkl
+├── handal_hard_reasoning_val.pkl
+├── 3doi_easy_reasoning_val.pkl
+```
+
+You can use the following script to confirm that the data is organized correctly:
+```bash
+python data_curation/check_dataset.py
+```
+
+### About Data Curation
+1. **SAM2**: We use SAM2 to generate affordance masks when the dataset provides box annotations (see the sketch after this list).
+2. **Florence-2 + SAM2**: We use Florence-2 to generate the initial segmentation masks of some complete objects, and then refine them with SAM2. Please see [Florence-2+SAM2](https://github.com/IDEA-Research/Grounded-SAM-2).
+3. **VLPart + SAM2**: We use VLPart to generate bounding boxes of object parts, and then refine them into masks with SAM2. We refer to [VLPart](https://github.com/facebookresearch/VLPart).
+We provide our inference demo scripts in `data_curation/build_vlpart.py` and `data_curation/vlpart_sam2_tracking.py`.
+4. **Reasoning Instruction**: We provide two example scripts to generate reasoning instructions for the affordance segmentation task:
+ - `data_curation/prompt_generation_handal_easy_reasoning.py`: This script generates easy reasoning instructions for the HANDAL dataset.
+ - `data_curation/prompt_generation_handal_hard_reasoning.py`: This script generates hard reasoning instructions for the HANDAL dataset.
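+
+As a sketch of step 1, the snippet below shows box-prompted mask generation with SAM2's image predictor. It is only an illustration, not the exact curation code: the config/checkpoint names are taken from `data_curation/vlpart_sam2_tracking.py`, and the box values are hypothetical placeholders for a dataset-provided annotation.
+
+```python
+import cv2
+import numpy as np
+from sam2.build_sam import build_sam2
+from sam2.sam2_image_predictor import SAM2ImagePredictor
+
+# Assumed config/checkpoint names (same as in vlpart_sam2_tracking.py).
+sam2_model = build_sam2("sam2_hiera_l.yaml", "./checkpoints/sam2_hiera_large.pt", device="cuda")
+predictor = SAM2ImagePredictor(sam2_model)
+
+# Load one frame and set it as the prediction target (SAM2 expects RGB).
+image = cv2.cvtColor(cv2.imread("frame.jpg"), cv2.COLOR_BGR2RGB)
+predictor.set_image(image)
+
+# Dataset-provided box in xyxy pixel coordinates (hypothetical values).
+box = np.array([120, 80, 360, 300])
+masks, scores, _ = predictor.predict(box=box, multimask_output=False)
+
+# Save the binary affordance mask as 0/255.
+cv2.imwrite("frame_mask.png", (masks[0] > 0).astype(np.uint8) * 255)
+```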
\ No newline at end of file
diff --git a/docs/installation.md b/docs/installation.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c32709de7ae6b7ccc202063b58241c65a728e34
--- /dev/null
+++ b/docs/installation.md
@@ -0,0 +1,10 @@
+## Installation
+The environment installation mainly follows [LISA](https://github.com/dvlab-research/LISA).
+```
+git clone https://github.com/wudongming97/AffordanceNet.git
+cd AffordanceNet
+conda create -n affordancenet python=3.9
+conda activate affordancenet
+pip install -r requirements.txt
+pip install flash-attn --no-build-isolation
+```
diff --git a/docs/training_and_evaluation.md b/docs/training_and_evaluation.md
new file mode 100644
index 0000000000000000000000000000000000000000..33206b7b00f15beac797f4d916195f6b5f08a39a
--- /dev/null
+++ b/docs/training_and_evaluation.md
@@ -0,0 +1,56 @@
+## Training and Evaluation
+
+### Pre-trained Weights
+#### LLaVA
+For convenience, we provide the pre-trained LLaVA weights on [Hugging Face](https://huggingface.co/Dongming97/LLaVA-Lightning-7B-v1-1).
+
+#### SAM
+Download SAM ViT-H pre-trained weights from the [link](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth).
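+
+For example:
+```
+wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
+```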
+
+
+### Training
+To train AffordanceVLM, you can use the following command.
+```
+bash ./scripts/train.sh
+```
+When training is finished, run the following to get the full model weights:
+
+```
+cd ./runs/AffordanceVLM-7B/ckpt_model && python zero_to_fp32.py . ../pytorch_model.bin
+```
+
+### Merge LoRA Weight
+Merge the LoRA weights of `pytorch_model.bin` and save the resulting model to your desired path in the Hugging Face format:
+```
+CUDA_VISIBLE_DEVICES="" python merge_lora_weights_and_save_hf_model.py \
+ --version="PATH_TO_LLaVA" \
+ --weight="PATH_TO_pytorch_model.bin" \
+ --save_path="PATH_TO_SAVED_MODEL"
+```
+
+For example:
+```
+CUDA_VISIBLE_DEVICES="" python3 merge_lora_weights_and_save_hf_model.py \
+ --version="./LLaVA/LLaVA-Lightning-7B-v1-1" \
+ --weight="./runs/AffordanceVLM-7B/pytorch_model.bin" \
+ --save_path="./exps/AffordanceVLM-7B"
+```
+
+### Evaluation
+To evaluate AffordanceVLM on the entire [HANDAL](https://github.com/NVlabs/HANDAL) dataset, adjust the `--dataset_dir` parameter in `evaluate.sh` and run:
+```
+bash ./scripts/evaluate.sh
+```
+
+To chat with [AffordanceVLM-7B](https://huggingface.co/Dongming97/AffordanceVLM):
+```
+CUDA_VISIBLE_DEVICES=0 python chat.py --version=./exps/AffordanceVLM-7B
+```
+
+### Main Results
+
+HANDAL:
+
+| Method | gIoU | cIoU |
+|:----------------:|:----:|-----:|
+| AffordanceVLM-7B | 60.3 | 60.8 |
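+
+For reference, these metrics follow the convention from LISA: gIoU is the mean of per-image IoU, and cIoU is the cumulative intersection over the cumulative union across the whole split. A minimal illustrative sketch (not the repo's evaluation code) is:
+
+```
+import numpy as np
+
+def giou_ciou(pred_masks, gt_masks):
+    """pred_masks, gt_masks: lists of binary (H, W) arrays."""
+    per_image_iou, total_inter, total_union = [], 0, 0
+    for pred, gt in zip(pred_masks, gt_masks):
+        inter = np.logical_and(pred, gt).sum()
+        union = np.logical_or(pred, gt).sum()
+        per_image_iou.append(inter / union if union > 0 else 1.0)
+        total_inter += inter
+        total_union += union
+    giou = float(np.mean(per_image_iou))
+    ciou = total_inter / total_union if total_union > 0 else 1.0
+    return giou, ciou
+```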
\ No newline at end of file
diff --git a/imgs/.ipynb_checkpoints/AffordanceNet-checkpoint.jpg b/imgs/.ipynb_checkpoints/AffordanceNet-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a936b446f7764ca01255a56a9be4ae410a7dbd08
--- /dev/null
+++ b/imgs/.ipynb_checkpoints/AffordanceNet-checkpoint.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3abd71b7ead1d3353faf60d65da4ceeafed34314a4c123059b5d92f53685c797
+size 1166532
diff --git a/imgs/AffordanceNet.jpg b/imgs/AffordanceNet.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a936b446f7764ca01255a56a9be4ae410a7dbd08
--- /dev/null
+++ b/imgs/AffordanceNet.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3abd71b7ead1d3353faf60d65da4ceeafed34314a4c123059b5d92f53685c797
+size 1166532
diff --git a/imgs/AffordanceNet.png b/imgs/AffordanceNet.png
new file mode 100644
index 0000000000000000000000000000000000000000..d07a4c11ab54305dc16863627eacb8e80dc91431
--- /dev/null
+++ b/imgs/AffordanceNet.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c1537d2a0442b1685bdfdefbb8f028acf2cc9d90782a8f37c77037126aab550
+size 1881195
diff --git a/merge_lora_weights_and_save_hf_model.py b/merge_lora_weights_and_save_hf_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..c18e5cfa93f4604f0e61e4b841cc6135f9d36ad9
--- /dev/null
+++ b/merge_lora_weights_and_save_hf_model.py
@@ -0,0 +1,162 @@
+import argparse
+import glob
+import os
+import sys
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+import transformers
+from peft import LoraConfig, get_peft_model
+from transformers import AutoTokenizer
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from utils.utils import DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(
+ description="merge lora weights and save model with hf format"
+ )
+ parser.add_argument(
+ "--version", default="liuhaotian/llava-llama-2-13b-chat-lightning-preview"
+ )
+ parser.add_argument("--vis_save_path", default="./vis_output", type=str)
+ parser.add_argument(
+ "--precision",
+ default="bf16",
+ type=str,
+ choices=["fp32", "bf16", "fp16"],
+ help="precision for inference",
+ )
+ parser.add_argument("--vision_pretrained", default="PATH_TO_SAM_ViT-H", type=str)
+ parser.add_argument("--out_dim", default=256, type=int)
+ parser.add_argument("--image_size", default=1024, type=int, help="image size")
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument(
+ "--vision-tower", default="openai/clip-vit-large-patch14", type=str
+ )
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument("--lora_alpha", default=16, type=int)
+ parser.add_argument("--lora_dropout", default=0.05, type=float)
+ parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str)
+ parser.add_argument("--local-rank", default=0, type=int, help="node rank")
+ parser.add_argument("--train_mask_decoder", action="store_true", default=True)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type",
+ default="llava_v1",
+ type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ parser.add_argument("--weight", default="", type=str, required=True)
+ parser.add_argument("--save_path", default="./lisa_model", type=str, required=True)
+ return parser.parse_args(args)
+
+
+def main(args):
+ args = parse_args(args)
+ os.makedirs(args.vis_save_path, exist_ok=True)
+
+ # Create model
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ num_added_tokens = tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ num_added_tokens = tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ if args.use_mm_start_end:
+ tokenizer.add_tokens(
+ [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
+ )
+
+ model_args = {
+ "train_mask_decoder": args.train_mask_decoder,
+ "out_dim": args.out_dim,
+ "seg_token_idx": args.seg_token_idx,
+ "aff_token_idx": args.aff_token_idx,
+ "vision_tower": args.vision_tower,
+ }
+
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version, torch_dtype=torch_dtype, low_cpu_mem_usage=True, **model_args
+ )
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype)
+ model.get_model().initialize_lisa_modules(model.get_model().config)
+
+ lora_r = args.lora_r
+ if lora_r > 0:
+
+ def find_linear_layers(model, lora_target_modules):
+ cls = torch.nn.Linear
+ lora_module_names = set()
+ for name, module in model.named_modules():
+ if (
+ isinstance(module, cls)
+ and all(
+ [
+ x not in name
+ for x in [
+ "visual_model",
+ "vision_tower",
+ "mm_projector",
+ "text_hidden_fcs",
+ ]
+ ]
+ )
+ and any([x in name for x in lora_target_modules])
+ ):
+ lora_module_names.add(name)
+ return sorted(list(lora_module_names))
+
+ lora_alpha = args.lora_alpha
+ lora_dropout = args.lora_dropout
+ lora_target_modules = find_linear_layers(
+ model, args.lora_target_modules.split(",")
+ )
+ lora_config = LoraConfig(
+ r=lora_r,
+ lora_alpha=lora_alpha,
+ target_modules=lora_target_modules,
+ lora_dropout=lora_dropout,
+ bias="none",
+ task_type="CAUSAL_LM",
+ )
+ model = get_peft_model(model, lora_config)
+ model.print_trainable_parameters()
+
+ model.resize_token_embeddings(len(tokenizer))
+
+ state_dict = torch.load(args.weight, map_location="cpu")
+ model.load_state_dict(state_dict, strict=True)
+
+ model = model.merge_and_unload()
+ state_dict = {}
+ for k, v in model.state_dict().items():
+ if "vision_tower" not in k:
+ state_dict[k] = v
+ model.save_pretrained(args.save_path, state_dict=state_dict)
+ tokenizer.save_pretrained(args.save_path)
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/model/AffordanceVLM.py b/model/AffordanceVLM.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ba686155450e169ce9648ba24e88dd7064f2523
--- /dev/null
+++ b/model/AffordanceVLM.py
@@ -0,0 +1,428 @@
+from typing import List
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from transformers import BitsAndBytesConfig, CLIPVisionModel
+
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_PATCH_TOKEN)
+
+from .llava.model.language_model.llava_llama import (LlavaLlamaForCausalLM,
+ LlavaLlamaModel)
+from .segment_anything import build_sam_vit_h
+
+
+def dice_loss(
+ inputs: torch.Tensor,
+ targets: torch.Tensor,
+ num_masks: float,
+ scale=1000, # 100000.0,
+ eps=1e-6,
+):
+ """
+ Compute the DICE loss, similar to generalized IOU for masks
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs
+ (0 for the negative class and 1 for the positive class).
+ """
+ inputs = inputs.sigmoid()
+ inputs = inputs.flatten(1, 2)
+ targets = targets.flatten(1, 2)
+ numerator = 2 * (inputs / scale * targets).sum(-1)
+ denominator = (inputs / scale).sum(-1) + (targets / scale).sum(-1)
+ loss = 1 - (numerator + eps) / (denominator + eps)
+ loss = loss.sum() / (num_masks + 1e-8)
+ return loss
+
+
+def sigmoid_ce_loss(
+ inputs: torch.Tensor,
+ targets: torch.Tensor,
+ num_masks: float,
+):
+ """
+ Args:
+ inputs: A float tensor of arbitrary shape.
+ The predictions for each example.
+ targets: A float tensor with the same shape as inputs. Stores the binary
+ classification label for each element in inputs
+ (0 for the negative class and 1 for the positive class).
+ Returns:
+ Loss tensor
+ """
+ loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+ loss = loss.flatten(1, 2).mean(1).sum() / (num_masks + 1e-8)
+ return loss
+
+
+class LisaMetaModel:
+ def __init__(
+ self,
+ config,
+ **kwargs,
+ ):
+ super(LisaMetaModel, self).__init__(config)
+
+ self.config = config
+ if not hasattr(self.config, "train_mask_decoder"):
+ self.config.train_mask_decoder = kwargs["train_mask_decoder"]
+ self.config.out_dim = kwargs["out_dim"]
+ self.vision_pretrained = kwargs.get("vision_pretrained", None)
+ else:
+ self.vision_pretrained = kwargs.get("vision_pretrained", None)
+ self.initialize_lisa_modules(self.config)
+
+ def initialize_lisa_modules(self, config):
+ # SAM
+ self.visual_model = build_sam_vit_h(self.vision_pretrained)
+ for param in self.visual_model.parameters():
+ param.requires_grad = False
+ if config.train_mask_decoder:
+ self.visual_model.mask_decoder.train()
+ for param in self.visual_model.mask_decoder.parameters():
+ param.requires_grad = True
+
+ # Projection layer
+ in_dim = config.hidden_size
+ out_dim = config.out_dim
+ text_fc = [
+ nn.Linear(in_dim, in_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(in_dim, out_dim),
+ nn.Dropout(0.0),
+ ]
+ self.text_hidden_fcs = nn.ModuleList([nn.Sequential(*text_fc)])
+ self.text_hidden_fcs.train()
+ for param in self.text_hidden_fcs.parameters():
+ param.requires_grad = True
+
+
+class LisaModel(LisaMetaModel, LlavaLlamaModel):
+ def __init__(
+ self,
+ config,
+ **kwargs,
+ ):
+ super(LisaModel, self).__init__(config, **kwargs)
+
+ self.config.use_cache = False
+ self.config.vision_tower = self.config.mm_vision_tower
+ self.config.mm_vision_select_feature = "patch"
+ self.config.image_aspect_ratio = "square"
+ self.config.image_grid_pinpoints = None
+ self.config.tune_mm_mlp_adapter = False
+ self.config.freeze_mm_mlp_adapter = True
+ self.config.pretrain_mm_mlp_adapter = None
+ self.config.mm_use_im_patch_token = False
+
+
+class AffordanceVLMForCausalLM(LlavaLlamaForCausalLM):
+ def __init__(
+ self,
+ config,
+ **kwargs,
+ ):
+ if not hasattr(config, "train_mask_decoder"):
+ config.mm_use_im_start_end = kwargs.pop("use_mm_start_end", True)
+ config.mm_vision_tower = kwargs.get(
+ "vision_tower", "openai/clip-vit-large-patch14"
+ )
+ self.ce_loss_weight = kwargs.pop("ce_loss_weight", None)
+ self.dice_loss_weight = kwargs.pop("dice_loss_weight", None)
+ self.bce_loss_weight = kwargs.pop("bce_loss_weight", None)
+ else:
+ config.mm_vision_tower = config.vision_tower
+
+ self.seg_token_idx = kwargs.pop("seg_token_idx")
+ self.aff_token_idx = kwargs.pop("aff_token_idx")
+
+ super().__init__(config)
+
+ self.model = LisaModel(config, **kwargs)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_visual_embs(self, pixel_values: torch.FloatTensor):
+ with torch.no_grad():
+ image_embeddings_list = []
+ for i in range(pixel_values.shape[0]):
+ torch.cuda.empty_cache()
+ image_embeddings = self.model.visual_model.image_encoder(
+ pixel_values[i].unsqueeze(0)
+ )
+ image_embeddings_list.append(image_embeddings)
+ torch.cuda.empty_cache()
+ image_embeddings = torch.cat(image_embeddings_list, 0)
+ return image_embeddings
+
+ def forward(self, **kwargs):
+ if "past_key_values" in kwargs:
+ return super().forward(**kwargs)
+ return self.model_forward(**kwargs)
+
+ def model_forward(
+ self,
+ images: torch.FloatTensor,
+ images_clip: torch.FloatTensor,
+ input_ids: torch.LongTensor,
+ labels: torch.LongTensor,
+ attention_masks: torch.LongTensor,
+ offset: torch.LongTensor,
+ masks_list: List[torch.FloatTensor],
+ label_list: List[torch.Tensor],
+ resize_list: List[tuple],
+ inference: bool = False,
+ **kwargs,
+ ):
+ image_embeddings = self.get_visual_embs(images)
+ batch_size = image_embeddings.shape[0]
+ assert batch_size == len(offset) - 1
+
+ seg_token_mask = (input_ids[:, 1:] == self.seg_token_idx) + (input_ids[:, 1:] == self.aff_token_idx)
+ seg_token_mask = torch.cat(
+ [
+ seg_token_mask,
+ torch.zeros((seg_token_mask.shape[0], 1)).bool().cuda(),
+ ],
+ dim=1,
+ )
+ # hack for IMAGE_TOKEN_INDEX (we suppose that there is only one image, and it is in the front)
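+        # The single image placeholder is expanded into 256 CLIP patch embeddings inside the
+        # LLM, so the text-side mask is padded with 255 extra positions (an assumption tied to
+        # clip-vit-large-patch14 at 224x224 resolution, i.e. 16x16 patches).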
+ seg_token_mask = torch.cat(
+ [torch.zeros((seg_token_mask.shape[0], 255)).bool().cuda(), seg_token_mask],
+ dim=1,
+ )
+
+ if inference:
+ n_batch = 1
+ length = input_ids.shape[0]
+ assert images_clip.shape[0] == 1
+ images_clip_extend = images_clip.expand(length, -1, -1, -1).contiguous()
+
+ output_hidden_states = []
+ for i in range(n_batch):
+ start_i, end_i = i * length, min((i + 1) * length, input_ids.shape[0])
+ output_i = super().forward(
+ images=images_clip_extend[: end_i - start_i],
+ attention_mask=attention_masks[start_i:end_i],
+ input_ids=input_ids[start_i:end_i],
+ output_hidden_states=True,
+ )
+ output_hidden_states.append(output_i.hidden_states)
+ torch.cuda.empty_cache()
+
+ output_hidden_states_list = []
+ output_hidden_states_level = torch.cat(output_hidden_states, dim=0)
+ output_hidden_states_list.append(output_hidden_states_level)
+ output_hidden_states = output_hidden_states_list
+ output = None
+
+ else:
+ images_clip_list = []
+ for i in range(len(offset) - 1):
+ start_i, end_i = offset[i], offset[i + 1]
+ images_clip_i = (
+ images_clip[i]
+ .unsqueeze(0)
+ .expand(end_i - start_i, -1, -1, -1)
+ .contiguous()
+ )
+ images_clip_list.append(images_clip_i)
+ images_clip = torch.cat(images_clip_list, dim=0)
+
+ output = super().forward(
+ images=images_clip,
+ attention_mask=attention_masks,
+ input_ids=input_ids,
+ labels=labels,
+ output_hidden_states=True,
+ )
+ output_hidden_states = output.hidden_states
+
+ hidden_states = []
+
+ assert len(self.model.text_hidden_fcs) == 1
+ hidden_states.append(self.model.text_hidden_fcs[0](output_hidden_states[-1]))
+
+ last_hidden_state = torch.stack(hidden_states, dim=-1).sum(dim=-1)
+ pred_embeddings = last_hidden_state[seg_token_mask]
+ seg_token_counts = seg_token_mask.int().sum(-1) # [bs, ]
+
+ seg_token_offset = seg_token_counts.cumsum(-1)
+ seg_token_offset = torch.cat(
+ [torch.zeros(1).long().cuda(), seg_token_offset], dim=0
+ )
+
+ seg_token_offset = seg_token_offset[offset]
+
+ pred_embeddings_ = []
+ for i in range(len(seg_token_offset) - 1):
+ start_i, end_i = seg_token_offset[i], seg_token_offset[i + 1]
+ pred_embeddings_.append(pred_embeddings[start_i:end_i])
+ pred_embeddings = pred_embeddings_
+
+ multimask_output = False
+ pred_masks = []
+ for i in range(len(pred_embeddings)):
+ (
+ sparse_embeddings,
+ dense_embeddings,
+ ) = self.model.visual_model.prompt_encoder(
+ points=None,
+ boxes=None,
+ masks=None,
+ text_embeds=pred_embeddings[i].unsqueeze(1),
+ )
+ sparse_embeddings = sparse_embeddings.to(pred_embeddings[i].dtype)
+ low_res_masks, iou_predictions = self.model.visual_model.mask_decoder(
+ image_embeddings=image_embeddings[i].unsqueeze(0),
+ image_pe=self.model.visual_model.prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ )
+ pred_mask = self.model.visual_model.postprocess_masks(
+ low_res_masks,
+ input_size=resize_list[i],
+ original_size=label_list[i].shape,
+ )
+ pred_masks.append(pred_mask[:, 0])
+
+ model_output = output
+ gt_masks = masks_list
+
+ if inference:
+ return {
+ "pred_masks": pred_masks,
+ "gt_masks": gt_masks,
+ }
+
+ output = model_output.logits
+
+ ce_loss = model_output.loss
+ ce_loss = ce_loss * self.ce_loss_weight
+ mask_bce_loss = 0
+ mask_dice_loss = 0
+ num_masks = 0
+ for batch_idx in range(len(pred_masks)):
+ gt_mask = gt_masks[batch_idx]
+ pred_mask = pred_masks[batch_idx]
+
+ assert (
+ gt_mask.shape[0] == pred_mask.shape[0]
+ ), "gt_mask.shape: {}, pred_mask.shape: {}".format(
+ gt_mask.shape, pred_mask.shape
+ )
+ mask_bce_loss += (
+ sigmoid_ce_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
+ * gt_mask.shape[0]
+ )
+ mask_dice_loss += (
+ dice_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
+ * gt_mask.shape[0]
+ )
+ num_masks += gt_mask.shape[0]
+
+ mask_bce_loss = self.bce_loss_weight * mask_bce_loss / (num_masks + 1e-8)
+ mask_dice_loss = self.dice_loss_weight * mask_dice_loss / (num_masks + 1e-8)
+ mask_loss = mask_bce_loss + mask_dice_loss
+
+ loss = ce_loss + mask_loss
+
+ return {
+ "loss": loss,
+ "ce_loss": ce_loss,
+ "mask_bce_loss": mask_bce_loss,
+ "mask_dice_loss": mask_dice_loss,
+ "mask_loss": mask_loss,
+ }
+
+ def evaluate(
+ self,
+ images_clip,
+ images,
+ input_ids,
+ resize_list,
+ original_size_list,
+ max_new_tokens=32,
+ tokenizer=None,
+ ):
+ with torch.no_grad():
+ outputs = self.generate(
+ images=images_clip,
+ input_ids=input_ids,
+ max_new_tokens=max_new_tokens,
+ num_beams=1,
+ output_hidden_states=True,
+ return_dict_in_generate=True,
+ )
+ output_hidden_states = outputs.hidden_states[-1]
+ output_ids = outputs.sequences
+
+ seg_token_mask = (output_ids[:, 1:] == self.seg_token_idx) + (output_ids[:, 1:] == self.aff_token_idx)
+ # hack for IMAGE_TOKEN_INDEX (we suppose that there is only one image, and it is in the front)
+ seg_token_mask = torch.cat(
+ [
+ torch.zeros((seg_token_mask.shape[0], 255)).bool().cuda(),
+ seg_token_mask,
+ ],
+ dim=1,
+ )
+
+ hidden_states = []
+
+ assert len(self.model.text_hidden_fcs) == 1
+ hidden_states.append(self.model.text_hidden_fcs[0](output_hidden_states))
+
+ last_hidden_state = torch.stack(hidden_states, dim=-1).sum(dim=-1)
+ pred_embeddings = last_hidden_state[seg_token_mask]
+
+ seg_token_counts = seg_token_mask.int().sum(-1) # [bs, ]
+ seg_token_offset = seg_token_counts.cumsum(-1)
+ seg_token_offset = torch.cat(
+ [torch.zeros(1).long().cuda(), seg_token_offset], dim=0
+ )
+
+ pred_embeddings_ = []
+ for i in range(len(seg_token_offset) - 1):
+ start_i, end_i = seg_token_offset[i], seg_token_offset[i + 1]
+ pred_embeddings_.append(pred_embeddings[start_i:end_i])
+ pred_embeddings = pred_embeddings_
+
+ image_embeddings = self.get_visual_embs(images)
+
+ multimask_output = False
+ pred_masks = []
+ for i in range(len(pred_embeddings)):
+ (
+ sparse_embeddings,
+ dense_embeddings,
+ ) = self.model.visual_model.prompt_encoder(
+ points=None,
+ boxes=None,
+ masks=None,
+ text_embeds=pred_embeddings[i].unsqueeze(1),
+ )
+
+ sparse_embeddings = sparse_embeddings.to(pred_embeddings[i].dtype)
+ low_res_masks, iou_predictions = self.model.visual_model.mask_decoder(
+ image_embeddings=image_embeddings[i].unsqueeze(0),
+ image_pe=self.model.visual_model.prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ )
+ pred_mask = self.model.visual_model.postprocess_masks(
+ low_res_masks,
+ input_size=resize_list[i],
+ original_size=original_size_list[i],
+ )
+ pred_masks.append(pred_mask[:, 0])
+
+ return output_ids, pred_masks
diff --git a/model/__pycache__/AffordanceVLM.cpython-39.pyc b/model/__pycache__/AffordanceVLM.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c85e7d4d15747457bce9dd0681cd508bc2cfbe00
Binary files /dev/null and b/model/__pycache__/AffordanceVLM.cpython-39.pyc differ
diff --git a/model/llava/__init__.py b/model/llava/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d1f016db1028101d45ba7d68cb3f0bcb558c2bb
--- /dev/null
+++ b/model/llava/__init__.py
@@ -0,0 +1 @@
+from .model import LlavaLlamaForCausalLM
diff --git a/model/llava/__pycache__/__init__.cpython-39.pyc b/model/llava/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fda626cfe8962832d2904861d5da243a695824e
Binary files /dev/null and b/model/llava/__pycache__/__init__.cpython-39.pyc differ
diff --git a/model/llava/__pycache__/constants.cpython-39.pyc b/model/llava/__pycache__/constants.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20642ce835a061a8e67ccea490e0604730fb4414
Binary files /dev/null and b/model/llava/__pycache__/constants.cpython-39.pyc differ
diff --git a/model/llava/__pycache__/conversation.cpython-39.pyc b/model/llava/__pycache__/conversation.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e09f6d16c15b82b6eadce43afe22074183e8c5db
Binary files /dev/null and b/model/llava/__pycache__/conversation.cpython-39.pyc differ
diff --git a/model/llava/__pycache__/mm_utils.cpython-39.pyc b/model/llava/__pycache__/mm_utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e7b2b9087f6484353712c398ef2a2c595116bfb
Binary files /dev/null and b/model/llava/__pycache__/mm_utils.cpython-39.pyc differ
diff --git a/model/llava/constants.py b/model/llava/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..be8cf0204969a6c973f442b383d8e425d684e826
--- /dev/null
+++ b/model/llava/constants.py
@@ -0,0 +1,12 @@
+CONTROLLER_HEART_BEAT_EXPIRATION = 30
+WORKER_HEART_BEAT_INTERVAL = 15
+
+LOGDIR = "."
+
+# Model Constants
+IGNORE_INDEX = -100
+IMAGE_TOKEN_INDEX = -200
+DEFAULT_IMAGE_TOKEN = "<image>"
+DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
+DEFAULT_IM_START_TOKEN = "<im_start>"
+DEFAULT_IM_END_TOKEN = "<im_end>"
diff --git a/model/llava/conversation.py b/model/llava/conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..11fe82f0c9277d13050033cbd56caa5cf2c72606
--- /dev/null
+++ b/model/llava/conversation.py
@@ -0,0 +1,399 @@
+import dataclasses
+from enum import Enum, auto
+from typing import List, Tuple
+
+
+class SeparatorStyle(Enum):
+ """Different separator style."""
+
+ SINGLE = auto()
+ TWO = auto()
+ MPT = auto()
+ PLAIN = auto()
+ LLAMA_2 = auto()
+
+
+@dataclasses.dataclass
+class Conversation:
+ """A class that keeps all conversation history."""
+
+ system: str
+ roles: List[str]
+ messages: List[List[str]]
+ offset: int
+ sep_style: SeparatorStyle = SeparatorStyle.SINGLE
+ sep: str = "###"
+ sep2: str = None
+ version: str = "Unknown"
+
+ skip_next: bool = False
+
+ def get_prompt(self):
+ messages = self.messages
+ if len(messages) > 0 and type(messages[0][1]) is tuple:
+ messages = self.messages.copy()
+ init_role, init_msg = messages[0].copy()
+            init_msg = init_msg[0].replace("<image>", "").strip()
+            if "mmtag" in self.version:
+                messages[0] = (init_role, init_msg)
+                messages.insert(0, (self.roles[0], "<Image><image></Image>"))
+                messages.insert(1, (self.roles[1], "Received."))
+            else:
+                messages[0] = (init_role, "<image>\n" + init_msg)
+
+ if self.sep_style == SeparatorStyle.SINGLE:
+ ret = self.system + self.sep
+ for role, message in messages:
+ if message:
+ if type(message) is tuple:
+ message, _, _ = message
+ ret += role + ": " + message + self.sep
+ else:
+ ret += role + ":"
+ elif self.sep_style == SeparatorStyle.TWO:
+ seps = [self.sep, self.sep2]
+ ret = self.system + seps[0]
+ for i, (role, message) in enumerate(messages):
+ if message:
+ if type(message) is tuple:
+ message, _, _ = message
+ ret += role + ": " + message + seps[i % 2]
+ else:
+ ret += role + ":"
+ elif self.sep_style == SeparatorStyle.MPT:
+ ret = self.system + self.sep
+ for role, message in messages:
+ if message:
+ if type(message) is tuple:
+ message, _, _ = message
+ ret += role + message + self.sep
+ else:
+ ret += role
+ elif self.sep_style == SeparatorStyle.LLAMA_2:
+            wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n"
+ wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
+ ret = ""
+
+ for i, (role, message) in enumerate(messages):
+ if i == 0:
+ assert message, "first message should not be none"
+ assert role == self.roles[0], "first message should come from user"
+ if message:
+ if type(message) is tuple:
+ message, _, _ = message
+ if i == 0:
+ message = wrap_sys(self.system) + message
+ if i % 2 == 0:
+ message = wrap_inst(message)
+ ret += self.sep + message
+ else:
+ ret += " " + message + " " + self.sep2
+ else:
+ ret += ""
+ ret = ret.lstrip(self.sep)
+ elif self.sep_style == SeparatorStyle.PLAIN:
+ seps = [self.sep, self.sep2]
+ ret = self.system
+ for i, (role, message) in enumerate(messages):
+ if message:
+ if type(message) is tuple:
+ message, _, _ = message
+ ret += message + seps[i % 2]
+ else:
+ ret += ""
+ else:
+ raise ValueError(f"Invalid style: {self.sep_style}")
+
+ return ret
+
+ def append_message(self, role, message):
+ self.messages.append([role, message])
+
+ def get_images(self, return_pil=False):
+ images = []
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
+ if i % 2 == 0:
+ if type(msg) is tuple:
+ import base64
+ from io import BytesIO
+
+ from PIL import Image
+
+ msg, image, image_process_mode = msg
+ if image_process_mode == "Pad":
+
+ def expand2square(pil_img, background_color=(122, 116, 104)):
+ width, height = pil_img.size
+ if width == height:
+ return pil_img
+ elif width > height:
+ result = Image.new(
+ pil_img.mode, (width, width), background_color
+ )
+ result.paste(pil_img, (0, (width - height) // 2))
+ return result
+ else:
+ result = Image.new(
+ pil_img.mode, (height, height), background_color
+ )
+ result.paste(pil_img, ((height - width) // 2, 0))
+ return result
+
+ image = expand2square(image)
+ elif image_process_mode == "Crop":
+ pass
+ elif image_process_mode == "Resize":
+ image = image.resize((336, 336))
+ else:
+ raise ValueError(
+ f"Invalid image_process_mode: {image_process_mode}"
+ )
+ max_hw, min_hw = max(image.size), min(image.size)
+ aspect_ratio = max_hw / min_hw
+ max_len, min_len = 800, 400
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
+ longest_edge = int(shortest_edge * aspect_ratio)
+ W, H = image.size
+ if H > W:
+ H, W = longest_edge, shortest_edge
+ else:
+ H, W = shortest_edge, longest_edge
+ image = image.resize((W, H))
+ if return_pil:
+ images.append(image)
+ else:
+ buffered = BytesIO()
+ image.save(buffered, format="PNG")
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
+ images.append(img_b64_str)
+ return images
+
+ def to_gradio_chatbot(self):
+ ret = []
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
+ if i % 2 == 0:
+ if type(msg) is tuple:
+ import base64
+ from io import BytesIO
+
+ msg, image, image_process_mode = msg
+ max_hw, min_hw = max(image.size), min(image.size)
+ aspect_ratio = max_hw / min_hw
+ max_len, min_len = 800, 400
+ shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
+ longest_edge = int(shortest_edge * aspect_ratio)
+ W, H = image.size
+ if H > W:
+ H, W = longest_edge, shortest_edge
+ else:
+ H, W = shortest_edge, longest_edge
+ image = image.resize((W, H))
+ buffered = BytesIO()
+ image.save(buffered, format="JPEG")
+ img_b64_str = base64.b64encode(buffered.getvalue()).decode()
+                    img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
+                    ret.append([img_str, None])
+                    msg = msg.replace("<image>", "").strip()
+ if len(msg) > 0:
+ ret.append([msg, None])
+ else:
+ ret.append([msg, None])
+ else:
+ ret[-1][-1] = msg
+ return ret
+
+ def copy(self):
+ return Conversation(
+ system=self.system,
+ roles=self.roles,
+ messages=[[x, y] for x, y in self.messages],
+ offset=self.offset,
+ sep_style=self.sep_style,
+ sep=self.sep,
+ sep2=self.sep2,
+ version=self.version,
+ )
+
+ def dict(self):
+ if len(self.get_images()) > 0:
+ return {
+ "system": self.system,
+ "roles": self.roles,
+ "messages": [
+ [x, y[0] if type(y) is tuple else y] for x, y in self.messages
+ ],
+ "offset": self.offset,
+ "sep": self.sep,
+ "sep2": self.sep2,
+ }
+ return {
+ "system": self.system,
+ "roles": self.roles,
+ "messages": self.messages,
+ "offset": self.offset,
+ "sep": self.sep,
+ "sep2": self.sep2,
+ }
+
+
+conv_vicuna_v0 = Conversation(
+ system="A chat between a curious human and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+ roles=("Human", "Assistant"),
+ messages=(
+ (
+ "Human",
+ "What are the key differences between renewable and non-renewable energy sources?",
+ ),
+ (
+ "Assistant",
+ "Renewable energy sources are those that can be replenished naturally in a relatively "
+ "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
+ "Non-renewable energy sources, on the other hand, are finite and will eventually be "
+ "depleted, such as coal, oil, and natural gas. Here are some key differences between "
+ "renewable and non-renewable energy sources:\n"
+ "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
+ "energy sources are finite and will eventually run out.\n"
+ "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
+ "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
+ "and other negative effects.\n"
+ "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
+ "have lower operational costs than non-renewable sources.\n"
+ "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
+ "locations than non-renewable sources.\n"
+ "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
+ "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
+ "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
+ "non-renewable sources are not, and their depletion can lead to economic and social instability.\n",
+ ),
+ ),
+ offset=2,
+ sep_style=SeparatorStyle.SINGLE,
+ sep="###",
+)
+
+conv_vicuna_v1 = Conversation(
+ system="A chat between a curious user and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
+ roles=("USER", "ASSISTANT"),
+ version="v1",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.TWO,
+ sep=" ",
+    sep2="</s>",
+)
+
+conv_llama_2 = Conversation(
+ system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
+ roles=("USER", "ASSISTANT"),
+ version="llama_v2",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.LLAMA_2,
+    sep="<s>",
+    sep2="</s>",
+)
+
+conv_llava_llama_2 = Conversation(
+ system="You are a helpful language and vision assistant. "
+ "You are able to understand the visual content that the user provides, "
+ "and assist the user with a variety of tasks using natural language.",
+ roles=("USER", "ASSISTANT"),
+ version="llama_v2",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.LLAMA_2,
+    sep="<s>",
+    sep2="</s>",
+)
+
+conv_mpt = Conversation(
+ system="""<|im_start|>system
+A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
+ version="mpt",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.MPT,
+ sep="<|im_end|>",
+)
+
+conv_llava_plain = Conversation(
+ system="",
+ roles=("", ""),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.PLAIN,
+ sep="\n",
+)
+
+conv_llava_v0 = Conversation(
+ system="A chat between a curious human and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+ roles=("Human", "Assistant"),
+ messages=(("Human", "Hi!"), ("Assistant", "Hi there! How can I help you today?")),
+ offset=2,
+ sep_style=SeparatorStyle.SINGLE,
+ sep="###",
+)
+
+conv_llava_v0_mmtag = Conversation(
+ system="A chat between a curious user and an artificial intelligence assistant. "
+ "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
+    "The visual content will be provided with the following format: <Image>visual content</Image>.",
+ roles=("Human", "Assistant"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.SINGLE,
+ sep="###",
+ version="v0_mmtag",
+)
+
+conv_llava_v1 = Conversation(
+ system="A chat between a curious human and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+ roles=("USER", "ASSISTANT"),
+ version="v1",
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.TWO,
+ sep=" ",
+    sep2="</s>",
+)
+
+conv_llava_v1_mmtag = Conversation(
+ system="A chat between a curious user and an artificial intelligence assistant. "
+ "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
+    "The visual content will be provided with the following format: <Image>visual content</Image>.",
+ roles=("USER", "ASSISTANT"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.TWO,
+ sep=" ",
+    sep2="</s>",
+ version="v1_mmtag",
+)
+
+default_conversation = conv_vicuna_v0
+conv_templates = {
+ "default": conv_vicuna_v0,
+ "v0": conv_vicuna_v0,
+ "v1": conv_vicuna_v1,
+ "vicuna_v1": conv_vicuna_v1,
+ "llama_2": conv_llama_2,
+ "plain": conv_llava_plain,
+ "v0_plain": conv_llava_plain,
+ "llava_v0": conv_llava_v0,
+ "v0_mmtag": conv_llava_v0_mmtag,
+ "llava_v1": conv_llava_v1,
+ "v1_mmtag": conv_llava_v1_mmtag,
+ "llava_llama_2": conv_llava_llama_2,
+ "mpt": conv_mpt,
+}
+
+
+if __name__ == "__main__":
+ print(default_conversation.get_prompt())
diff --git a/model/llava/mm_utils.py b/model/llava/mm_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..92b1f5a6e2f7e41777a598ac7064a9b345c11e74
--- /dev/null
+++ b/model/llava/mm_utils.py
@@ -0,0 +1,88 @@
+import base64
+from io import BytesIO
+
+import torch
+from PIL import Image
+from transformers import StoppingCriteria
+
+from .constants import IMAGE_TOKEN_INDEX
+
+
+def load_image_from_base64(image):
+ return Image.open(BytesIO(base64.b64decode(image)))
+
+
+def process_images(images, image_processor, model_cfg):
+ return image_processor(images, return_tensors="pt")["pixel_values"]
+
+
+def tokenizer_image_token(
+ prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None
+):
+    # Split the prompt at each "<image>" placeholder and tokenize the text chunks.
+    prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]
+
+ def insert_separator(X, sep):
+ return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]
+
+ input_ids = []
+ offset = 0
+ if (
+ len(prompt_chunks) > 0
+ and len(prompt_chunks[0]) > 0
+ and prompt_chunks[0][0] == tokenizer.bos_token_id
+ ):
+ offset = 1
+ input_ids.append(prompt_chunks[0][0])
+
+ for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
+ input_ids.extend(x[offset:])
+
+ if return_tensors is not None:
+ if return_tensors == "pt":
+ return torch.tensor(input_ids, dtype=torch.long)
+ raise ValueError(f"Unsupported tensor type: {return_tensors}")
+ return input_ids
+
+
+def get_model_name_from_path(model_path):
+ model_path = model_path.strip("/")
+ model_paths = model_path.split("/")
+ if model_paths[-1].startswith("checkpoint-"):
+ return model_paths[-2] + "_" + model_paths[-1]
+ else:
+ return model_paths[-1]
+
+
+class KeywordsStoppingCriteria(StoppingCriteria):
+ def __init__(self, keywords, tokenizer, input_ids):
+ self.keywords = keywords
+ self.keyword_ids = []
+ for keyword in keywords:
+ cur_keyword_ids = tokenizer(keyword).input_ids
+ if (
+ len(cur_keyword_ids) > 1
+ and cur_keyword_ids[0] == tokenizer.bos_token_id
+ ):
+ cur_keyword_ids = cur_keyword_ids[1:]
+ self.keyword_ids.append(torch.tensor(cur_keyword_ids))
+ self.tokenizer = tokenizer
+ self.start_len = input_ids.shape[1]
+
+ def __call__(
+ self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
+ ) -> bool:
+ assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)" # TODO
+ offset = min(output_ids.shape[1] - self.start_len, 3)
+ self.keyword_ids = [
+ keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids
+ ]
+ for keyword_id in self.keyword_ids:
+            if (output_ids[0, -keyword_id.shape[0] :] == keyword_id).all():
+ return True
+ outputs = self.tokenizer.batch_decode(
+ output_ids[:, -offset:], skip_special_tokens=True
+ )[0]
+ for keyword in self.keywords:
+ if keyword in outputs:
+ return True
+ return False
diff --git a/model/llava/model/__init__.py b/model/llava/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..59e87786afcd1ba81eb86f4e7755583327bec9a2
--- /dev/null
+++ b/model/llava/model/__init__.py
@@ -0,0 +1,2 @@
+from .language_model.llava_llama import LlavaConfig, LlavaLlamaForCausalLM
+from .language_model.llava_mpt import LlavaMPTConfig, LlavaMPTForCausalLM
diff --git a/model/llava/model/__pycache__/__init__.cpython-39.pyc b/model/llava/model/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7a5b4aa1c924b62f9e35f5204b92ce0fedb4b0a
Binary files /dev/null and b/model/llava/model/__pycache__/__init__.cpython-39.pyc differ
diff --git a/model/llava/model/__pycache__/llava_arch.cpython-39.pyc b/model/llava/model/__pycache__/llava_arch.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44b5b3f994cd88b8b9a8a96912549f2e22d11dde
Binary files /dev/null and b/model/llava/model/__pycache__/llava_arch.cpython-39.pyc differ
diff --git a/model/llava/model/apply_delta.py b/model/llava/model/apply_delta.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f73809262b001a1f16ca3302cd75ab30893486a
--- /dev/null
+++ b/model/llava/model/apply_delta.py
@@ -0,0 +1,56 @@
+"""
+Usage:
+python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta
+"""
+import argparse
+
+import torch
+from llava import LlavaLlamaForCausalLM
+from tqdm import tqdm
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+
+def apply_delta(base_model_path, target_model_path, delta_path):
+ print("Loading base model")
+ base = AutoModelForCausalLM.from_pretrained(
+ base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+
+ print("Loading delta")
+ delta = LlavaLlamaForCausalLM.from_pretrained(
+ delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+ delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)
+
+ print("Applying delta")
+ for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
+ if name not in base.state_dict():
+ assert name in [
+ "model.mm_projector.weight",
+ "model.mm_projector.bias",
+ ], f"{name} not in base model"
+ continue
+ if param.data.shape == base.state_dict()[name].shape:
+ param.data += base.state_dict()[name]
+ else:
+ assert name in [
+ "model.embed_tokens.weight",
+ "lm_head.weight",
+ ], f"{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}"
+ bparam = base.state_dict()[name]
+ param.data[: bparam.shape[0], : bparam.shape[1]] += bparam
+
+ print("Saving target model")
+ delta.save_pretrained(target_model_path)
+ delta_tokenizer.save_pretrained(target_model_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--base-model-path", type=str, required=True)
+ parser.add_argument("--target-model-path", type=str, required=True)
+ parser.add_argument("--delta-path", type=str, required=True)
+
+ args = parser.parse_args()
+
+ apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
diff --git a/model/llava/model/builder.py b/model/llava/model/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c841ab48b765184f05eac3326b36bfa1a7a4819
--- /dev/null
+++ b/model/llava/model/builder.py
@@ -0,0 +1,206 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import shutil
+
+import torch
+from llava.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_PATCH_TOKEN)
+from llava.model import *
+from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+
+def load_pretrained_model(
+ model_path,
+ model_base,
+ model_name,
+ load_8bit=False,
+ load_4bit=False,
+ device_map="auto",
+):
+ kwargs = {"device_map": device_map}
+
+ if load_8bit:
+ kwargs["load_in_8bit"] = True
+ elif load_4bit:
+ kwargs["load_in_4bit"] = True
+ kwargs["quantization_config"] = BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ )
+ else:
+ kwargs["torch_dtype"] = torch.float16
+
+ if "llava" in model_name.lower():
+ # Load LLaVA model
+ if "lora" in model_name.lower() and model_base is not None:
+ lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
+ print("Loading LLaVA from base model...")
+ model = LlavaLlamaForCausalLM.from_pretrained(
+ model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs
+ )
+ token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
+ if model.lm_head.weight.shape[0] != token_num:
+ model.lm_head.weight = torch.nn.Parameter(
+ torch.empty(
+ token_num, tokem_dim, device=model.device, dtype=model.dtype
+ )
+ )
+ model.model.embed_tokens.weight = torch.nn.Parameter(
+ torch.empty(
+ token_num, tokem_dim, device=model.device, dtype=model.dtype
+ )
+ )
+
+ print("Loading additional LLaVA weights...")
+ if os.path.exists(os.path.join(model_path, "non_lora_trainables.bin")):
+ non_lora_trainables = torch.load(
+ os.path.join(model_path, "non_lora_trainables.bin"),
+ map_location="cpu",
+ )
+ else:
+ # this is probably from HF Hub
+ from huggingface_hub import hf_hub_download
+
+ def load_from_hf(repo_id, filename, subfolder=None):
+ cache_file = hf_hub_download(
+ repo_id=repo_id, filename=filename, subfolder=subfolder
+ )
+ return torch.load(cache_file, map_location="cpu")
+
+ non_lora_trainables = load_from_hf(
+ model_path, "non_lora_trainables.bin"
+ )
+ non_lora_trainables = {
+ (k[11:] if k.startswith("base_model.") else k): v
+ for k, v in non_lora_trainables.items()
+ }
+ if any(k.startswith("model.model.") for k in non_lora_trainables):
+ non_lora_trainables = {
+ (k[6:] if k.startswith("model.") else k): v
+ for k, v in non_lora_trainables.items()
+ }
+ model.load_state_dict(non_lora_trainables, strict=False)
+
+ from peft import PeftModel
+
+ print("Loading LoRA weights...")
+ model = PeftModel.from_pretrained(model, model_path)
+ print("Merging LoRA weights...")
+ model = model.merge_and_unload()
+ print("Model is loaded...")
+ elif model_base is not None:
+ # this may be mm projector only
+ print("Loading LLaVA from base model...")
+ if "mpt" in model_name.lower():
+ if not os.path.isfile(os.path.join(model_path, "configuration_mpt.py")):
+ shutil.copyfile(
+ os.path.join(model_base, "configuration_mpt.py"),
+ os.path.join(model_path, "configuration_mpt.py"),
+ )
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
+ cfg_pretrained = AutoConfig.from_pretrained(
+ model_path, trust_remote_code=True
+ )
+ model = LlavaMPTForCausalLM.from_pretrained(
+ model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs
+ )
+ else:
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
+ cfg_pretrained = AutoConfig.from_pretrained(model_path)
+ model = LlavaLlamaForCausalLM.from_pretrained(
+ model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs
+ )
+
+ mm_projector_weights = torch.load(
+ os.path.join(model_path, "mm_projector.bin"), map_location="cpu"
+ )
+ mm_projector_weights = {
+ k: v.to(torch.float16) for k, v in mm_projector_weights.items()
+ }
+ model.load_state_dict(mm_projector_weights, strict=False)
+ else:
+ if "mpt" in model_name.lower():
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
+ model = LlavaMPTForCausalLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, **kwargs
+ )
+ else:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+ model = LlavaLlamaForCausalLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, **kwargs
+ )
+ else:
+ # Load language model
+ if model_base is not None:
+ # PEFT model
+ from peft import PeftModel
+
+ tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_base,
+ torch_dtype=torch.float16,
+ low_cpu_mem_usage=True,
+ device_map="auto",
+ )
+ print(f"Loading LoRA weights from {model_path}")
+ model = PeftModel.from_pretrained(model, model_path)
+ print(f"Merging weights")
+ model = model.merge_and_unload()
+ print("Convert to FP16...")
+ model.to(torch.float16)
+ else:
+ use_fast = False
+ if "mpt" in model_name.lower():
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs
+ )
+ else:
+ tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+ model = AutoModelForCausalLM.from_pretrained(
+ model_path, low_cpu_mem_usage=True, **kwargs
+ )
+
+ image_processor = None
+
+ if "llava" in model_name.lower():
+ mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
+ mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
+ if mm_use_im_patch_token:
+ tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
+ if mm_use_im_start_end:
+ tokenizer.add_tokens(
+ [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
+ )
+ model.resize_token_embeddings(len(tokenizer))
+
+ vision_tower = model.get_vision_tower()
+ if not vision_tower.is_loaded:
+ vision_tower.load_model()
+ vision_tower.to(device="cuda", dtype=torch.float16)
+ image_processor = vision_tower.image_processor
+
+ if hasattr(model.config, "max_sequence_length"):
+ context_len = model.config.max_sequence_length
+ else:
+ context_len = 2048
+
+ return tokenizer, model, image_processor, context_len
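+
+
+# Usage sketch (illustrative; assumes the enclosing loader keeps upstream
+# LLaVA's load_pretrained_model signature, and the paths below are hypothetical):
+#   tokenizer, model, image_processor, context_len = load_pretrained_model(
+#       model_path="ckpts/llava-7b-lora", model_base="ckpts/llama-7b",
+#       model_name="llava-7b-lora")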
diff --git a/model/llava/model/consolidate.py b/model/llava/model/consolidate.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1fd9b722bc2afe5338eb632dee2d09cc27367ca
--- /dev/null
+++ b/model/llava/model/consolidate.py
@@ -0,0 +1,31 @@
+"""
+Usage:
+python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate
+"""
+import argparse
+
+import torch
+from llava.model import *
+from llava.model.utils import auto_upgrade
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+
+def consolidate_ckpt(src_path, dst_path):
+ print("Loading model")
+ auto_upgrade(src_path)
+ src_model = AutoModelForCausalLM.from_pretrained(
+ src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+ src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False)
+ src_model.save_pretrained(dst_path)
+ src_tokenizer.save_pretrained(dst_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--src", type=str, required=True)
+ parser.add_argument("--dst", type=str, required=True)
+
+ args = parser.parse_args()
+
+ consolidate_ckpt(args.src, args.dst)
diff --git a/model/llava/model/language_model/__pycache__/llava_llama.cpython-39.pyc b/model/llava/model/language_model/__pycache__/llava_llama.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d60dd04d20c5a45fed09d05eaa1df8c4554c691
Binary files /dev/null and b/model/llava/model/language_model/__pycache__/llava_llama.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/__pycache__/llava_mpt.cpython-39.pyc b/model/llava/model/language_model/__pycache__/llava_mpt.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9eb33e4d3cd54fd76f93658496dd0789ca47e00
Binary files /dev/null and b/model/llava/model/language_model/__pycache__/llava_mpt.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/llava_llama.py b/model/llava/model/language_model/llava_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..460c001998a41ce10a901acde4c3c862b7cfc57c
--- /dev/null
+++ b/model/llava/model/language_model/llava_llama.py
@@ -0,0 +1,167 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+from transformers import (AutoConfig, AutoModelForCausalLM, LlamaConfig,
+ LlamaForCausalLM, LlamaModel)
+from transformers.modeling_outputs import CausalLMOutputWithPast
+
+from ..llava_arch import LlavaMetaForCausalLM, LlavaMetaModel
+
+
+class LlavaConfig(LlamaConfig):
+ model_type = "llava"
+
+
+class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
+ config_class = LlavaConfig
+
+ def __init__(self, config: LlamaConfig):
+ super(LlavaLlamaModel, self).__init__(config)
+
+
+class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
+ config_class = LlavaConfig
+
+ def __init__(self, config):
+ super(LlamaForCausalLM, self).__init__(config)
+
+ self.model = LlavaLlamaModel(config)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_model(self):
+ return self.model
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ images: Optional[torch.FloatTensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ output_attentions = (
+ output_attentions
+ if output_attentions is not None
+ else self.config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states
+ if output_hidden_states is not None
+ else self.config.output_hidden_states
+ )
+ return_dict = (
+ return_dict if return_dict is not None else self.config.use_return_dict
+ )
+
+ (
+ input_ids,
+ attention_mask,
+ past_key_values,
+ inputs_embeds,
+ labels,
+ ) = self.prepare_inputs_labels_for_multimodal(
+ input_ids, attention_mask, past_key_values, labels, images
+ )
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model/pipeline parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ if self.training:
+ output_hidden_states = outputs.hidden_states
+ else:
+ output_hidden_states = hidden_states
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=output_hidden_states, # outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ images=None,
+ **kwargs
+ ):
+ if past_key_values:
+ input_ids = input_ids[:, -1:]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ "images": images,
+ }
+ )
+ return model_inputs
+
+
+AutoConfig.register("llava", LlavaConfig)
+AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
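+
+# Registering the config/model pair lets the transformers Auto classes resolve
+# checkpoints whose config.json declares model_type "llava", so (for example)
+# AutoModelForCausalLM.from_pretrained(path) builds a LlavaLlamaForCausalLM.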
diff --git a/model/llava/model/language_model/llava_mpt.py b/model/llava/model/language_model/llava_mpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..1549fb501b319d6382f2d4e3d9c82f83307397ba
--- /dev/null
+++ b/model/llava/model/language_model/llava_mpt.py
@@ -0,0 +1,174 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import math
+import warnings
+from typing import List, Optional, Tuple
+
+import torch
+import torch.nn.functional as F
+from transformers import AutoConfig, AutoModelForCausalLM
+from transformers.modeling_outputs import CausalLMOutputWithPast
+
+from ..llava_arch import LlavaMetaForCausalLM, LlavaMetaModel
+from .mpt.modeling_mpt import MPTConfig, MPTForCausalLM, MPTModel
+
+
+class LlavaMPTConfig(MPTConfig):
+ model_type = "llava_mpt"
+
+
+class LlavaMPTModel(LlavaMetaModel, MPTModel):
+ config_class = LlavaMPTConfig
+
+ def __init__(self, config: MPTConfig):
+ config.hidden_size = config.d_model
+ super(LlavaMPTModel, self).__init__(config)
+
+ def embed_tokens(self, x):
+ return self.wte(x)
+
+
+class LlavaMPTForCausalLM(MPTForCausalLM, LlavaMetaForCausalLM):
+ config_class = LlavaMPTConfig
+ supports_gradient_checkpointing = True
+
+ def __init__(self, config):
+ super(MPTForCausalLM, self).__init__(config)
+
+ if not config.tie_word_embeddings:
+ raise ValueError("MPTForCausalLM only supports tied word embeddings")
+ self.transformer = LlavaMPTModel(config)
+ self.logit_scale = None
+ if config.logit_scale is not None:
+ logit_scale = config.logit_scale
+ if isinstance(logit_scale, str):
+ if logit_scale == "inv_sqrt_d_model":
+ logit_scale = 1 / math.sqrt(config.d_model)
+ else:
+ raise ValueError(
+ f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'."
+ )
+ self.logit_scale = logit_scale
+
+ def get_model(self):
+ return self.transformer
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, LlavaMPTModel):
+ module.gradient_checkpointing = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
+ attention_mask: Optional[torch.ByteTensor] = None,
+ prefix_mask: Optional[torch.ByteTensor] = None,
+ sequence_id: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ images=None,
+ ):
+ return_dict = (
+ return_dict if return_dict is not None else self.config.return_dict
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ (
+ input_ids,
+ attention_mask,
+ past_key_values,
+ inputs_embeds,
+ labels,
+ ) = self.prepare_inputs_labels_for_multimodal(
+ input_ids, attention_mask, past_key_values, labels, images
+ )
+ outputs = self.transformer(
+ input_ids=input_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ prefix_mask=prefix_mask,
+ sequence_id=sequence_id,
+ return_dict=return_dict,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ )
+ # FIXME: this is a hack to fix the multiple gpu inference issue in https://github.com/haotian-liu/LLaVA/issues/338
+ logits = F.linear(
+ outputs.last_hidden_state.to(self.transformer.wte.weight.device),
+ self.transformer.wte.weight,
+ )
+ if self.logit_scale is not None:
+ if self.logit_scale == 0:
+ warnings.warn(
+ f"Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs."
+ )
+ logits *= self.logit_scale
+ loss = None
+ if labels is not None:
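+ # Shift labels left by one so position t is supervised by token t+1; the last
+ # (wrapped-around) position is masked with the cross-entropy ignore index.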
+ labels = torch.roll(labels, shifts=-1)
+ labels[:, -1] = -100
+ loss = F.cross_entropy(
+ logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1)
+ )
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
+ ):
+ if inputs_embeds is not None:
+ raise NotImplementedError("inputs_embeds is not implemented for MPT yet")
+ attention_mask = kwargs["attention_mask"].bool()
+ if attention_mask[:, -1].sum() != attention_mask.shape[0]:
+ raise NotImplementedError(
+ "MPT does not support generation with right padding."
+ )
+ if self.transformer.attn_uses_sequence_id and self.training:
+ sequence_id = torch.zeros_like(input_ids[:1])
+ else:
+ sequence_id = None
+ if past_key_values is not None:
+ input_ids = input_ids[:, -1].unsqueeze(-1)
+ if self.transformer.prefix_lm:
+ prefix_mask = torch.ones_like(attention_mask)
+ if kwargs.get("use_cache") == False:
+ raise NotImplementedError(
+ "MPT with prefix_lm=True does not support use_cache=False."
+ )
+ else:
+ prefix_mask = None
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "prefix_mask": prefix_mask,
+ "sequence_id": sequence_id,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache", True),
+ "images": kwargs.get("images", None),
+ }
+
+
+AutoConfig.register("llava_mpt", LlavaMPTConfig)
+AutoModelForCausalLM.register(LlavaMPTConfig, LlavaMPTForCausalLM)
diff --git a/model/llava/model/language_model/mpt/__pycache__/adapt_tokenizer.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/adapt_tokenizer.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fbf31e156b02bf48515da35e8329f67f09dde2be
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/adapt_tokenizer.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/attention.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/attention.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66bee5f87333bb4a62f42afcd186f006bd74c30b
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/attention.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/blocks.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/blocks.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a69bd13f9b6d4617d90605930d63af22262303f
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/blocks.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/configuration_mpt.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/configuration_mpt.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63d9b5719f4939725f3420d3c83a38f843bced07
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/configuration_mpt.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/custom_embedding.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/custom_embedding.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4af5923564bf747ddcc8497683bcfaf1716cd7cb
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/custom_embedding.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/flash_attn_triton.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/flash_attn_triton.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b46e6570e5035e96654e0af3df276c6f9db7c2c
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/flash_attn_triton.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/hf_prefixlm_converter.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/hf_prefixlm_converter.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a60379567a4813cee27c260d4213ed4b4646d946
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/hf_prefixlm_converter.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/meta_init_context.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/meta_init_context.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4677b84689522abfd6a941fd33991bae04623261
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/meta_init_context.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/modeling_mpt.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/modeling_mpt.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23b79ffb8711d464521152acd2300074ae1035b7
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/modeling_mpt.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/norm.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/norm.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef4e4fe3b3b5e329b3e0f7dfc5a6117c66508c19
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/norm.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/__pycache__/param_init_fns.cpython-39.pyc b/model/llava/model/language_model/mpt/__pycache__/param_init_fns.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2c289fa0838e3e91f39a739ffb63aa23c7d5c35
Binary files /dev/null and b/model/llava/model/language_model/mpt/__pycache__/param_init_fns.cpython-39.pyc differ
diff --git a/model/llava/model/language_model/mpt/adapt_tokenizer.py b/model/llava/model/language_model/mpt/adapt_tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6c2acaca8bd5bab095bad9f45208f7961297057
--- /dev/null
+++ b/model/llava/model/language_model/mpt/adapt_tokenizer.py
@@ -0,0 +1,46 @@
+from typing import Union
+
+from transformers import (AutoTokenizer, PreTrainedTokenizer,
+ PreTrainedTokenizerFast)
+
+Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
+NUM_SENTINEL_TOKENS: int = 100
+
+
+def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
+ """Adds sentinel tokens and padding token (if missing).
+
+ Expands the tokenizer vocabulary to include sentinel tokens
+ used in mixture-of-denoiser tasks as well as a padding token.
+
+ All added tokens are added as special tokens. No tokens are
+ added if sentinel tokens and padding token already exist.
+ """
+ sentinels_to_add = [f"" for i in range(NUM_SENTINEL_TOKENS)]
+ tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
+ if tokenizer.pad_token is None:
+ tokenizer.add_tokens("", special_tokens=True)
+ tokenizer.pad_token = ""
+ assert tokenizer.pad_token_id is not None
+ sentinels = "".join([f"" for i in range(NUM_SENTINEL_TOKENS)])
+ _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
+ tokenizer.sentinel_token_ids = _sentinel_token_ids
+
+
+class AutoTokenizerForMOD(AutoTokenizer):
+ """AutoTokenizer + Adaptation for MOD.
+
+ A simple wrapper around AutoTokenizer to make instantiating
+ an MOD-adapted tokenizer a bit easier.
+
+ MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
+ a padding token, and a property to get the token ids of the
+ sentinel tokens.
+ """
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ """See `AutoTokenizer.from_pretrained` docstring."""
+ tokenizer = super().from_pretrained(*args, **kwargs)
+ adapt_tokenizer_for_denoising(tokenizer)
+ return tokenizer
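+
+
+# Usage sketch (model name is illustrative only):
+#   tok = AutoTokenizerForMOD.from_pretrained("EleutherAI/gpt-neox-20b")
+#   tok.sentinel_token_ids  # token ids for <extra_id_0> ... <extra_id_99>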
diff --git a/model/llava/model/language_model/mpt/attention.py b/model/llava/model/language_model/mpt/attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..24fcd8fb1d1fd65c8342f85a2329bbdf791fed0f
--- /dev/null
+++ b/model/llava/model/language_model/mpt/attention.py
@@ -0,0 +1,526 @@
+"""Attention layers."""
+import math
+import warnings
+from typing import Optional
+
+import torch
+import torch.nn as nn
+from einops import rearrange
+from packaging import version
+from torch import nn
+
+from .norm import LPLayerNorm
+
+
+def _reset_is_causal(
+ num_query_tokens: int, num_key_tokens: int, original_is_causal: bool
+):
+ if original_is_causal and num_query_tokens != num_key_tokens:
+ if num_query_tokens != 1:
+ raise NotImplementedError(
+ "MPT does not support query and key with different number of tokens, unless number of query tokens is 1."
+ )
+ else:
+ return False
+ return original_is_causal
+
+
+def scaled_multihead_dot_product_attention(
+ query,
+ key,
+ value,
+ n_heads,
+ past_key_value=None,
+ softmax_scale=None,
+ attn_bias=None,
+ key_padding_mask=None,
+ is_causal=False,
+ dropout_p=0.0,
+ training=False,
+ needs_weights=False,
+ multiquery=False,
+):
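+ # Reshape to (batch, heads, seq, head_dim); keys are laid out as
+ # (batch, heads, head_dim, seq) so that q.matmul(k) directly yields the
+ # (seq_q, seq_k) score matrix. With multiquery=True the key/value projections
+ # carry a single shared head that broadcasts across all query heads.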
+ q = rearrange(query, "b s (h d) -> b h s d", h=n_heads)
+ kv_n_heads = 1 if multiquery else n_heads
+ k = rearrange(key, "b s (h d) -> b h d s", h=kv_n_heads)
+ v = rearrange(value, "b s (h d) -> b h s d", h=kv_n_heads)
+ if past_key_value is not None:
+ if len(past_key_value) != 0:
+ k = torch.cat([past_key_value[0], k], dim=3)
+ v = torch.cat([past_key_value[1], v], dim=2)
+ past_key_value = (k, v)
+ (b, _, s_q, d) = q.shape
+ s_k = k.size(-1)
+ if softmax_scale is None:
+ softmax_scale = 1 / math.sqrt(d)
+ attn_weight = q.matmul(k) * softmax_scale
+ if attn_bias is not None:
+ _s_q = max(0, attn_bias.size(2) - s_q)
+ _s_k = max(0, attn_bias.size(3) - s_k)
+ attn_bias = attn_bias[:, :, _s_q:, _s_k:]
+ if (
+ attn_bias.size(-1) != 1
+ and attn_bias.size(-1) != s_k
+ or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q)
+ ):
+ raise RuntimeError(
+ f"attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}."
+ )
+ attn_weight = attn_weight + attn_bias
+ min_val = torch.finfo(q.dtype).min
+ if key_padding_mask is not None:
+ if attn_bias is not None:
+ warnings.warn(
+ "Propogating key_padding_mask to the attention module "
+ + "and applying it within the attention module can cause "
+ + "unneccessary computation/memory usage. Consider integrating "
+ + "into attn_bias once and passing that to each attention "
+ + "module instead."
+ )
+ attn_weight = attn_weight.masked_fill(
+ ~key_padding_mask.view((b, 1, 1, s_k)), min_val
+ )
+ if is_causal and (not q.size(2) == 1):
+ s = max(s_q, s_k)
+ causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
+ causal_mask = causal_mask.tril()
+ causal_mask = causal_mask.to(torch.bool)
+ causal_mask = ~causal_mask
+ causal_mask = causal_mask[-s_q:, -s_k:]
+ attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
+ attn_weight = torch.softmax(attn_weight, dim=-1)
+ if dropout_p:
+ attn_weight = torch.nn.functional.dropout(
+ attn_weight, p=dropout_p, training=training, inplace=True
+ )
+ out = attn_weight.to(v.dtype).matmul(v)
+ out = rearrange(out, "b h s d -> b s (h d)")
+ if needs_weights:
+ return (out, attn_weight, past_key_value)
+ return (out, None, past_key_value)
+
+
+def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
+ for tensor in tensors:
+ if tensor.dtype not in valid_dtypes:
+ raise TypeError(
+ f"tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}."
+ )
+ if not tensor.is_cuda:
+ raise TypeError(
+ f"Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r})."
+ )
+
+
+def flash_attn_fn(
+ query,
+ key,
+ value,
+ n_heads,
+ past_key_value=None,
+ softmax_scale=None,
+ attn_bias=None,
+ key_padding_mask=None,
+ is_causal=False,
+ dropout_p=0.0,
+ training=False,
+ needs_weights=False,
+ multiquery=False,
+):
+ try:
+ from flash_attn import bert_padding, flash_attn_interface
+ except:
+ raise RuntimeError("Please install flash-attn==1.0.3.post0")
+ check_valid_inputs(query, key, value)
+ if past_key_value is not None:
+ if len(past_key_value) != 0:
+ key = torch.cat([past_key_value[0], key], dim=1)
+ value = torch.cat([past_key_value[1], value], dim=1)
+ past_key_value = (key, value)
+ if attn_bias is not None:
+ _s_q = max(0, attn_bias.size(2) - query.size(1))
+ _s_k = max(0, attn_bias.size(3) - key.size(1))
+ attn_bias = attn_bias[:, :, _s_q:, _s_k:]
+ if attn_bias is not None:
+ raise NotImplementedError(f"attn_bias not implemented for flash attn.")
+ (batch_size, seqlen) = query.shape[:2]
+ if key_padding_mask is None:
+ key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
+ query_padding_mask = key_padding_mask[:, -query.size(1) :]
+ (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(
+ query, query_padding_mask
+ )
+ query_unpad = rearrange(query_unpad, "nnz (h d) -> nnz h d", h=n_heads)
+ (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(
+ key, key_padding_mask
+ )
+ key_unpad = rearrange(
+ key_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads
+ )
+ (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
+ value_unpad = rearrange(
+ value_unpad, "nnz (h d) -> nnz h d", h=1 if multiquery else n_heads
+ )
+ if multiquery:
+ key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
+ value_unpad = value_unpad.expand(
+ value_unpad.size(0), n_heads, value_unpad.size(-1)
+ )
+ dropout_p = dropout_p if training else 0.0
+ reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
+ output_unpad = flash_attn_interface.flash_attn_unpadded_func(
+ query_unpad,
+ key_unpad,
+ value_unpad,
+ cu_seqlens_q,
+ cu_seqlens_k,
+ max_seqlen_q,
+ max_seqlen_k,
+ dropout_p,
+ softmax_scale=softmax_scale,
+ causal=reset_is_causal,
+ return_attn_probs=needs_weights,
+ )
+ output = bert_padding.pad_input(
+ rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices_q, batch_size, seqlen
+ )
+ return (output, None, past_key_value)
+
+
+def triton_flash_attn_fn(
+ query,
+ key,
+ value,
+ n_heads,
+ past_key_value=None,
+ softmax_scale=None,
+ attn_bias=None,
+ key_padding_mask=None,
+ is_causal=False,
+ dropout_p=0.0,
+ training=False,
+ needs_weights=False,
+ multiquery=False,
+):
+ try:
+ from .flash_attn_triton import flash_attn_func
+ except:
+ _installed = False
+ if version.parse(torch.__version__) < version.parse("2.0.0"):
+ _installed = True
+ try:
+ from flash_attn.flash_attn_triton import flash_attn_func
+ except:
+ _installed = False
+ if not _installed:
+ raise RuntimeError(
+ "Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed."
+ )
+ check_valid_inputs(query, key, value)
+ if past_key_value is not None:
+ if len(past_key_value) != 0:
+ key = torch.cat([past_key_value[0], key], dim=1)
+ value = torch.cat([past_key_value[1], value], dim=1)
+ past_key_value = (key, value)
+ if attn_bias is not None:
+ _s_q = max(0, attn_bias.size(2) - query.size(1))
+ _s_k = max(0, attn_bias.size(3) - key.size(1))
+ attn_bias = attn_bias[:, :, _s_q:, _s_k:]
+ if dropout_p:
+ raise NotImplementedError(f"Dropout not implemented for attn_impl: triton.")
+ if needs_weights:
+ raise NotImplementedError(f"attn_impl: triton cannot return attn weights.")
+ if key_padding_mask is not None:
+ warnings.warn(
+ "Propagating key_padding_mask to the attention module "
+ + "and applying it within the attention module can cause "
+ + "unnecessary computation/memory usage. Consider integrating "
+ + "into attn_bias once and passing that to each attention "
+ + "module instead."
+ )
+ (b_size, s_k) = key_padding_mask.shape[:2]
+ if attn_bias is None:
+ attn_bias = query.new_zeros(b_size, 1, 1, s_k)
+ attn_bias = attn_bias.masked_fill(
+ ~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min
+ )
+ query = rearrange(query, "b s (h d) -> b s h d", h=n_heads)
+ key = rearrange(key, "b s (h d) -> b s h d", h=1 if multiquery else n_heads)
+ value = rearrange(value, "b s (h d) -> b s h d", h=1 if multiquery else n_heads)
+ if multiquery:
+ key = key.expand(*key.shape[:2], n_heads, key.size(-1))
+ value = value.expand(*value.shape[:2], n_heads, value.size(-1))
+ reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
+ attn_output = flash_attn_func(
+ query, key, value, attn_bias, reset_is_causal, softmax_scale
+ )
+ output = attn_output.view(*attn_output.shape[:2], -1)
+ return (output, None, past_key_value)
+
+
+class MultiheadAttention(nn.Module):
+ """Multi-head self attention.
+
+ Using the torch or triton attention implementation enables the user to also use
+ additive bias.
+ """
+
+ def __init__(
+ self,
+ d_model: int,
+ n_heads: int,
+ attn_impl: str = "triton",
+ clip_qkv: Optional[float] = None,
+ qk_ln: bool = False,
+ softmax_scale: Optional[float] = None,
+ attn_pdrop: float = 0.0,
+ low_precision_layernorm: bool = False,
+ verbose: int = 0,
+ device: Optional[str] = None,
+ ):
+ super().__init__()
+ self.attn_impl = attn_impl
+ self.clip_qkv = clip_qkv
+ self.qk_ln = qk_ln
+ self.d_model = d_model
+ self.n_heads = n_heads
+ self.softmax_scale = softmax_scale
+ if self.softmax_scale is None:
+ self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
+ self.attn_dropout_p = attn_pdrop
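+ # Fused QKV projection: a single Linear emits queries, keys and values stacked
+ # along the feature dimension (3 * d_model); forward() splits them with chunk().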
+ self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device)
+ fuse_splits = (d_model, 2 * d_model)
+ self.Wqkv._fused = (0, fuse_splits)
+ if self.qk_ln:
+ layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
+ self.q_ln = layernorm_class(self.d_model, device=device)
+ self.k_ln = layernorm_class(self.d_model, device=device)
+ if self.attn_impl == "flash":
+ self.attn_fn = flash_attn_fn
+ elif self.attn_impl == "triton":
+ self.attn_fn = triton_flash_attn_fn
+ if verbose:
+ warnings.warn(
+ "While `attn_impl: triton` can be faster than `attn_impl: flash` "
+ + "it uses more memory. When training larger models this can trigger "
+ + "alloc retries which hurts performance. If encountered, we recommend "
+ + "using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`."
+ )
+ elif self.attn_impl == "torch":
+ self.attn_fn = scaled_multihead_dot_product_attention
+ if torch.cuda.is_available() and verbose:
+ warnings.warn(
+ "Using `attn_impl: torch`. If your model does not use `alibi` or "
+ + "`prefix_lm` we recommend using `attn_impl: flash` otherwise "
+ + "we recommend using `attn_impl: triton`."
+ )
+ else:
+ raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
+ self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
+ self.out_proj._is_residual = True
+
+ def forward(
+ self,
+ x,
+ past_key_value=None,
+ attn_bias=None,
+ attention_mask=None,
+ is_causal=True,
+ needs_weights=False,
+ ):
+ qkv = self.Wqkv(x)
+ if self.clip_qkv:
+ qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
+ (query, key, value) = qkv.chunk(3, dim=2)
+ key_padding_mask = attention_mask
+ if self.qk_ln:
+ dtype = query.dtype
+ query = self.q_ln(query).to(dtype)
+ key = self.k_ln(key).to(dtype)
+ (context, attn_weights, past_key_value) = self.attn_fn(
+ query,
+ key,
+ value,
+ self.n_heads,
+ past_key_value=past_key_value,
+ softmax_scale=self.softmax_scale,
+ attn_bias=attn_bias,
+ key_padding_mask=key_padding_mask,
+ is_causal=is_causal,
+ dropout_p=self.attn_dropout_p,
+ training=self.training,
+ needs_weights=needs_weights,
+ )
+ return (self.out_proj(context), attn_weights, past_key_value)
+
+
+class MultiQueryAttention(nn.Module):
+ """Multi-Query self attention.
+
+ Using the torch or triton attention implementation enables the user to also use
+ additive bias.
+ """
+
+ def __init__(
+ self,
+ d_model: int,
+ n_heads: int,
+ attn_impl: str = "triton",
+ clip_qkv: Optional[float] = None,
+ qk_ln: bool = False,
+ softmax_scale: Optional[float] = None,
+ attn_pdrop: float = 0.0,
+ low_precision_layernorm: bool = False,
+ verbose: int = 0,
+ device: Optional[str] = None,
+ ):
+ super().__init__()
+ self.attn_impl = attn_impl
+ self.clip_qkv = clip_qkv
+ self.qk_ln = qk_ln
+ self.d_model = d_model
+ self.n_heads = n_heads
+ self.head_dim = d_model // n_heads
+ self.softmax_scale = softmax_scale
+ if self.softmax_scale is None:
+ self.softmax_scale = 1 / math.sqrt(self.head_dim)
+ self.attn_dropout_p = attn_pdrop
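+ # Multi-query layout: n_heads query heads but only one shared key head and one
+ # shared value head, hence d_model + 2 * head_dim fused output features.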
+ self.Wqkv = nn.Linear(d_model, d_model + 2 * self.head_dim, device=device)
+ fuse_splits = (d_model, d_model + self.head_dim)
+ self.Wqkv._fused = (0, fuse_splits)
+ if self.qk_ln:
+ layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
+ self.q_ln = layernorm_class(d_model, device=device)
+ self.k_ln = layernorm_class(self.head_dim, device=device)
+ if self.attn_impl == "flash":
+ self.attn_fn = flash_attn_fn
+ elif self.attn_impl == "triton":
+ self.attn_fn = triton_flash_attn_fn
+ if verbose:
+ warnings.warn(
+ "While `attn_impl: triton` can be faster than `attn_impl: flash` "
+ + "it uses more memory. When training larger models this can trigger "
+ + "alloc retries which hurts performance. If encountered, we recommend "
+ + "using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`."
+ )
+ elif self.attn_impl == "torch":
+ self.attn_fn = scaled_multihead_dot_product_attention
+ if torch.cuda.is_available() and verbose:
+ warnings.warn(
+ "Using `attn_impl: torch`. If your model does not use `alibi` or "
+ + "`prefix_lm` we recommend using `attn_impl: flash` otherwise "
+ + "we recommend using `attn_impl: triton`."
+ )
+ else:
+ raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
+ self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
+ self.out_proj._is_residual = True
+
+ def forward(
+ self,
+ x,
+ past_key_value=None,
+ attn_bias=None,
+ attention_mask=None,
+ is_causal=True,
+ needs_weights=False,
+ ):
+ qkv = self.Wqkv(x)
+ if self.clip_qkv:
+ qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
+ (query, key, value) = qkv.split(
+ [self.d_model, self.head_dim, self.head_dim], dim=2
+ )
+ key_padding_mask = attention_mask
+ if self.qk_ln:
+ dtype = query.dtype
+ query = self.q_ln(query).to(dtype)
+ key = self.k_ln(key).to(dtype)
+ (context, attn_weights, past_key_value) = self.attn_fn(
+ query,
+ key,
+ value,
+ self.n_heads,
+ past_key_value=past_key_value,
+ softmax_scale=self.softmax_scale,
+ attn_bias=attn_bias,
+ key_padding_mask=key_padding_mask,
+ is_causal=is_causal,
+ dropout_p=self.attn_dropout_p,
+ training=self.training,
+ needs_weights=needs_weights,
+ multiquery=True,
+ )
+ return (self.out_proj(context), attn_weights, past_key_value)
+
+
+def attn_bias_shape(
+ attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id
+):
+ if attn_impl == "flash":
+ return None
+ elif attn_impl in ["torch", "triton"]:
+ if alibi:
+ if (prefix_lm or not causal) or use_sequence_id:
+ return (1, n_heads, seq_len, seq_len)
+ return (1, n_heads, 1, seq_len)
+ elif prefix_lm or use_sequence_id:
+ return (1, 1, seq_len, seq_len)
+ return None
+ else:
+ raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
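+ # attn_bias_shape summary: None for flash (explicit bias unsupported); with
+ # alibi, a per-head bias of (1, n_heads, 1, seq_len), widened to
+ # (1, n_heads, seq_len, seq_len) when prefix-LM, non-causal attention, or
+ # sequence-id masking also needs per-query entries; (1, 1, seq_len, seq_len)
+ # for prefix-LM / sequence-id without alibi; otherwise None.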
+
+
+def build_attn_bias(
+ attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8
+):
+ if attn_impl == "flash":
+ return None
+ elif attn_impl in ["torch", "triton"]:
+ if alibi:
+ (device, dtype) = (attn_bias.device, attn_bias.dtype)
+ attn_bias = attn_bias.add(
+ build_alibi_bias(
+ n_heads,
+ seq_len,
+ full=not causal,
+ alibi_bias_max=alibi_bias_max,
+ device=device,
+ dtype=dtype,
+ )
+ )
+ return attn_bias
+ else:
+ raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
+
+
+def gen_slopes(n_heads, alibi_bias_max=8, device=None):
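+ # ALiBi head slopes; e.g. for n_heads=8 with alibi_bias_max=8 this yields
+ # 1/2, 1/4, ..., 1/256. Non-power-of-two head counts are handled by computing
+ # slopes for the next power of two, then interleaving and truncating to n_heads.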
+ _n_heads = 2 ** math.ceil(math.log2(n_heads))
+ m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
+ m = m.mul(alibi_bias_max / _n_heads)
+ slopes = 1.0 / torch.pow(2, m)
+ if _n_heads != n_heads:
+ slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
+ return slopes.view(1, n_heads, 1, 1)
+
+
+def build_alibi_bias(
+ n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None
+):
+ alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(
+ 1, 1, 1, seq_len
+ )
+ if full:
+ alibi_bias = alibi_bias - torch.arange(
+ 1 - seq_len, 1, dtype=torch.int32, device=device
+ ).view(1, 1, seq_len, 1)
+ alibi_bias = alibi_bias.abs().mul(-1)
+ slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
+ alibi_bias = alibi_bias * slopes
+ return alibi_bias.to(dtype=dtype)
+
+
+ATTN_CLASS_REGISTRY = {
+ "multihead_attention": MultiheadAttention,
+ "multiquery_attention": MultiQueryAttention,
+}
diff --git a/model/llava/model/language_model/mpt/blocks.py b/model/llava/model/language_model/mpt/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f036432ecaf689a9292ef943c8fa8eb4dc9c357
--- /dev/null
+++ b/model/llava/model/language_model/mpt/blocks.py
@@ -0,0 +1,92 @@
+"""GPT Blocks used for the GPT Model."""
+from typing import Dict, Optional, Tuple
+
+import torch
+import torch.nn as nn
+
+from .attention import ATTN_CLASS_REGISTRY
+from .norm import NORM_CLASS_REGISTRY
+
+
+class MPTMLP(nn.Module):
+ def __init__(
+ self, d_model: int, expansion_ratio: int, device: Optional[str] = None
+ ):
+ super().__init__()
+ self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device)
+ self.act = nn.GELU(approximate="none")
+ self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device)
+ self.down_proj._is_residual = True
+
+ def forward(self, x):
+ return self.down_proj(self.act(self.up_proj(x)))
+
+
+class MPTBlock(nn.Module):
+ def __init__(
+ self,
+ d_model: int,
+ n_heads: int,
+ expansion_ratio: int,
+ attn_config: Dict = {
+ "attn_type": "multihead_attention",
+ "attn_pdrop": 0.0,
+ "attn_impl": "triton",
+ "qk_ln": False,
+ "clip_qkv": None,
+ "softmax_scale": None,
+ "prefix_lm": False,
+ "attn_uses_sequence_id": False,
+ "alibi": False,
+ "alibi_bias_max": 8,
+ },
+ resid_pdrop: float = 0.0,
+ norm_type: str = "low_precision_layernorm",
+ verbose: int = 0,
+ device: Optional[str] = None,
+ **kwargs
+ ):
+ del kwargs
+ super().__init__()
+ norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
+ attn_class = ATTN_CLASS_REGISTRY[attn_config["attn_type"]]
+ self.norm_1 = norm_class(d_model, device=device)
+ self.attn = attn_class(
+ attn_impl=attn_config["attn_impl"],
+ clip_qkv=attn_config["clip_qkv"],
+ qk_ln=attn_config["qk_ln"],
+ softmax_scale=attn_config["softmax_scale"],
+ attn_pdrop=attn_config["attn_pdrop"],
+ d_model=d_model,
+ n_heads=n_heads,
+ verbose=verbose,
+ device=device,
+ )
+ self.norm_2 = norm_class(d_model, device=device)
+ self.ffn = MPTMLP(
+ d_model=d_model, expansion_ratio=expansion_ratio, device=device
+ )
+ self.resid_attn_dropout = nn.Dropout(resid_pdrop)
+ self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attn_bias: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.ByteTensor] = None,
+ is_causal: bool = True,
+ ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
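+ # Pre-norm residual block: LayerNorm -> self-attention -> residual add,
+ # then LayerNorm -> MLP -> residual add.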
+ a = self.norm_1(x)
+ (b, attn_weights, past_key_value) = self.attn(
+ a,
+ past_key_value=past_key_value,
+ attn_bias=attn_bias,
+ attention_mask=attention_mask,
+ is_causal=is_causal,
+ )
+ x = x + self.resid_attn_dropout(b)
+ m = self.norm_2(x)
+ n = self.ffn(m)
+ x = x + self.resid_ffn_dropout(n)
+ return (x, attn_weights, past_key_value)
diff --git a/model/llava/model/language_model/mpt/configuration_mpt.py b/model/llava/model/language_model/mpt/configuration_mpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..06da3b8f05b347836a7b4bebede90bc02d2b9971
--- /dev/null
+++ b/model/llava/model/language_model/mpt/configuration_mpt.py
@@ -0,0 +1,199 @@
+"""A HuggingFace-style model configuration."""
+from typing import Dict, Optional, Union
+
+from transformers import PretrainedConfig
+
+attn_config_defaults: Dict = {
+ "attn_type": "multihead_attention",
+ "attn_pdrop": 0.0,
+ "attn_impl": "triton",
+ "qk_ln": False,
+ "clip_qkv": None,
+ "softmax_scale": None,
+ "prefix_lm": False,
+ "attn_uses_sequence_id": False,
+ "alibi": False,
+ "alibi_bias_max": 8,
+}
+init_config_defaults: Dict = {
+ "name": "kaiming_normal_",
+ "fan_mode": "fan_in",
+ "init_nonlinearity": "relu",
+ "init_div_is_residual": True,
+ "emb_init_std": None,
+ "emb_init_uniform_lim": None,
+ "init_std": None,
+ "init_gain": 0.0,
+}
+
+
+class MPTConfig(PretrainedConfig):
+ model_type = "mpt"
+
+ def __init__(
+ self,
+ d_model: int = 2048,
+ n_heads: int = 16,
+ n_layers: int = 24,
+ expansion_ratio: int = 4,
+ max_seq_len: int = 2048,
+ vocab_size: int = 50368,
+ resid_pdrop: float = 0.0,
+ emb_pdrop: float = 0.0,
+ learned_pos_emb: bool = True,
+ attn_config: Dict = attn_config_defaults,
+ init_device: str = "cpu",
+ logit_scale: Optional[Union[float, str]] = None,
+ no_bias: bool = False,
+ verbose: int = 0,
+ embedding_fraction: float = 1.0,
+ norm_type: str = "low_precision_layernorm",
+ use_cache: bool = False,
+ init_config: Dict = init_config_defaults,
+ **kwargs,
+ ):
+ """The MPT configuration class.
+
+ Args:
+ d_model (int): The size of the embedding dimension of the model.
+ n_heads (int): The number of attention heads.
+ n_layers (int): The number of layers in the model.
+ expansion_ratio (int): The ratio of the up/down scale in the MLP.
+ max_seq_len (int): The maximum sequence length of the model.
+ vocab_size (int): The size of the vocabulary.
+ resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.
+ emb_pdrop (float): The dropout probability for the embedding layer.
+ learned_pos_emb (bool): Whether to use learned positional embeddings
+ attn_config (Dict): A dictionary used to configure the model's attention module:
+ attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention
+ attn_pdrop (float): The dropout probability for the attention layers.
+ attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.
+ qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.
+ clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to
+ this value.
+ softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,
+ use the default scale of ``1/sqrt(d_keys)``.
+ prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an
+ extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix
+ can attend to one another bi-directionally. Tokens outside the prefix use causal attention.
+ attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.
+ When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
+ which sub-sequence each token belongs to.
+ Defaults to ``False`` meaning any provided `sequence_id` will be ignored.
+ alibi (bool): Whether to use the alibi bias instead of position embeddings.
+ alibi_bias_max (int): The maximum value of the alibi bias.
+ init_device (str): The device to use for parameter initialization.
+ logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
+ no_bias (bool): Whether to use bias in all layers.
+ verbose (int): The verbosity level. 0 is silent.
+ embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
+ norm_type (str): choose type of norm to use
+ multiquery_attention (bool): Whether to use multiquery attention implementation.
+ use_cache (bool): Whether or not the model should return the last key/values attentions
+ init_config (Dict): A dictionary used to configure the model initialization:
+ init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',
+ 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or
+ 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.
+ init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.
+ emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.
+ emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution
+ used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.
+ init_std (float): The standard deviation of the normal distribution used to initialize the model,
+ if using the baseline_ parameter initialization scheme.
+ init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.
+ fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.
+ init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.
+ ---
+ See llmfoundry.models.utils.param_init_fns.py for info on other param init config options
+ """
+ self.d_model = d_model
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.expansion_ratio = expansion_ratio
+ self.max_seq_len = max_seq_len
+ self.vocab_size = vocab_size
+ self.resid_pdrop = resid_pdrop
+ self.emb_pdrop = emb_pdrop
+ self.learned_pos_emb = learned_pos_emb
+ self.attn_config = attn_config
+ self.init_device = init_device
+ self.logit_scale = logit_scale
+ self.no_bias = no_bias
+ self.verbose = verbose
+ self.embedding_fraction = embedding_fraction
+ self.norm_type = norm_type
+ self.use_cache = use_cache
+ self.init_config = init_config
+ if "name" in kwargs:
+ del kwargs["name"]
+ if "loss_fn" in kwargs:
+ del kwargs["loss_fn"]
+ super().__init__(**kwargs)
+ self._validate_config()
+
+ def _set_config_defaults(self, config, config_defaults):
+ for k, v in config_defaults.items():
+ if k not in config:
+ config[k] = v
+ return config
+
+ def _validate_config(self):
+ self.attn_config = self._set_config_defaults(
+ self.attn_config, attn_config_defaults
+ )
+ self.init_config = self._set_config_defaults(
+ self.init_config, init_config_defaults
+ )
+ if self.d_model % self.n_heads != 0:
+ raise ValueError("d_model must be divisible by n_heads")
+ if any(
+ (
+ prob < 0 or prob > 1
+ for prob in [
+ self.attn_config["attn_pdrop"],
+ self.resid_pdrop,
+ self.emb_pdrop,
+ ]
+ )
+ ):
+ raise ValueError(
+ "self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1"
+ )
+ if self.attn_config["attn_impl"] not in ["torch", "flash", "triton"]:
+ raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
+ if self.attn_config["prefix_lm"] and self.attn_config["attn_impl"] not in [
+ "torch",
+ "triton",
+ ]:
+ raise NotImplementedError(
+ "prefix_lm only implemented with torch and triton attention."
+ )
+ if self.attn_config["alibi"] and self.attn_config["attn_impl"] not in [
+ "torch",
+ "triton",
+ ]:
+ raise NotImplementedError(
+ "alibi only implemented with torch and triton attention."
+ )
+ if self.attn_config["attn_uses_sequence_id"] and self.attn_config[
+ "attn_impl"
+ ] not in ["torch", "triton"]:
+ raise NotImplementedError(
+ "attn_uses_sequence_id only implemented with torch and triton attention."
+ )
+ if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
+ raise ValueError(
+ "model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!"
+ )
+ if isinstance(self.logit_scale, str) and self.logit_scale != "inv_sqrt_d_model":
+ raise ValueError(
+ f"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'."
+ )
+ if self.init_config.get("name", None) is None:
+ raise ValueError(
+ f"self.init_config={self.init_config!r} 'name' needs to be set."
+ )
+ if not self.learned_pos_emb and (not self.attn_config["alibi"]):
+ raise ValueError(
+ f"Positional information must be provided to the model using either learned_pos_emb or alibi."
+ )
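+
+
+# Example (illustrative values, roughly the published MPT-7B scale):
+#   MPTConfig(d_model=4096, n_heads=32, n_layers=32, max_seq_len=2048,
+#             vocab_size=50432)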
diff --git a/model/llava/model/language_model/mpt/custom_embedding.py b/model/llava/model/language_model/mpt/custom_embedding.py
new file mode 100644
index 0000000000000000000000000000000000000000..83979e7e7d8552b32c97d3473d8fd4bb12bd45f3
--- /dev/null
+++ b/model/llava/model/language_model/mpt/custom_embedding.py
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import Tensor
+
+
+class SharedEmbedding(nn.Embedding):
+ def forward(self, input: Tensor, unembed: bool = False) -> Tensor:
+ if unembed:
+ return F.linear(input, self.weight)
+ return super().forward(input)
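+
+
+# Passing unembed=True reuses the embedding matrix as the output projection
+# (weight tying): F.linear(input, self.weight) maps hidden states back to
+# vocabulary logits, mirroring the F.linear(..., wte.weight) call used to
+# compute logits in llava_mpt.py.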
diff --git a/model/llava/model/language_model/mpt/flash_attn_triton.py b/model/llava/model/language_model/mpt/flash_attn_triton.py
new file mode 100644
index 0000000000000000000000000000000000000000..1247b53cdaee9c3f7e6b2e6df6610751158d7765
--- /dev/null
+++ b/model/llava/model/language_model/mpt/flash_attn_triton.py
@@ -0,0 +1,1087 @@
+"""
+Copied from https://github.com/HazyResearch/flash-attention/blob/eff9fe6b8076df59d64d7a3f464696738a3c7c24/flash_attn/flash_attn_triton.py
+update imports to use 'triton_pre_mlir'
+
+*Experimental* implementation of FlashAttention in Triton.
+Tested with triton==2.0.0.dev20221202.
+Triton 2.0 has a new backend (MLIR) but seems like it doesn't yet work for head dimensions
+other than 64:
+https://github.com/openai/triton/blob/d376020f90002757eea3ea9475d4f7cfc2ec5ead/python/triton/ops/flash_attention.py#L207
+We'll update this implementation with the new Triton backend once this is fixed.
+
+We use the FlashAttention implementation from Phil Tillet a starting point.
+https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
+
+Changes:
+- Implement both causal and non-causal attention.
+- Implement both self-attention and cross-attention.
+- Support arbitrary seqlens (not just multiples of 128), for both forward and backward.
+- Support all head dimensions up to 128 (not just 16, 32, 64, 128), for both forward and backward.
+- Support attention bias.
+- Speed up the forward pass a bit, and only store the LSE instead of m and l.
+- Make the backward for d=128 much faster by reducing register spilling.
+- Optionally parallelize the backward pass across seqlen_k, to deal with the case of
+small batch size * nheads.
+
+Caution:
+- This is an *experimental* implementation. The forward pass should be quite robust but
+I'm not 100% sure that the backward pass doesn't have race conditions (due to the Triton compiler).
+- This implementation has only been tested on A100.
+- If you plan to use headdim other than 64 and 128, you should test for race conditions
+(due to the Triton compiler), as done in tests/test_flash_attn.py
+"test_flash_attn_triton_race_condition". I've tested and fixed many race conditions
+for different head dimensions (40, 48, 64, 128, 80, 88, 96), but I'm still not 100% confident
+that there are none left for other head dimensions.
+
+Differences between this Triton version and the CUDA version:
+- Triton version doesn't support dropout.
+- Triton forward is generally faster than CUDA forward, while Triton backward is
+generally slower than CUDA backward. Overall Triton forward + backward is slightly slower
+than CUDA forward + backward.
+- Triton version doesn't support different sequence lengths in a batch (i.e., RaggedTensor/NestedTensor).
+- Triton version supports attention bias, while CUDA version doesn't.
+"""
+import math
+
+import torch
+import triton_pre_mlir as triton
+import triton_pre_mlir.language as tl
+
+
+@triton.heuristics(
+ {
+ "EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
+ "EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
+ "EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
+ }
+)
+@triton.jit
+def _fwd_kernel(
+ Q,
+ K,
+ V,
+ Bias,
+ Out,
+ Lse,
+ TMP,
+ softmax_scale,
+ stride_qb,
+ stride_qh,
+ stride_qm,
+ stride_kb,
+ stride_kh,
+ stride_kn,
+ stride_vb,
+ stride_vh,
+ stride_vn,
+ stride_bb,
+ stride_bh,
+ stride_bm,
+ stride_ob,
+ stride_oh,
+ stride_om,
+ nheads,
+ seqlen_q,
+ seqlen_k,
+ seqlen_q_rounded,
+ headdim,
+ CACHE_KEY_SEQLEN_Q,
+ CACHE_KEY_SEQLEN_K,
+ BIAS_TYPE: tl.constexpr,
+ IS_CAUSAL: tl.constexpr,
+ BLOCK_HEADDIM: tl.constexpr,
+ EVEN_M: tl.constexpr,
+ EVEN_N: tl.constexpr,
+ EVEN_HEADDIM: tl.constexpr,
+ BLOCK_M: tl.constexpr,
+ BLOCK_N: tl.constexpr,
+):
+ start_m = tl.program_id(0)
+ off_hb = tl.program_id(1)
+ off_b = off_hb // nheads
+ off_h = off_hb % nheads
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ offs_n = tl.arange(0, BLOCK_N)
+ offs_d = tl.arange(0, BLOCK_HEADDIM)
+ q_ptrs = (
+ Q
+ + off_b * stride_qb
+ + off_h * stride_qh
+ + (offs_m[:, None] * stride_qm + offs_d[None, :])
+ )
+ k_ptrs = (
+ K
+ + off_b * stride_kb
+ + off_h * stride_kh
+ + (offs_n[:, None] * stride_kn + offs_d[None, :])
+ )
+ v_ptrs = (
+ V
+ + off_b * stride_vb
+ + off_h * stride_vh
+ + (offs_n[:, None] * stride_vn + offs_d[None, :])
+ )
+ if BIAS_TYPE == "vector":
+ b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
+ elif BIAS_TYPE == "matrix":
+ b_ptrs = (
+ Bias
+ + off_b * stride_bb
+ + off_h * stride_bh
+ + (offs_m[:, None] * stride_bm + offs_n[None, :])
+ )
+ t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
+ lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+ m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+ acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
+ if EVEN_M & EVEN_N:
+ if EVEN_HEADDIM:
+ q = tl.load(q_ptrs)
+ else:
+ q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
+ elif EVEN_HEADDIM:
+ q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
+ else:
+ q = tl.load(
+ q_ptrs,
+ mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
+ other=0.0,
+ )
+ end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
+ for start_n in range(0, end_n, BLOCK_N):
+ start_n = tl.multiple_of(start_n, BLOCK_N)
+ if EVEN_N & EVEN_M:
+ if EVEN_HEADDIM:
+ k = tl.load(k_ptrs + start_n * stride_kn)
+ else:
+ k = tl.load(
+ k_ptrs + start_n * stride_kn,
+ mask=offs_d[None, :] < headdim,
+ other=0.0,
+ )
+ elif EVEN_HEADDIM:
+ k = tl.load(
+ k_ptrs + start_n * stride_kn,
+ mask=(start_n + offs_n)[:, None] < seqlen_k,
+ other=0.0,
+ )
+ else:
+ k = tl.load(
+ k_ptrs + start_n * stride_kn,
+ mask=((start_n + offs_n)[:, None] < seqlen_k)
+ & (offs_d[None, :] < headdim),
+ other=0.0,
+ )
+ qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+ qk += tl.dot(q, k, trans_b=True)
+ if not EVEN_N:
+ qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf"))
+ if IS_CAUSAL:
+ qk += tl.where(
+ offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf")
+ )
+ if BIAS_TYPE != "none":
+ if BIAS_TYPE == "vector":
+ if EVEN_N:
+ bias = tl.load(b_ptrs + start_n).to(tl.float32)
+ else:
+ bias = tl.load(
+ b_ptrs + start_n, mask=start_n + offs_n < seqlen_k, other=0.0
+ ).to(tl.float32)
+ bias = bias[None, :]
+ elif BIAS_TYPE == "matrix":
+ if EVEN_M & EVEN_N:
+ bias = tl.load(b_ptrs + start_n).to(tl.float32)
+ else:
+ bias = tl.load(
+ b_ptrs + start_n,
+ mask=(offs_m[:, None] < seqlen_q)
+ & ((start_n + offs_n)[None, :] < seqlen_k),
+ other=0.0,
+ ).to(tl.float32)
+ qk = qk * softmax_scale + bias
+ m_ij = tl.maximum(tl.max(qk, 1), lse_i)
+ p = tl.exp(qk - m_ij[:, None])
+ else:
+ m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
+ p = tl.exp(qk * softmax_scale - m_ij[:, None])
+ l_ij = tl.sum(p, 1)
+ acc_o_scale = tl.exp(m_i - m_ij)
+ tl.store(t_ptrs, acc_o_scale)
+ acc_o_scale = tl.load(t_ptrs)
+ acc_o = acc_o * acc_o_scale[:, None]
+ if EVEN_N & EVEN_M:
+ if EVEN_HEADDIM:
+ v = tl.load(v_ptrs + start_n * stride_vn)
+ else:
+ v = tl.load(
+ v_ptrs + start_n * stride_vn,
+ mask=offs_d[None, :] < headdim,
+ other=0.0,
+ )
+ elif EVEN_HEADDIM:
+ v = tl.load(
+ v_ptrs + start_n * stride_vn,
+ mask=(start_n + offs_n)[:, None] < seqlen_k,
+ other=0.0,
+ )
+ else:
+ v = tl.load(
+ v_ptrs + start_n * stride_vn,
+ mask=((start_n + offs_n)[:, None] < seqlen_k)
+ & (offs_d[None, :] < headdim),
+ other=0.0,
+ )
+ p = p.to(v.dtype)
+ acc_o += tl.dot(p, v)
+ m_i = m_ij
+ l_i_new = tl.exp(lse_i - m_ij) + l_ij
+ lse_i = m_ij + tl.log(l_i_new)
+ o_scale = tl.exp(m_i - lse_i)
+ tl.store(t_ptrs, o_scale)
+ o_scale = tl.load(t_ptrs)
+ acc_o = acc_o * o_scale[:, None]
+ start_m = tl.program_id(0)
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
+ tl.store(lse_ptrs, lse_i)
+ offs_d = tl.arange(0, BLOCK_HEADDIM)
+ out_ptrs = (
+ Out
+ + off_b * stride_ob
+ + off_h * stride_oh
+ + (offs_m[:, None] * stride_om + offs_d[None, :])
+ )
+ if EVEN_M:
+ if EVEN_HEADDIM:
+ tl.store(out_ptrs, acc_o)
+ else:
+ tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
+ elif EVEN_HEADDIM:
+ tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
+ else:
+ tl.store(
+ out_ptrs,
+ acc_o,
+ mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
+ )
+
+
+@triton.jit
+def _bwd_preprocess_do_o_dot(
+ Out,
+ DO,
+ Delta,
+ stride_ob,
+ stride_oh,
+ stride_om,
+ stride_dob,
+ stride_doh,
+ stride_dom,
+ nheads,
+ seqlen_q,
+ seqlen_q_rounded,
+ headdim,
+ BLOCK_M: tl.constexpr,
+ BLOCK_HEADDIM: tl.constexpr,
+):
+ start_m = tl.program_id(0)
+ off_hb = tl.program_id(1)
+ off_b = off_hb // nheads
+ off_h = off_hb % nheads
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+ offs_d = tl.arange(0, BLOCK_HEADDIM)
+ o = tl.load(
+ Out
+ + off_b * stride_ob
+ + off_h * stride_oh
+ + offs_m[:, None] * stride_om
+ + offs_d[None, :],
+ mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
+ other=0.0,
+ ).to(tl.float32)
+ do = tl.load(
+ DO
+ + off_b * stride_dob
+ + off_h * stride_doh
+ + offs_m[:, None] * stride_dom
+ + offs_d[None, :],
+ mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
+ other=0.0,
+ ).to(tl.float32)
+ delta = tl.sum(o * do, axis=1)
+ tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
+
+
+@triton.jit
+def _bwd_store_dk_dv(
+ dk_ptrs,
+ dv_ptrs,
+ dk,
+ dv,
+ offs_n,
+ offs_d,
+ seqlen_k,
+ headdim,
+ EVEN_M: tl.constexpr,
+ EVEN_N: tl.constexpr,
+ EVEN_HEADDIM: tl.constexpr,
+):
+ if EVEN_N & EVEN_M:
+ if EVEN_HEADDIM:
+ tl.store(dv_ptrs, dv)
+ tl.store(dk_ptrs, dk)
+ else:
+ tl.store(dv_ptrs, dv, mask=offs_d[None, :] < headdim)
+ tl.store(dk_ptrs, dk, mask=offs_d[None, :] < headdim)
+ elif EVEN_HEADDIM:
+ tl.store(dv_ptrs, dv, mask=offs_n[:, None] < seqlen_k)
+ tl.store(dk_ptrs, dk, mask=offs_n[:, None] < seqlen_k)
+ else:
+ tl.store(
+ dv_ptrs, dv, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim)
+ )
+ tl.store(
+ dk_ptrs, dk, mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim)
+ )
+
+
+@triton.jit
+def _bwd_kernel_one_col_block(
+ start_n,
+ Q,
+ K,
+ V,
+ Bias,
+ DO,
+ DQ,
+ DK,
+ DV,
+ LSE,
+ D,
+ softmax_scale,
+ stride_qm,
+ stride_kn,
+ stride_vn,
+ stride_bm,
+ stride_dom,
+ stride_dqm,
+ stride_dkn,
+ stride_dvn,
+ seqlen_q,
+ seqlen_k,
+ headdim,
+ ATOMIC_ADD: tl.constexpr,
+ BIAS_TYPE: tl.constexpr,
+ IS_CAUSAL: tl.constexpr,
+ BLOCK_HEADDIM: tl.constexpr,
+ EVEN_M: tl.constexpr,
+ EVEN_N: tl.constexpr,
+ EVEN_HEADDIM: tl.constexpr,
+ BLOCK_M: tl.constexpr,
+ BLOCK_N: tl.constexpr,
+):
+ begin_m = 0 if not IS_CAUSAL else start_n * BLOCK_N // BLOCK_M * BLOCK_M
+ offs_qm = begin_m + tl.arange(0, BLOCK_M)
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
+ offs_m = tl.arange(0, BLOCK_M)
+ offs_d = tl.arange(0, BLOCK_HEADDIM)
+ q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_d[None, :])
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_d[None, :])
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_d[None, :])
+ do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_d[None, :])
+ dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_d[None, :])
+ if BIAS_TYPE == "vector":
+ b_ptrs = Bias + offs_n
+ elif BIAS_TYPE == "matrix":
+ b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :])
+ dv = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
+ dk = tl.zeros([BLOCK_N, BLOCK_HEADDIM], dtype=tl.float32)
+ if begin_m >= seqlen_q:
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
+ _bwd_store_dk_dv(
+ dk_ptrs,
+ dv_ptrs,
+ dk,
+ dv,
+ offs_n,
+ offs_d,
+ seqlen_k,
+ headdim,
+ EVEN_M=EVEN_M,
+ EVEN_N=EVEN_N,
+ EVEN_HEADDIM=EVEN_HEADDIM,
+ )
+ return
+ if EVEN_N & EVEN_M:
+ if EVEN_HEADDIM:
+ k = tl.load(k_ptrs)
+ v = tl.load(v_ptrs)
+ else:
+ k = tl.load(k_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
+ v = tl.load(v_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
+ elif EVEN_HEADDIM:
+ k = tl.load(k_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
+ v = tl.load(v_ptrs, mask=offs_n[:, None] < seqlen_k, other=0.0)
+ else:
+ k = tl.load(
+ k_ptrs,
+ mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
+ other=0.0,
+ )
+ v = tl.load(
+ v_ptrs,
+ mask=(offs_n[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
+ other=0.0,
+ )
+ num_block_m = tl.cdiv(seqlen_q, BLOCK_M)
+ for start_m in range(begin_m, num_block_m * BLOCK_M, BLOCK_M):
+ start_m = tl.multiple_of(start_m, BLOCK_M)
+ offs_m_curr = start_m + offs_m
+ if EVEN_M & EVEN_HEADDIM:
+ q = tl.load(q_ptrs)
+ elif EVEN_HEADDIM:
+ q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < seqlen_q, other=0.0)
+ else:
+ q = tl.load(
+ q_ptrs,
+ mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
+ other=0.0,
+ )
+ qk = tl.dot(q, k, trans_b=True)
+ if not EVEN_N:
+ qk = tl.where(offs_n[None, :] < seqlen_k, qk, float("-inf"))
+ if IS_CAUSAL:
+ qk = tl.where(offs_m_curr[:, None] >= offs_n[None, :], qk, float("-inf"))
+ if BIAS_TYPE != "none":
+ tl.debug_barrier()
+ if BIAS_TYPE == "vector":
+ if EVEN_N:
+ bias = tl.load(b_ptrs).to(tl.float32)
+ else:
+ bias = tl.load(b_ptrs, mask=offs_n < seqlen_k, other=0.0).to(
+ tl.float32
+ )
+ bias = bias[None, :]
+ elif BIAS_TYPE == "matrix":
+ if EVEN_M & EVEN_N:
+ bias = tl.load(b_ptrs).to(tl.float32)
+ else:
+ bias = tl.load(
+ b_ptrs,
+ mask=(offs_m_curr[:, None] < seqlen_q)
+ & (offs_n[None, :] < seqlen_k),
+ other=0.0,
+ ).to(tl.float32)
+ qk = qk * softmax_scale + bias
+ if not EVEN_M & EVEN_HEADDIM:
+ tl.debug_barrier()
+ lse_i = tl.load(LSE + offs_m_curr)
+ if BIAS_TYPE == "none":
+ p = tl.exp(qk * softmax_scale - lse_i[:, None])
+ else:
+ p = tl.exp(qk - lse_i[:, None])
+ if EVEN_M & EVEN_HEADDIM:
+ do = tl.load(do_ptrs)
+ else:
+ do = tl.load(
+ do_ptrs,
+ mask=(offs_m_curr[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
+ other=0.0,
+ )
+ dv += tl.dot(p.to(do.dtype), do, trans_a=True)
+ if not EVEN_M & EVEN_HEADDIM:
+ tl.debug_barrier()
+ dp = tl.dot(do, v, trans_b=True)
+ if not EVEN_HEADDIM:
+ tl.debug_barrier()
+ Di = tl.load(D + offs_m_curr)
+ ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
+ dk += tl.dot(ds, q, trans_a=True)
+ if not EVEN_M & EVEN_HEADDIM:
+ tl.debug_barrier()
+ if not ATOMIC_ADD:
+ if EVEN_M & EVEN_HEADDIM:
+ dq = tl.load(dq_ptrs, eviction_policy="evict_last")
+ dq += tl.dot(ds, k)
+ tl.store(dq_ptrs, dq, eviction_policy="evict_last")
+ elif EVEN_HEADDIM:
+ dq = tl.load(
+ dq_ptrs,
+ mask=offs_m_curr[:, None] < seqlen_q,
+ other=0.0,
+ eviction_policy="evict_last",
+ )
+ dq += tl.dot(ds, k)
+ tl.store(
+ dq_ptrs,
+ dq,
+ mask=offs_m_curr[:, None] < seqlen_q,
+ eviction_policy="evict_last",
+ )
+ else:
+ dq = tl.load(
+ dq_ptrs,
+ mask=(offs_m_curr[:, None] < seqlen_q)
+ & (offs_d[None, :] < headdim),
+ other=0.0,
+ eviction_policy="evict_last",
+ )
+ dq += tl.dot(ds, k)
+ tl.store(
+ dq_ptrs,
+ dq,
+ mask=(offs_m_curr[:, None] < seqlen_q)
+ & (offs_d[None, :] < headdim),
+ eviction_policy="evict_last",
+ )
+ else:
+ dq = tl.dot(ds, k)
+ if EVEN_M & EVEN_HEADDIM:
+ tl.atomic_add(dq_ptrs, dq)
+ elif EVEN_HEADDIM:
+ tl.atomic_add(dq_ptrs, dq, mask=offs_m_curr[:, None] < seqlen_q)
+ else:
+ tl.atomic_add(
+ dq_ptrs,
+ dq,
+ mask=(offs_m_curr[:, None] < seqlen_q)
+ & (offs_d[None, :] < headdim),
+ )
+ dq_ptrs += BLOCK_M * stride_dqm
+ q_ptrs += BLOCK_M * stride_qm
+ do_ptrs += BLOCK_M * stride_dom
+ if BIAS_TYPE == "matrix":
+ b_ptrs += BLOCK_M * stride_bm
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_d[None, :])
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_d[None, :])
+ _bwd_store_dk_dv(
+ dk_ptrs,
+ dv_ptrs,
+ dk,
+ dv,
+ offs_n,
+ offs_d,
+ seqlen_k,
+ headdim,
+ EVEN_M=EVEN_M,
+ EVEN_N=EVEN_N,
+ EVEN_HEADDIM=EVEN_HEADDIM,
+ )
+
+
+def init_to_zero(name):
+ return lambda nargs: nargs[name].zero_()
+
+
+@triton.autotune(
+ configs=[
+ triton.Config(
+ {"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": False},
+ num_warps=8,
+ num_stages=1,
+ pre_hook=init_to_zero("DQ"),
+ ),
+ triton.Config(
+ {"BLOCK_M": 128, "BLOCK_N": 128, "SEQUENCE_PARALLEL": True},
+ num_warps=8,
+ num_stages=1,
+ pre_hook=init_to_zero("DQ"),
+ ),
+ ],
+ key=[
+ "CACHE_KEY_SEQLEN_Q",
+ "CACHE_KEY_SEQLEN_K",
+ "BIAS_TYPE",
+ "IS_CAUSAL",
+ "BLOCK_HEADDIM",
+ ],
+)
+@triton.heuristics(
+ {
+ "EVEN_M": lambda args: args["seqlen_q"] % args["BLOCK_M"] == 0,
+ "EVEN_N": lambda args: args["seqlen_k"] % args["BLOCK_N"] == 0,
+ "EVEN_HEADDIM": lambda args: args["headdim"] == args["BLOCK_HEADDIM"],
+ }
+)
+@triton.jit
+def _bwd_kernel(
+ Q,
+ K,
+ V,
+ Bias,
+ DO,
+ DQ,
+ DK,
+ DV,
+ LSE,
+ D,
+ softmax_scale,
+ stride_qb,
+ stride_qh,
+ stride_qm,
+ stride_kb,
+ stride_kh,
+ stride_kn,
+ stride_vb,
+ stride_vh,
+ stride_vn,
+ stride_bb,
+ stride_bh,
+ stride_bm,
+ stride_dob,
+ stride_doh,
+ stride_dom,
+ stride_dqb,
+ stride_dqh,
+ stride_dqm,
+ stride_dkb,
+ stride_dkh,
+ stride_dkn,
+ stride_dvb,
+ stride_dvh,
+ stride_dvn,
+ nheads,
+ seqlen_q,
+ seqlen_k,
+ seqlen_q_rounded,
+ headdim,
+ CACHE_KEY_SEQLEN_Q,
+ CACHE_KEY_SEQLEN_K,
+ BIAS_TYPE: tl.constexpr,
+ IS_CAUSAL: tl.constexpr,
+ BLOCK_HEADDIM: tl.constexpr,
+ SEQUENCE_PARALLEL: tl.constexpr,
+ EVEN_M: tl.constexpr,
+ EVEN_N: tl.constexpr,
+ EVEN_HEADDIM: tl.constexpr,
+ BLOCK_M: tl.constexpr,
+ BLOCK_N: tl.constexpr,
+):
+ off_hb = tl.program_id(1)
+ off_b = off_hb // nheads
+ off_h = off_hb % nheads
+ Q += off_b * stride_qb + off_h * stride_qh
+ K += off_b * stride_kb + off_h * stride_kh
+ V += off_b * stride_vb + off_h * stride_vh
+ DO += off_b * stride_dob + off_h * stride_doh
+ DQ += off_b * stride_dqb + off_h * stride_dqh
+ DK += off_b * stride_dkb + off_h * stride_dkh
+ DV += off_b * stride_dvb + off_h * stride_dvh
+ if BIAS_TYPE != "none":
+ Bias += off_b * stride_bb + off_h * stride_bh
+ D += off_hb * seqlen_q_rounded
+ LSE += off_hb * seqlen_q_rounded
+ if not SEQUENCE_PARALLEL:
+ num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
+ for start_n in range(0, num_block_n):
+ _bwd_kernel_one_col_block(
+ start_n,
+ Q,
+ K,
+ V,
+ Bias,
+ DO,
+ DQ,
+ DK,
+ DV,
+ LSE,
+ D,
+ softmax_scale,
+ stride_qm,
+ stride_kn,
+ stride_vn,
+ stride_bm,
+ stride_dom,
+ stride_dqm,
+ stride_dkn,
+ stride_dvn,
+ seqlen_q,
+ seqlen_k,
+ headdim,
+ ATOMIC_ADD=False,
+ BIAS_TYPE=BIAS_TYPE,
+ IS_CAUSAL=IS_CAUSAL,
+ BLOCK_HEADDIM=BLOCK_HEADDIM,
+ EVEN_M=EVEN_M,
+ EVEN_N=EVEN_N,
+ EVEN_HEADDIM=EVEN_HEADDIM,
+ BLOCK_M=BLOCK_M,
+ BLOCK_N=BLOCK_N,
+ )
+ else:
+ start_n = tl.program_id(0)
+ _bwd_kernel_one_col_block(
+ start_n,
+ Q,
+ K,
+ V,
+ Bias,
+ DO,
+ DQ,
+ DK,
+ DV,
+ LSE,
+ D,
+ softmax_scale,
+ stride_qm,
+ stride_kn,
+ stride_vn,
+ stride_bm,
+ stride_dom,
+ stride_dqm,
+ stride_dkn,
+ stride_dvn,
+ seqlen_q,
+ seqlen_k,
+ headdim,
+ ATOMIC_ADD=True,
+ BIAS_TYPE=BIAS_TYPE,
+ IS_CAUSAL=IS_CAUSAL,
+ BLOCK_HEADDIM=BLOCK_HEADDIM,
+ EVEN_M=EVEN_M,
+ EVEN_N=EVEN_N,
+ EVEN_HEADDIM=EVEN_HEADDIM,
+ BLOCK_M=BLOCK_M,
+ BLOCK_N=BLOCK_N,
+ )
+
+
+def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
+ (batch, seqlen_q, nheads, d) = q.shape
+ (_, seqlen_k, _, _) = k.shape
+ assert k.shape == (batch, seqlen_k, nheads, d)
+ assert v.shape == (batch, seqlen_k, nheads, d)
+    assert d <= 128, "FlashAttention only supports head dimensions up to 128"
+ assert q.dtype == k.dtype == v.dtype, "All tensors must have the same type"
+ assert q.dtype in [torch.float16, torch.bfloat16], "Only support fp16 and bf16"
+ assert q.is_cuda and k.is_cuda and v.is_cuda
+ softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
+ has_bias = bias is not None
+ bias_type = "none"
+ if has_bias:
+ assert bias.dtype in [q.dtype, torch.float]
+ assert bias.is_cuda
+ assert bias.dim() == 4
+ if bias.stride(-1) != 1:
+ bias = bias.contiguous()
+ if bias.shape[2:] == (1, seqlen_k):
+ bias_type = "vector"
+ elif bias.shape[2:] == (seqlen_q, seqlen_k):
+ bias_type = "matrix"
+ else:
+ raise RuntimeError(
+ "Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)"
+ )
+ bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
+ bias_strides = (
+ (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
+ )
+ seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
+ lse = torch.empty(
+ (batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32
+ )
+ tmp = torch.empty(
+ (batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32
+ )
+ o = torch.empty_like(q)
+ BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
+ BLOCK = 128
+ num_warps = 4 if d <= 64 else 8
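+    # Launch one program per (query row block, batch * head) pair.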
+ grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
+ _fwd_kernel[grid](
+ q,
+ k,
+ v,
+ bias,
+ o,
+ lse,
+ tmp,
+ softmax_scale,
+ q.stride(0),
+ q.stride(2),
+ q.stride(1),
+ k.stride(0),
+ k.stride(2),
+ k.stride(1),
+ v.stride(0),
+ v.stride(2),
+ v.stride(1),
+ *bias_strides,
+ o.stride(0),
+ o.stride(2),
+ o.stride(1),
+ nheads,
+ seqlen_q,
+ seqlen_k,
+ seqlen_q_rounded,
+ d,
+ seqlen_q // 32,
+ seqlen_k // 32,
+ bias_type,
+ causal,
+ BLOCK_HEADDIM,
+ BLOCK_M=BLOCK,
+ BLOCK_N=BLOCK,
+ num_warps=num_warps,
+ num_stages=1
+ )
+ return (o, lse, softmax_scale)
+
+
+def _flash_attn_backward(
+ do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None
+):
+ if do.stride(-1) != 1:
+ do = do.contiguous()
+ (batch, seqlen_q, nheads, d) = q.shape
+ (_, seqlen_k, _, _) = k.shape
+ assert d <= 128
+ seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
+ assert lse.shape == (batch, nheads, seqlen_q_rounded)
+ assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
+ assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
+ softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
+ dq_accum = torch.empty_like(q, dtype=torch.float32)
+ delta = torch.empty_like(lse)
+ BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
+ grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
+ _bwd_preprocess_do_o_dot[grid](
+ o,
+ do,
+ delta,
+ o.stride(0),
+ o.stride(2),
+ o.stride(1),
+ do.stride(0),
+ do.stride(2),
+ do.stride(1),
+ nheads,
+ seqlen_q,
+ seqlen_q_rounded,
+ d,
+ BLOCK_M=128,
+ BLOCK_HEADDIM=BLOCK_HEADDIM,
+ )
+ has_bias = bias is not None
+ bias_type = "none"
+ if has_bias:
+ assert bias.dtype in [q.dtype, torch.float]
+ assert bias.is_cuda
+ assert bias.dim() == 4
+ assert bias.stride(-1) == 1
+ if bias.shape[2:] == (1, seqlen_k):
+ bias_type = "vector"
+ elif bias.shape[2:] == (seqlen_q, seqlen_k):
+ bias_type = "matrix"
+ else:
+ raise RuntimeError(
+ "Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)"
+ )
+ bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
+ bias_strides = (
+ (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
+ )
+ grid = lambda META: (
+ triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1,
+ batch * nheads,
+ )
+ _bwd_kernel[grid](
+ q,
+ k,
+ v,
+ bias,
+ do,
+ dq_accum,
+ dk,
+ dv,
+ lse,
+ delta,
+ softmax_scale,
+ q.stride(0),
+ q.stride(2),
+ q.stride(1),
+ k.stride(0),
+ k.stride(2),
+ k.stride(1),
+ v.stride(0),
+ v.stride(2),
+ v.stride(1),
+ *bias_strides,
+ do.stride(0),
+ do.stride(2),
+ do.stride(1),
+ dq_accum.stride(0),
+ dq_accum.stride(2),
+ dq_accum.stride(1),
+ dk.stride(0),
+ dk.stride(2),
+ dk.stride(1),
+ dv.stride(0),
+ dv.stride(2),
+ dv.stride(1),
+ nheads,
+ seqlen_q,
+ seqlen_k,
+ seqlen_q_rounded,
+ d,
+ seqlen_q // 32,
+ seqlen_k // 32,
+ bias_type,
+ causal,
+ BLOCK_HEADDIM
+ )
+ dq.copy_(dq_accum)
+
+
+class FlashAttnQKVPackedFunc(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, qkv, bias=None, causal=False, softmax_scale=None):
+ """
+ qkv: (batch, seqlen, 3, nheads, headdim)
+        bias: optional, shape broadcastable to (batch, nheads, seqlen, seqlen).
+ For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen).
+ ALiBi mask for non-causal would have shape (1, nheads, seqlen, seqlen)
+ """
+ if qkv.stride(-1) != 1:
+ qkv = qkv.contiguous()
+ (o, lse, ctx.softmax_scale) = _flash_attn_forward(
+ qkv[:, :, 0],
+ qkv[:, :, 1],
+ qkv[:, :, 2],
+ bias=bias,
+ causal=causal,
+ softmax_scale=softmax_scale,
+ )
+ ctx.save_for_backward(qkv, o, lse, bias)
+ ctx.causal = causal
+ return o
+
+ @staticmethod
+ def backward(ctx, do):
+ (qkv, o, lse, bias) = ctx.saved_tensors
+ assert not ctx.needs_input_grad[
+ 1
+ ], "FlashAttention does not support bias gradient yet"
+ with torch.inference_mode():
+ dqkv = torch.empty_like(qkv)
+ _flash_attn_backward(
+ do,
+ qkv[:, :, 0],
+ qkv[:, :, 1],
+ qkv[:, :, 2],
+ o,
+ lse,
+ dqkv[:, :, 0],
+ dqkv[:, :, 1],
+ dqkv[:, :, 2],
+ bias=bias,
+ causal=ctx.causal,
+ softmax_scale=ctx.softmax_scale,
+ )
+ return (dqkv, None, None, None)
+
+
+flash_attn_qkvpacked_func = FlashAttnQKVPackedFunc.apply
+
+
+class FlashAttnKVPackedFunc(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, q, kv, bias=None, causal=False, softmax_scale=None):
+ """
+ q: (batch, seqlen_q, nheads, headdim)
+ kv: (batch, seqlen_k, 2, nheads, headdim)
+        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
+ For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
+ ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
+ """
+ (q, kv) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, kv]]
+ (o, lse, ctx.softmax_scale) = _flash_attn_forward(
+ q,
+ kv[:, :, 0],
+ kv[:, :, 1],
+ bias=bias,
+ causal=causal,
+ softmax_scale=softmax_scale,
+ )
+ ctx.save_for_backward(q, kv, o, lse, bias)
+ ctx.causal = causal
+ return o
+
+ @staticmethod
+ def backward(ctx, do):
+ (q, kv, o, lse, bias) = ctx.saved_tensors
+ if len(ctx.needs_input_grad) >= 3:
+ assert not ctx.needs_input_grad[
+ 2
+ ], "FlashAttention does not support bias gradient yet"
+ with torch.inference_mode():
+ dq = torch.empty_like(q)
+ dkv = torch.empty_like(kv)
+ _flash_attn_backward(
+ do,
+ q,
+ kv[:, :, 0],
+ kv[:, :, 1],
+ o,
+ lse,
+ dq,
+ dkv[:, :, 0],
+ dkv[:, :, 1],
+ bias=bias,
+ causal=ctx.causal,
+ softmax_scale=ctx.softmax_scale,
+ )
+ return (dq, dkv, None, None, None)
+
+
+flash_attn_kvpacked_func = FlashAttnKVPackedFunc.apply
+
+
+class FlashAttnFunc(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
+ """
+ q: (batch_size, seqlen_q, nheads, headdim)
+ k, v: (batch_size, seqlen_k, nheads, headdim)
+        bias: optional, shape broadcastable to (batch, nheads, seqlen_q, seqlen_k).
+ For example, ALiBi mask for causal would have shape (1, nheads, 1, seqlen_k).
+ ALiBi mask for non-causal would have shape (1, nheads, seqlen_q, seqlen_k)
+ """
+ (q, k, v) = [x if x.stride(-1) == 1 else x.contiguous() for x in [q, k, v]]
+ (o, lse, ctx.softmax_scale) = _flash_attn_forward(
+ q, k, v, bias=bias, causal=causal, softmax_scale=softmax_scale
+ )
+ ctx.save_for_backward(q, k, v, o, lse, bias)
+ ctx.causal = causal
+ return o
+
+ @staticmethod
+ def backward(ctx, do):
+ (q, k, v, o, lse, bias) = ctx.saved_tensors
+ assert not ctx.needs_input_grad[
+ 3
+ ], "FlashAttention does not support bias gradient yet"
+ with torch.inference_mode():
+ dq = torch.empty_like(q)
+ dk = torch.empty_like(k)
+ dv = torch.empty_like(v)
+ _flash_attn_backward(
+ do,
+ q,
+ k,
+ v,
+ o,
+ lse,
+ dq,
+ dk,
+ dv,
+ bias=bias,
+ causal=ctx.causal,
+ softmax_scale=ctx.softmax_scale,
+ )
+ return (dq, dk, dv, None, None, None)
+
+
+flash_attn_func = FlashAttnFunc.apply
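+
+# A minimal usage sketch (illustrative only; the tensor sizes below are arbitrary,
+# while CUDA tensors in fp16/bf16 with headdim <= 128 are required by the asserts
+# in _flash_attn_forward):
+#
+#     import torch
+#
+#     batch, seqlen, nheads, headdim = 2, 1024, 16, 64
+#     q = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
+#     k, v = torch.randn_like(q), torch.randn_like(q)
+#     out = flash_attn_func(q, k, v, None, True)  # bias=None, causal=True
+#
+# flash_attn_qkvpacked_func and flash_attn_kvpacked_func take the same optional
+# bias / causal / softmax_scale arguments, but expect qkv packed as
+# (batch, seqlen, 3, nheads, headdim) or kv packed as (batch, seqlen_k, 2, nheads, headdim).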
diff --git a/model/llava/model/language_model/mpt/hf_prefixlm_converter.py b/model/llava/model/language_model/mpt/hf_prefixlm_converter.py
new file mode 100644
index 0000000000000000000000000000000000000000..427d3878185431f3e657d1a93c5db5a55f04300f
--- /dev/null
+++ b/model/llava/model/language_model/mpt/hf_prefixlm_converter.py
@@ -0,0 +1,750 @@
+"""Converts Huggingface Causal LM to Prefix LM.
+
+Conversion does lightweight surgery on a HuggingFace
+Causal LM to convert it to a Prefix LM.
+
+Prefix LMs accept a `bidirectional_mask` input in `forward`
+and treat the input prompt as the prefix in `generate`.
+"""
+import math
+import warnings
+from types import MethodType
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+from transformers.models.bloom.modeling_bloom import (
+ BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel,
+ CausalLMOutputWithCrossAttentions, CrossEntropyLoss)
+from transformers.models.bloom.modeling_bloom import \
+ _expand_mask as _expand_mask_bloom
+from transformers.models.bloom.modeling_bloom import \
+ _make_causal_mask as _make_causal_mask_bloom
+from transformers.models.bloom.modeling_bloom import logging
+from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
+from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
+from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
+from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
+from transformers.models.opt.modeling_opt import OPTForCausalLM
+from transformers.models.opt.modeling_opt import \
+ _expand_mask as _expand_mask_opt
+from transformers.models.opt.modeling_opt import \
+ _make_causal_mask as _make_causal_mask_opt
+
+logger = logging.get_logger(__name__)
+_SUPPORTED_GPT_MODELS = (
+ GPT2LMHeadModel,
+ GPTJForCausalLM,
+ GPTNeoForCausalLM,
+ GPTNeoXForCausalLM,
+)
+CAUSAL_GPT_TYPES = Union[
+ GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM
+]
+
+
+def _convert_gpt_causal_lm_to_prefix_lm(model: CAUSAL_GPT_TYPES) -> CAUSAL_GPT_TYPES:
+ """Converts a GPT-style Causal LM to a Prefix LM.
+
+ Supported HuggingFace model classes:
+ - `GPT2LMHeadModel`
+ - `GPTNeoForCausalLM`
+ - `GPTNeoXForCausalLM`
+ - `GPTJForCausalLM`
+
+ See `convert_hf_causal_lm_to_prefix_lm` for more details.
+ """
+ if hasattr(model, "_prefix_lm_converted"):
+ return model
+ assert isinstance(model, _SUPPORTED_GPT_MODELS)
+ assert (
+ model.config.add_cross_attention == False
+ ), "Only supports GPT-style decoder-only models"
+
+ def _get_attn_modules(model: CAUSAL_GPT_TYPES) -> List[torch.nn.Module]:
+ """Helper that gets a list of the model's attention modules.
+
+ Each module has a `bias` buffer used for causal masking. The Prefix LM
+ conversion adds logic to dynamically manipulate these biases to support
+ Prefix LM attention masking.
+ """
+ attn_modules = []
+ if isinstance(model, GPTNeoXForCausalLM):
+ blocks = model.gpt_neox.layers
+ else:
+ blocks = model.transformer.h
+ for block in blocks:
+ if isinstance(model, GPTNeoForCausalLM):
+ if block.attn.attention_type != "global":
+ continue
+ attn_module = block.attn.attention
+ elif isinstance(model, GPTNeoXForCausalLM):
+ attn_module = block.attention
+ else:
+ attn_module = block.attn
+ attn_modules.append(attn_module)
+ return attn_modules
+
+ setattr(model, "_original_forward", getattr(model, "forward"))
+ setattr(model, "_original_generate", getattr(model, "generate"))
+
+ def forward(
+ self: CAUSAL_GPT_TYPES,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ bidirectional_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ """Wraps original forward to enable PrefixLM attention."""
+
+ def call_og_forward():
+ if isinstance(self, GPTNeoXForCausalLM):
+ return self._original_forward(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ else:
+ return self._original_forward(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if bidirectional_mask is None:
+ return call_og_forward()
+ assert isinstance(bidirectional_mask, torch.Tensor)
+ attn_modules = _get_attn_modules(model)
+ (b, s) = bidirectional_mask.shape
+ max_length = attn_modules[0].bias.shape[-1]
+ if s > max_length:
+ raise ValueError(
+ f"bidirectional_mask sequence length (={s}) exceeds the "
+ + f"max length allowed by the model ({max_length})."
+ )
+ assert s <= max_length
+ if s < max_length:
+ pad = torch.zeros(
+ (int(b), int(max_length - s)),
+ dtype=bidirectional_mask.dtype,
+ device=bidirectional_mask.device,
+ )
+ bidirectional_mask = torch.cat([bidirectional_mask, pad], dim=1)
+ bidirectional = bidirectional_mask.unsqueeze(1).unsqueeze(1)
+ for attn_module in attn_modules:
+ attn_module.bias.data = torch.logical_or(
+ attn_module.bias.data, bidirectional
+ )
+ output = call_og_forward()
+ for attn_module in attn_modules:
+ attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
+ return output
+
+ def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]):
+ """Wraps original generate to enable PrefixLM attention."""
+ attn_modules = _get_attn_modules(model)
+ for attn_module in attn_modules:
+ attn_module.bias.data[:] = 1
+ output = self._original_generate(*args, **kwargs)
+ for attn_module in attn_modules:
+ attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
+ return output
+
+ setattr(model, "forward", MethodType(forward, model))
+ setattr(model, "generate", MethodType(generate, model))
+ setattr(model, "_prefix_lm_converted", True)
+ return model
+
+
+def _convert_bloom_causal_lm_to_prefix_lm(model: BloomForCausalLM) -> BloomForCausalLM:
+ """Converts a BLOOM Causal LM to a Prefix LM.
+
+ Supported HuggingFace model classes:
+ - `BloomForCausalLM`
+
+ See `convert_hf_causal_lm_to_prefix_lm` for more details.
+ """
+ if hasattr(model, "_prefix_lm_converted"):
+ return model
+ assert isinstance(model, BloomForCausalLM)
+ assert (
+ model.config.add_cross_attention == False
+ ), "Only supports BLOOM decoder-only models"
+
+ def _prepare_attn_mask(
+ self: BloomModel,
+ attention_mask: torch.Tensor,
+ bidirectional_mask: Optional[torch.Tensor],
+ input_shape: Tuple[int, int],
+ past_key_values_length: int,
+ ) -> torch.BoolTensor:
+ combined_attention_mask = None
+ device = attention_mask.device
+ (_, src_length) = input_shape
+ if src_length > 1:
+ combined_attention_mask = _make_causal_mask_bloom(
+ input_shape,
+ device=device,
+ past_key_values_length=past_key_values_length,
+ )
+ if bidirectional_mask is not None:
+ assert attention_mask.shape == bidirectional_mask.shape
+ expanded_bidirectional_mask = _expand_mask_bloom(
+ bidirectional_mask, tgt_length=src_length
+ )
+ combined_attention_mask = torch.logical_and(
+ combined_attention_mask, expanded_bidirectional_mask
+ )
+ expanded_attn_mask = _expand_mask_bloom(attention_mask, tgt_length=src_length)
+ combined_attention_mask = (
+ expanded_attn_mask
+ if combined_attention_mask is None
+ else expanded_attn_mask | combined_attention_mask
+ )
+ return combined_attention_mask
+
+ def _build_alibi_tensor(
+ self: BloomModel,
+ batch_size: int,
+ query_length: int,
+ key_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ ) -> torch.Tensor:
+ num_heads = self.config.n_head
+ closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
+ base = torch.tensor(
+ 2 ** (-(2 ** (-(math.log2(closest_power_of_2) - 3)))),
+ device=device,
+ dtype=torch.float32,
+ )
+ powers = torch.arange(
+ 1, 1 + closest_power_of_2, device=device, dtype=torch.int32
+ )
+ slopes = torch.pow(base, powers)
+ if closest_power_of_2 != num_heads:
+ extra_base = torch.tensor(
+ 2 ** (-(2 ** (-(math.log2(2 * closest_power_of_2) - 3)))),
+ device=device,
+ dtype=torch.float32,
+ )
+ num_remaining_heads = min(
+ closest_power_of_2, num_heads - closest_power_of_2
+ )
+ extra_powers = torch.arange(
+ 1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32
+ )
+ slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
+ qa = torch.arange(query_length, device=device, dtype=torch.int32).view(-1, 1)
+ ka = torch.arange(key_length, device=device, dtype=torch.int32).view(1, -1)
+ diffs = qa - ka + key_length - query_length
+ diffs = -diffs.abs()
+ alibi = slopes.view(1, num_heads, 1, 1) * diffs.view(
+ 1, 1, query_length, key_length
+ )
+ alibi = alibi.expand(batch_size, -1, -1, -1).reshape(
+ -1, query_length, key_length
+ )
+ return alibi.to(dtype)
+
+ KeyValueT = Tuple[torch.Tensor, torch.Tensor]
+
+ def forward(
+ self: BloomModel,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ bidirectional_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **deprecated_arguments,
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
+ if deprecated_arguments.pop("position_ids", False) is not False:
+ warnings.warn(
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. "
+ + "You can safely ignore passing `position_ids`.",
+ FutureWarning,
+ )
+ if len(deprecated_arguments) > 0:
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
+ output_attentions = (
+ output_attentions
+ if output_attentions is not None
+ else self.config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states
+ if output_hidden_states is not None
+ else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = (
+ return_dict if return_dict is not None else self.config.use_return_dict
+ )
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError(
+ "You cannot specify both input_ids and inputs_embeds at the same time"
+ )
+ elif input_ids is not None:
+ (batch_size, seq_length) = input_ids.shape
+ elif inputs_embeds is not None:
+ (batch_size, seq_length, _) = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+ if past_key_values is None:
+ past_key_values = tuple([None] * len(self.h))
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ hidden_states = self.word_embeddings_layernorm(inputs_embeds)
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+ if past_key_values[0] is not None:
+ tmp = past_key_values[0][0]
+ past_key_values_length = tmp.shape[2]
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+ if attention_mask is None:
+ attention_mask = torch.ones(
+ (batch_size, seq_length_with_past), device=hidden_states.device
+ )
+ else:
+ attention_mask = attention_mask.to(hidden_states.device)
+ alibi = self._build_alibi_tensor(
+ batch_size=batch_size,
+ query_length=seq_length,
+ key_length=seq_length_with_past,
+ dtype=hidden_states.dtype,
+ device=hidden_states.device,
+ )
+ causal_mask = self._prepare_attn_mask(
+ attention_mask,
+ bidirectional_mask,
+ input_shape=(batch_size, seq_length),
+ past_key_values_length=past_key_values_length,
+ )
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+ if output_hidden_states:
+ hst = (hidden_states,)
+ all_hidden_states = all_hidden_states + hst
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(
+ *inputs,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ return custom_forward
+
+ outputs = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ hidden_states,
+ alibi,
+ causal_mask,
+ head_mask[i],
+ )
+ else:
+ outputs = block(
+ hidden_states,
+ layer_past=layer_past,
+ attention_mask=causal_mask,
+ head_mask=head_mask[i],
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ alibi=alibi,
+ )
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+ if output_attentions:
+ oa = (outputs[2 if use_cache else 1],)
+ all_self_attentions = all_self_attentions + oa
+ hidden_states = self.ln_f(hidden_states)
+ if output_hidden_states:
+ hst = (hidden_states,)
+ all_hidden_states = all_hidden_states + hst
+ if not return_dict:
+ return tuple(
+ (
+ v
+ for v in [
+ hidden_states,
+ presents,
+ all_hidden_states,
+ all_self_attentions,
+ ]
+ if v is not None
+ )
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+ setattr(
+ model.transformer,
+ "_prepare_attn_mask",
+ MethodType(_prepare_attn_mask, model.transformer),
+ )
+ setattr(
+ model.transformer,
+ "_build_alibi_tensor",
+ MethodType(_build_alibi_tensor, model.transformer),
+ )
+ setattr(model.transformer, "forward", MethodType(forward, model.transformer))
+ KeyValueT = Tuple[torch.Tensor, torch.Tensor]
+
+ def forward(
+ self: BloomForCausalLM,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ bidirectional_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **deprecated_arguments,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
+ """Replacement forward method for BloomCausalLM."""
+ if deprecated_arguments.pop("position_ids", False) is not False:
+ warnings.warn(
+ "`position_ids` have no functionality in BLOOM and will be removed "
+ + "in v5.0.0. You can safely ignore passing `position_ids`.",
+ FutureWarning,
+ )
+ if len(deprecated_arguments) > 0:
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
+ return_dict = (
+ return_dict if return_dict is not None else self.config.use_return_dict
+ )
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ bidirectional_mask=bidirectional_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ lm_logits = self.lm_head(hidden_states)
+ loss = None
+ if labels is not None:
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ (batch_size, seq_length, vocab_size) = shift_logits.shape
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(
+ shift_logits.view(batch_size * seq_length, vocab_size),
+ shift_labels.view(batch_size * seq_length),
+ )
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return (loss,) + output if loss is not None else output
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self: BloomForCausalLM,
+ input_ids: torch.LongTensor,
+ past: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> dict:
+ if past:
+ input_ids = input_ids[:, -1].unsqueeze(-1)
+ bidirectional_mask = None
+ if past[0][0].shape[0] == input_ids.shape[0]:
+ past = self._convert_to_bloom_cache(past)
+ else:
+ bidirectional_mask = torch.ones_like(input_ids)
+ return {
+ "input_ids": input_ids,
+ "past_key_values": past,
+ "use_cache": True,
+ "attention_mask": attention_mask,
+ "bidirectional_mask": bidirectional_mask,
+ }
+
+ setattr(model, "forward", MethodType(forward, model))
+ setattr(
+ model,
+ "prepare_inputs_for_generation",
+ MethodType(prepare_inputs_for_generation, model),
+ )
+ setattr(model, "_prefix_lm_converted", True)
+ return model
+
+
+def _convert_opt_causal_lm_to_prefix_lm(model: OPTForCausalLM) -> OPTForCausalLM:
+ """Converts an OPT Causal LM to a Prefix LM.
+
+ Supported HuggingFace model classes:
+ - `OPTForCausalLM`
+
+ See `convert_hf_causal_lm_to_prefix_lm` for more details.
+ """
+ if hasattr(model, "_prefix_lm_converted"):
+ return model
+ assert isinstance(model, OPTForCausalLM)
+ assert (
+ model.config.add_cross_attention == False
+ ), "Only supports OPT decoder-only models"
+ setattr(model, "_original_forward", getattr(model, "forward"))
+ setattr(model, "_original_generate", getattr(model, "generate"))
+ model.model.decoder.bidirectional_mask = None
+
+ def _prepare_decoder_attention_mask(
+ self, attention_mask, input_shape, inputs_embeds, past_key_values_length
+ ):
+ combined_attention_mask = None
+ if input_shape[-1] > 1:
+ if self.bidirectional_mask == "g":
+ (bsz, src_length) = input_shape
+ combined_attention_mask = torch.zeros(
+ (bsz, 1, src_length, src_length + past_key_values_length),
+ dtype=inputs_embeds.dtype,
+ device=inputs_embeds.device,
+ )
+ else:
+ combined_attention_mask = _make_causal_mask_opt(
+ input_shape,
+ inputs_embeds.dtype,
+ past_key_values_length=past_key_values_length,
+ ).to(inputs_embeds.device)
+ if self.bidirectional_mask is not None:
+ assert attention_mask.shape == self.bidirectional_mask.shape
+ expanded_bidirectional_mask = _expand_mask_opt(
+ self.bidirectional_mask,
+ inputs_embeds.dtype,
+ tgt_len=input_shape[-1],
+ ).to(inputs_embeds.device)
+ combined_attention_mask = torch.maximum(
+ expanded_bidirectional_mask, combined_attention_mask
+ )
+ if attention_mask is not None:
+ expanded_attn_mask = _expand_mask_opt(
+ attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ ).to(inputs_embeds.device)
+ combined_attention_mask = (
+ expanded_attn_mask
+ if combined_attention_mask is None
+ else expanded_attn_mask + combined_attention_mask
+ )
+ return combined_attention_mask
+
+ setattr(
+ model.model.decoder,
+ "_prepare_decoder_attention_mask",
+ MethodType(_prepare_decoder_attention_mask, model.model.decoder),
+ )
+
+ def forward(
+ self: OPTForCausalLM,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ bidirectional_mask: Optional[torch.ByteTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ):
+ def call_og_forward():
+ return self._original_forward(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ labels=labels,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if bidirectional_mask is None:
+ return call_og_forward()
+ self.model.decoder.bidirectional_mask = bidirectional_mask
+ try:
+ outputs = call_og_forward()
+ except:
+ self.model.decoder.bidirectional_mask = None
+ raise
+ self.model.decoder.bidirectional_mask = None
+ return outputs
+
+ def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Dict[str, Any]):
+ """Wraps original generate to enable PrefixLM-style attention."""
+ self.model.decoder.bidirectional_mask = "g"
+ try:
+ output = self._original_generate(*args, **kwargs)
+ except:
+ self.model.decoder.bidirectional_mask = None
+ raise
+ self.model.decoder.bidirectional_mask = None
+ return output
+
+ setattr(model, "forward", MethodType(forward, model))
+ setattr(model, "generate", MethodType(generate, model))
+ setattr(model, "_prefix_lm_converted", True)
+ return model
+
+
+_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM, OPTForCausalLM)
+CAUSAL_LM_TYPES = Union[
+ GPT2LMHeadModel,
+ GPTJForCausalLM,
+ GPTNeoForCausalLM,
+ GPTNeoXForCausalLM,
+ BloomForCausalLM,
+ OPTForCausalLM,
+]
+
+
+def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:
+ """Converts a HuggingFace Causal LM to a Prefix LM.
+
+ Supported HuggingFace model classes:
+ - `GPT2LMHeadModel`
+ - `GPTNeoForCausalLM`
+ - `GPTNeoXForCausalLM`
+ - `GPTJForCausalLM`
+ - `BloomForCausalLM`
+ - `OPTForCausalLM`
+
+ Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the
+ `generate` method and/or select underlying methods depending on the model class.
+
+ These changes preserve the model API, but add a new input to `forward`: "bidirectional_mask".
+
+ Notes on training:
+ To actually train the converted model as a Prefix LM, training batches will need to indicate
+ the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.
+
+    **This is not a standard input and requires custom logic either within or after your dataloader.**
+
+ In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`
+ such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.
+ That is, the prefix portion of the sequence should not generate any loss. Loss should only be
+ generated by the target portion of the sequence.
+
+ Notes on `GPTNeoForCausalLM`:
+ To simplify the implementation, "global" and "local" attention layers are handled differently.
+ For "global" layers, we handle conversion as described above. For "local" layers, which use a
+ causal attention mask within a restricted local window, we do not alter the masking.
+
+ Notes on `forward` method conversion:
+ After conversion, the `forward` method will handle a new input, `bidirectional_mask`,
+ which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions
+ belonging to the prefix (prefix tokens can attend to one another bidirectionally), and
+ 0 indicates token positions belonging to the target.
+
+ The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing
+ causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset
+ the causal masks before returning the result.
+
+ Notes on `generate` method conversion:
+ After conversion, the `generate` method will have the same signature but will internally
+ convert all causal masks to be purely bidirectional, call the original `generate` method, and
+ (where appropriate) reset the causal masks before returning the result.
+
+ This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token
+ "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates
+ each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one
+ another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and
+ previously-generated tokens (also as expected in a Prefix LM).
+
+    To preserve the API, the original methods are renamed to `_original_forward` and
+    `_original_generate`, and replaced with new `forward` and `generate` methods that wrap
+    them, respectively, although implementation details vary by model class.
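+
+    Example (a minimal training-batch sketch; the token ids and prefix/target split
+    below are illustrative assumptions, not values produced by this module):
+
+    ```python
+    model = convert_hf_causal_lm_to_prefix_lm(model)
+    input_ids = torch.tensor([[101, 102, 103, 104, 105]])    # prefix + target tokens
+    bidirectional_mask = torch.tensor([[1, 1, 1, 0, 0]])     # 1 = prefix, 0 = target
+    labels = input_ids.clone()
+    labels[bidirectional_mask == 1] = -100                   # no loss on the prefix
+    outputs = model(
+        input_ids=input_ids,
+        attention_mask=torch.ones_like(input_ids),
+        bidirectional_mask=bidirectional_mask,
+        labels=labels,
+    )
+    ```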
+ """
+ if isinstance(model, _SUPPORTED_GPT_MODELS):
+ return _convert_gpt_causal_lm_to_prefix_lm(model)
+ elif isinstance(model, BloomForCausalLM):
+ return _convert_bloom_causal_lm_to_prefix_lm(model)
+ elif isinstance(model, OPTForCausalLM):
+ return _convert_opt_causal_lm_to_prefix_lm(model)
+ else:
+ raise TypeError(
+ f"Cannot convert model to Prefix LM. "
+ + f"Model does not belong to set of supported HF models:"
+ + f"\n{_SUPPORTED_HF_MODELS}"
+ )
+
+
+def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):
+ """Attempts to add bidirectional_mask to batch if missing.
+
+ Raises:
+ KeyError if bidirectional_mask is missing and can't be inferred
+ """
+ if "bidirectional_mask" not in batch:
+ if batch.get("mode", None) == "icl_task":
+ batch["bidirectional_mask"] = batch["attention_mask"].clone()
+ for i, continuation_indices in enumerate(batch["continuation_indices"]):
+ batch["bidirectional_mask"][i, continuation_indices] = 0
+ elif "labels" in batch and "attention_mask" in batch:
+ batch["bidirectional_mask"] = torch.logical_and(
+ torch.eq(batch["attention_mask"], 1), torch.eq(batch["labels"], -100)
+ ).type_as(batch["attention_mask"])
+ else:
+ raise KeyError(
+ "No bidirectional_mask in batch and not sure how to construct one."
+ )
diff --git a/model/llava/model/language_model/mpt/meta_init_context.py b/model/llava/model/language_model/mpt/meta_init_context.py
new file mode 100644
index 0000000000000000000000000000000000000000..208ab255cedb65e5c444b1c5fa5abf72cbdb1512
--- /dev/null
+++ b/model/llava/model/language_model/mpt/meta_init_context.py
@@ -0,0 +1,111 @@
+from contextlib import contextmanager
+
+import torch
+import torch.nn as nn
+
+
+@contextmanager
+def init_empty_weights(include_buffers: bool = False):
+ """Meta initialization context manager.
+
+ A context manager under which models are initialized with all parameters
+ on the meta device, therefore creating an empty model. Useful when just
+    initializing the model would exhaust the available RAM.
+
+ Args:
+ include_buffers (`bool`, *optional*, defaults to `False`): Whether or
+ not to also put all buffers on the meta device while initializing.
+
+ Example:
+ ```python
+ import torch.nn as nn
+
+    # Initialize a model with 100 billion parameters in no time and without using any RAM.
+ with init_empty_weights():
+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
+ ```
+
+    Any model created under this context manager has no weights. As such you can't do something like
+    `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
+ """
+ with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
+ yield f
+
+
+@contextmanager
+def init_on_device(device: torch.device, include_buffers: bool = False):
+ """Device initialization context manager.
+
+ A context manager under which models are initialized with all parameters
+ on the specified device.
+
+ Args:
+ device (`torch.device`): Device to initialize all parameters on.
+ include_buffers (`bool`, *optional*, defaults to `False`): Whether or
+            not to also put all buffers on the specified device while initializing.
+
+ Example:
+ ```python
+ import torch.nn as nn
+
+ with init_on_device(device=torch.device("cuda")):
+        tst = nn.Linear(100, 100)  # on `cuda` device
+ ```
+ """
+ old_register_parameter = nn.Module.register_parameter
+ if include_buffers:
+ old_register_buffer = nn.Module.register_buffer
+
+ def register_empty_parameter(module, name, param):
+ old_register_parameter(module, name, param)
+ if param is not None:
+ param_cls = type(module._parameters[name])
+ kwargs = module._parameters[name].__dict__
+ module._parameters[name] = param_cls(
+ module._parameters[name].to(device), **kwargs
+ )
+
+ def register_empty_buffer(module, name, buffer):
+ old_register_buffer(module, name, buffer)
+ if buffer is not None:
+ module._buffers[name] = module._buffers[name].to(device)
+
+ if include_buffers:
+ tensor_constructors_to_patch = {
+ torch_function_name: getattr(torch, torch_function_name)
+ for torch_function_name in ["empty", "zeros", "ones", "full"]
+ }
+ else:
+ tensor_constructors_to_patch = {}
+
+ def patch_tensor_constructor(fn):
+ def wrapper(*args, **kwargs):
+ kwargs["device"] = device
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ try:
+ nn.Module.register_parameter = register_empty_parameter
+ if include_buffers:
+ nn.Module.register_buffer = register_empty_buffer
+ for torch_function_name in tensor_constructors_to_patch.keys():
+ setattr(
+ torch,
+ torch_function_name,
+ patch_tensor_constructor(getattr(torch, torch_function_name)),
+ )
+ yield
+ finally:
+ nn.Module.register_parameter = old_register_parameter
+ if include_buffers:
+ nn.Module.register_buffer = old_register_buffer
+ for (
+ torch_function_name,
+ old_torch_function,
+ ) in tensor_constructors_to_patch.items():
+ setattr(torch, torch_function_name, old_torch_function)
diff --git a/model/llava/model/language_model/mpt/modeling_mpt.py b/model/llava/model/language_model/mpt/modeling_mpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..98ae82229180862e6b6c648baecc603e1a0381e3
--- /dev/null
+++ b/model/llava/model/language_model/mpt/modeling_mpt.py
@@ -0,0 +1,538 @@
+"""A simple, flexible implementation of a GPT model.
+
+Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
+"""
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from transformers import (PreTrainedModel, PreTrainedTokenizer,
+ PreTrainedTokenizerFast)
+from transformers.modeling_outputs import (BaseModelOutputWithPast,
+ CausalLMOutputWithPast)
+
+from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
+from .attention import attn_bias_shape, build_attn_bias
+from .blocks import MPTBlock
+from .configuration_mpt import MPTConfig
+from .custom_embedding import SharedEmbedding
+from .hf_prefixlm_converter import (add_bidirectional_mask_if_missing,
+ convert_hf_causal_lm_to_prefix_lm)
+from .meta_init_context import init_empty_weights
+from .norm import NORM_CLASS_REGISTRY
+from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
+
+try:
+ from .flash_attn_triton import flash_attn_func
+except:
+ pass
+Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
+
+
+class MPTPreTrainedModel(PreTrainedModel):
+ config_class = MPTConfig
+ base_model_prefix = "model"
+ _no_split_modules = ["MPTBlock"]
+
+
+class MPTModel(MPTPreTrainedModel):
+ def __init__(self, config: MPTConfig):
+ config._validate_config()
+ super().__init__(config)
+ self.attn_impl = config.attn_config["attn_impl"]
+ self.prefix_lm = config.attn_config["prefix_lm"]
+ self.attn_uses_sequence_id = config.attn_config["attn_uses_sequence_id"]
+ self.alibi = config.attn_config["alibi"]
+ self.alibi_bias_max = config.attn_config["alibi_bias_max"]
+ if config.init_device == "mixed":
+ if dist.get_local_rank() == 0:
+ config.init_device = "cpu"
+ else:
+ config.init_device = "meta"
+ if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
+ norm_options = " | ".join(NORM_CLASS_REGISTRY.keys())
+ raise NotImplementedError(
+ f"Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options})."
+ )
+ norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
+ self.embedding_fraction = config.embedding_fraction
+ self.wte = SharedEmbedding(
+ config.vocab_size, config.d_model, device=config.init_device
+ )
+ if not self.alibi:
+ self.wpe = torch.nn.Embedding(
+ config.max_seq_len, config.d_model, device=config.init_device
+ )
+ self.emb_drop = nn.Dropout(config.emb_pdrop)
+ self.blocks = nn.ModuleList(
+ [
+ MPTBlock(device=config.init_device, **config.to_dict())
+ for _ in range(config.n_layers)
+ ]
+ )
+ self.norm_f = norm_class(config.d_model, device=config.init_device)
+ if config.init_device != "meta":
+ print(
+ f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.'
+ )
+ self.apply(self.param_init_fn)
+ self.is_causal = not self.prefix_lm
+ self._attn_bias_initialized = False
+ self.attn_bias = None
+ self.attn_bias_shape = attn_bias_shape(
+ self.attn_impl,
+ config.n_heads,
+ config.max_seq_len,
+ self.alibi,
+ prefix_lm=self.prefix_lm,
+ causal=self.is_causal,
+ use_sequence_id=self.attn_uses_sequence_id,
+ )
+ if config.no_bias:
+ for module in self.modules():
+ if hasattr(module, "bias") and isinstance(module.bias, nn.Parameter):
+ if config.verbose:
+ warnings.warn(f"Removing bias ({module.bias}) from {module}.")
+ module.register_parameter("bias", None)
+ if config.verbose and config.verbose > 2:
+ print(self)
+ if "verbose" not in self.config.init_config:
+ self.config.init_config["verbose"] = self.config.verbose
+ if self.config.init_config["verbose"] > 1:
+ init_fn_name = self.config.init_config["name"]
+ warnings.warn(f"Using {init_fn_name} initialization.")
+ self.gradient_checkpointing = False
+
+ def get_input_embeddings(self):
+ return self.wte
+
+ def set_input_embeddings(self, value):
+ self.wte = value
+
+ @torch.no_grad()
+ def _attn_bias(
+ self,
+ device,
+ dtype,
+ attention_mask: Optional[torch.ByteTensor] = None,
+ prefix_mask: Optional[torch.ByteTensor] = None,
+ sequence_id: Optional[torch.LongTensor] = None,
+ ):
+ if not self._attn_bias_initialized:
+ if self.attn_bias_shape:
+ self.attn_bias = torch.zeros(
+ self.attn_bias_shape, device=device, dtype=dtype
+ )
+ self.attn_bias = build_attn_bias(
+ self.attn_impl,
+ self.attn_bias,
+ self.config.n_heads,
+ self.config.max_seq_len,
+ causal=self.is_causal,
+ alibi=self.alibi,
+ alibi_bias_max=self.alibi_bias_max,
+ )
+ self._attn_bias_initialized = True
+ if self.attn_impl == "flash":
+ return (self.attn_bias, attention_mask)
+ if self.attn_bias is not None:
+ self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
+ attn_bias = self.attn_bias
+ if self.prefix_lm:
+ assert isinstance(attn_bias, torch.Tensor)
+ assert isinstance(prefix_mask, torch.Tensor)
+ attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
+ if self.attn_uses_sequence_id and sequence_id is not None:
+ assert isinstance(attn_bias, torch.Tensor)
+ attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
+ if attention_mask is not None:
+ s_k = attention_mask.shape[-1]
+ if attn_bias is None:
+ attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
+ else:
+ _s_k = max(0, attn_bias.size(-1) - s_k)
+ attn_bias = attn_bias[:, :, :, _s_k:]
+ if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
+ raise ValueError(
+ f"attention_mask shape={attention_mask.shape} "
+ + f"and prefix_mask shape={prefix_mask.shape} are not equal."
+ )
+ min_val = torch.finfo(attn_bias.dtype).min
+ attn_bias = attn_bias.masked_fill(
+ ~attention_mask.view(-1, 1, 1, s_k), min_val
+ )
+ return (attn_bias, None)
+
+ def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
+ (s_k, s_q) = attn_bias.shape[-2:]
+ if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
+ raise ValueError(
+ "attn_bias does not match the expected shape. "
+ + f"The last two dimensions should both be {self.config.max_length} "
+ + f"but are {s_k} and {s_q}."
+ )
+ seq_len = prefix_mask.shape[-1]
+ if seq_len > self.config.max_seq_len:
+ raise ValueError(
+ f"prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}"
+ )
+ attn_bias = attn_bias[..., :seq_len, :seq_len]
+ causal = torch.tril(
+ torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)
+ ).view(1, 1, seq_len, seq_len)
+ prefix = prefix_mask.view(-1, 1, 1, seq_len)
+ cannot_attend = ~torch.logical_or(causal, prefix.bool())
+ min_val = torch.finfo(attn_bias.dtype).min
+ attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
+ return attn_bias
+
+ def _apply_sequence_id(
+ self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor
+ ):
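+ # With packed samples, block attention between tokens that carry different sequence_id values.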
+ seq_len = sequence_id.shape[-1]
+ if seq_len > self.config.max_seq_len:
+ raise ValueError(
+ f"sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}"
+ )
+ attn_bias = attn_bias[..., :seq_len, :seq_len]
+ cannot_attend = torch.logical_not(
+ torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))
+ ).unsqueeze(1)
+ min_val = torch.finfo(attn_bias.dtype).min
+ attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
+ return attn_bias
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
+ attention_mask: Optional[torch.ByteTensor] = None,
+ prefix_mask: Optional[torch.ByteTensor] = None,
+ sequence_id: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ ):
+ return_dict = (
+ return_dict if return_dict is not None else self.config.return_dict
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ if attention_mask is not None:
+ attention_mask = attention_mask.bool()
+ if prefix_mask is not None:
+ prefix_mask = prefix_mask.bool()
+ if not return_dict:
+ raise NotImplementedError(
+ "return_dict False is not implemented yet for MPT"
+ )
+ if output_attentions:
+ if self.attn_impl != "torch":
+ raise NotImplementedError(
+ "output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`."
+ )
+ if (
+ attention_mask is not None
+ and attention_mask[:, 0].sum() != attention_mask.shape[0]
+ and self.training
+ ):
+ raise NotImplementedError(
+ "MPT does not support training with left padding."
+ )
+ if self.prefix_lm and prefix_mask is None:
+ raise ValueError(
+ "prefix_mask is a required argument when MPT is configured with prefix_lm=True."
+ )
+ if self.training:
+ if self.attn_uses_sequence_id and sequence_id is None:
+ raise ValueError(
+ "sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True "
+ + "and the model is in train mode."
+ )
+ elif self.attn_uses_sequence_id is False and sequence_id is not None:
+ warnings.warn(
+ "MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. "
+ + "This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True."
+ )
+ if input_ids is not None:
+ S = input_ids.size(1)
+ assert (
+ S <= self.config.max_seq_len
+ ), f"Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}"
+ tok_emb = self.wte(input_ids)
+ else:
+ assert inputs_embeds is not None
+ assert (
+ self.alibi
+ ), "inputs_embeds is not implemented for MPT unless for alibi."
+ S = inputs_embeds.size(1)
+ tok_emb = inputs_embeds
+ if self.alibi:
+ x = tok_emb
+ else:
+ past_position = 0
+ if past_key_values is not None:
+ if len(past_key_values) != self.config.n_layers:
+ raise ValueError(
+ f"past_key_values must provide a past_key_value for each attention "
+ + f"layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r})."
+ )
+ past_position = past_key_values[0][0].size(1)
+ if self.attn_impl == "torch":
+ past_position = past_key_values[0][0].size(3)
+ if S + past_position > self.config.max_seq_len:
+ raise ValueError(
+ f"Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}."
+ )
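+ # Learned positional embeddings: positions continue after the cached past and are shifted down past left-padding so pad tokens do not consume position slots.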
+ pos = torch.arange(
+ past_position,
+ S + past_position,
+ dtype=torch.long,
+ device=input_ids.device,
+ ).unsqueeze(0)
+ if attention_mask is not None:
+ pos = torch.clamp(
+ pos
+ - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[
+ :, past_position:
+ ],
+ min=0,
+ )
+ pos_emb = self.wpe(pos)
+ x = tok_emb + pos_emb
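+ # embedding_fraction < 1 shrinks the gradient reaching the embeddings while keeping the forward value unchanged (x * frac + x.detach() * (1 - frac)).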
+ if self.embedding_fraction == 1:
+ x = self.emb_drop(x)
+ else:
+ x_shrunk = x * self.embedding_fraction + x.detach() * (
+ 1 - self.embedding_fraction
+ )
+ assert isinstance(self.emb_drop, nn.Module)
+ x = self.emb_drop(x_shrunk)
+ (attn_bias, attention_mask) = self._attn_bias(
+ device=x.device,
+ dtype=torch.float32,
+ attention_mask=attention_mask,
+ prefix_mask=prefix_mask,
+ sequence_id=sequence_id,
+ )
+ if use_cache and past_key_values is None:
+ past_key_values = [() for _ in range(self.config.n_layers)]
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ for b_idx, block in enumerate(self.blocks):
+ if output_hidden_states:
+ assert all_hidden_states is not None
+ all_hidden_states = all_hidden_states + (x,)
+ past_key_value = (
+ past_key_values[b_idx] if past_key_values is not None else None
+ )
+ if self.gradient_checkpointing and self.training:
+ (x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(
+ block, x, past_key_value, attn_bias, attention_mask, self.is_causal
+ )
+ else:
+ (x, attn_weights, past_key_value) = block(
+ x,
+ past_key_value=past_key_value,
+ attn_bias=attn_bias,
+ attention_mask=attention_mask,
+ is_causal=self.is_causal,
+ )
+ if past_key_values is not None:
+ past_key_values[b_idx] = past_key_value
+ if output_attentions:
+ assert all_self_attns is not None
+ all_self_attns = all_self_attns + (attn_weights,)
+ x = self.norm_f(x)
+ if output_hidden_states:
+ assert all_hidden_states is not None
+ all_hidden_states = all_hidden_states + (x,)
+ return BaseModelOutputWithPast(
+ last_hidden_state=x,
+ past_key_values=past_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+ def param_init_fn(self, module):
+ init_fn_name = self.config.init_config["name"]
+ MODEL_INIT_REGISTRY[init_fn_name](
+ module=module,
+ n_layers=self.config.n_layers,
+ d_model=self.config.d_model,
+ **self.config.init_config,
+ )
+
+ def fsdp_wrap_fn(self, module):
+ return isinstance(module, MPTBlock)
+
+ def activation_checkpointing_fn(self, module):
+ return isinstance(module, MPTBlock)
+
+
+class MPTForCausalLM(MPTPreTrainedModel):
+ def __init__(self, config: MPTConfig):
+ super().__init__(config)
+ if not config.tie_word_embeddings:
+ raise ValueError("MPTForCausalLM only supports tied word embeddings")
+ print(f"Instantiating an MPTForCausalLM model from {__file__}")
+ self.transformer = MPTModel(config)
+ for child in self.transformer.children():
+ if isinstance(child, torch.nn.ModuleList):
+ continue
+ if isinstance(child, torch.nn.Module):
+ child._fsdp_wrap = True
+ self.logit_scale = None
+ if config.logit_scale is not None:
+ logit_scale = config.logit_scale
+ if isinstance(logit_scale, str):
+ if logit_scale == "inv_sqrt_d_model":
+ logit_scale = 1 / math.sqrt(config.d_model)
+ else:
+ raise ValueError(
+ f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'."
+ )
+ self.logit_scale = logit_scale
+
+ def get_input_embeddings(self):
+ return self.transformer.wte
+
+ def set_input_embeddings(self, value):
+ self.transformer.wte = value
+
+ def get_output_embeddings(self):
+ return self.transformer.wte
+
+ def set_output_embeddings(self, new_embeddings):
+ self.transformer.wte = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.transformer = decoder
+
+ def get_decoder(self):
+ return self.transformer
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
+ attention_mask: Optional[torch.ByteTensor] = None,
+ prefix_mask: Optional[torch.ByteTensor] = None,
+ sequence_id: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ return_dict: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ ):
+ return_dict = (
+ return_dict if return_dict is not None else self.config.return_dict
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ if inputs_embeds is not None:
+ raise NotImplementedError(
+ "inputs_embeds has to be None (for hf/peft support)."
+ )
+ outputs = self.transformer(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ prefix_mask=prefix_mask,
+ sequence_id=sequence_id,
+ return_dict=return_dict,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ use_cache=use_cache,
+ )
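+ # Weight tying: the token-embedding module is reused as the output projection; the second positional argument presumably switches the shared embedding into its linear ("unembed") mode.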
+ logits = self.transformer.wte(
+ outputs.last_hidden_state.to(self.transformer.wte.weight.device), True
+ )
+ if self.logit_scale is not None:
+ if self.logit_scale == 0:
+ warnings.warn(
+ f"Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs."
+ )
+ logits *= self.logit_scale
+ loss = None
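+ # Next-token objective: labels are rolled left by one and the final position is set to -100 (ignored) before computing cross-entropy.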
+ if labels is not None:
+ labels = torch.roll(labels, shifts=-1)
+ labels[:, -1] = -100
+ loss = F.cross_entropy(
+ logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1)
+ )
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def param_init_fn(self, module):
+ init_fn_name = self.config.init_config["name"]
+ MODEL_INIT_REGISTRY[init_fn_name](
+ module=module,
+ n_layers=self.config.n_layers,
+ d_model=self.config.d_model,
+ **self.config.init_config,
+ )
+
+ def fsdp_wrap_fn(self, module):
+ return isinstance(module, MPTBlock)
+
+ def activation_checkpointing_fn(self, module):
+ return isinstance(module, MPTBlock)
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
+ ):
+ if inputs_embeds is not None:
+ raise NotImplementedError("inputs_embeds is not implemented for MPT yet")
+ attention_mask = kwargs["attention_mask"].bool()
+ if attention_mask[:, -1].sum() != attention_mask.shape[0]:
+ raise NotImplementedError(
+ "MPT does not support generation with right padding."
+ )
+ if self.transformer.attn_uses_sequence_id and self.training:
+ sequence_id = torch.zeros_like(input_ids[:1])
+ else:
+ sequence_id = None
+ if past_key_values is not None:
+ input_ids = input_ids[:, -1].unsqueeze(-1)
+ if self.transformer.prefix_lm:
+ prefix_mask = torch.ones_like(attention_mask)
+ if kwargs.get("use_cache") == False:
+ raise NotImplementedError(
+ "MPT with prefix_lm=True does not support use_cache=False."
+ )
+ else:
+ prefix_mask = None
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "prefix_mask": prefix_mask,
+ "sequence_id": sequence_id,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache", True),
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ """Used by HuggingFace generate when using beam search with kv-caching.
+
+ See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133
+ for an example in transformers.
+ """
+ reordered_past = []
+ for layer_past in past_key_values:
+ reordered_past += [
+ tuple(
+ (past_state.index_select(0, beam_idx) for past_state in layer_past)
+ )
+ ]
+ return reordered_past
diff --git a/model/llava/model/language_model/mpt/norm.py b/model/llava/model/language_model/mpt/norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..85291eadbbfc407ff43f88d699bf4853ca0ff2cf
--- /dev/null
+++ b/model/llava/model/language_model/mpt/norm.py
@@ -0,0 +1,106 @@
+import torch
+
+
+def _cast_if_autocast_enabled(tensor):
+ if torch.is_autocast_enabled():
+ if tensor.device.type == "cuda":
+ dtype = torch.get_autocast_gpu_dtype()
+ elif tensor.device.type == "cpu":
+ dtype = torch.get_autocast_cpu_dtype()
+ else:
+ raise NotImplementedError()
+ return tensor.to(dtype=dtype)
+ return tensor
+
+
+class LPLayerNorm(torch.nn.LayerNorm):
+ def __init__(
+ self,
+ normalized_shape,
+ eps=1e-05,
+ elementwise_affine=True,
+ device=None,
+ dtype=None,
+ ):
+ super().__init__(
+ normalized_shape=normalized_shape,
+ eps=eps,
+ elementwise_affine=elementwise_affine,
+ device=device,
+ dtype=dtype,
+ )
+
+ def forward(self, x):
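+ # Low-precision LayerNorm: cast the input and parameters to the autocast dtype, then run layer_norm with autocast disabled so the kernel itself executes in that dtype.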
+ module_device = x.device
+ downcast_x = _cast_if_autocast_enabled(x)
+ downcast_weight = (
+ _cast_if_autocast_enabled(self.weight)
+ if self.weight is not None
+ else self.weight
+ )
+ downcast_bias = (
+ _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
+ )
+ with torch.autocast(enabled=False, device_type=module_device.type):
+ return torch.nn.functional.layer_norm(
+ downcast_x,
+ self.normalized_shape,
+ downcast_weight,
+ downcast_bias,
+ self.eps,
+ )
+
+
+def rms_norm(x, weight=None, eps=1e-05):
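+ # RMSNorm: scale x by 1 / sqrt(mean(x^2) + eps) over the last dimension, optionally multiplied by a learned weight; no mean-centering and no bias.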
+ output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
+ if weight is not None:
+ return output * weight
+ return output
+
+
+class RMSNorm(torch.nn.Module):
+ def __init__(
+ self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None
+ ):
+ super().__init__()
+ self.eps = eps
+ if weight:
+ self.weight = torch.nn.Parameter(
+ torch.ones(normalized_shape, dtype=dtype, device=device)
+ )
+ else:
+ self.register_parameter("weight", None)
+
+ def forward(self, x):
+ return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
+
+
+class LPRMSNorm(RMSNorm):
+ def __init__(
+ self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None
+ ):
+ super().__init__(
+ normalized_shape=normalized_shape,
+ eps=eps,
+ weight=weight,
+ dtype=dtype,
+ device=device,
+ )
+
+ def forward(self, x):
+ downcast_x = _cast_if_autocast_enabled(x)
+ downcast_weight = (
+ _cast_if_autocast_enabled(self.weight)
+ if self.weight is not None
+ else self.weight
+ )
+ with torch.autocast(enabled=False, device_type=x.device.type):
+ return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype)
+
+
+NORM_CLASS_REGISTRY = {
+ "layernorm": torch.nn.LayerNorm,
+ "low_precision_layernorm": LPLayerNorm,
+ "rmsnorm": RMSNorm,
+ "low_precision_rmsnorm": LPRMSNorm,
+}
diff --git a/model/llava/model/language_model/mpt/param_init_fns.py b/model/llava/model/language_model/mpt/param_init_fns.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c1d17a22a62e4411a537e2d7c0c96422e4a4174
--- /dev/null
+++ b/model/llava/model/language_model/mpt/param_init_fns.py
@@ -0,0 +1,419 @@
+import math
+import warnings
+from collections.abc import Sequence
+from functools import partial
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+
+from .norm import NORM_CLASS_REGISTRY
+
+
+def torch_default_param_init_fn_(module: nn.Module, verbose: int = 0, **kwargs):
+ del kwargs
+ if verbose > 1:
+ warnings.warn(f"Initializing network using module's reset_parameters attribute")
+ if hasattr(module, "reset_parameters"):
+ module.reset_parameters()
+
+
+def fused_init_helper_(module: nn.Module, init_fn_):
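+ # Fused modules (e.g. a combined QKV projection) record (dim, split indices) in module._fused; each slice along that dim is initialized independently.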
+ _fused = getattr(module, "_fused", None)
+ if _fused is None:
+ raise RuntimeError(f"Internal logic error")
+ (dim, splits) = _fused
+ splits = (0, *splits, module.weight.size(dim))
+ for s, e in zip(splits[:-1], splits[1:]):
+ slice_indices = [slice(None)] * module.weight.ndim
+ slice_indices[dim] = slice(s, e)
+ init_fn_(module.weight[slice_indices])
+
+
+def generic_param_init_fn_(
+ module: nn.Module,
+ init_fn_,
+ n_layers: int,
+ d_model: Optional[int] = None,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ verbose: int = 0,
+ **kwargs,
+):
+ del kwargs
+ if verbose > 1:
+ warnings.warn(f"If model has bias parameters they are initialized to 0.")
+ init_div_is_residual = init_div_is_residual
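+ # Output/residual projections flagged with _is_residual are divided by sqrt(2 * n_layers) after init, the GPT-2-style depth scaling.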
+ if init_div_is_residual is False:
+ div_is_residual = 1.0
+ elif init_div_is_residual is True:
+ div_is_residual = math.sqrt(2 * n_layers)
+ elif isinstance(init_div_is_residual, float) or isinstance(
+ init_div_is_residual, int
+ ):
+ div_is_residual = init_div_is_residual
+ elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
+ div_is_residual = float(init_div_is_residual)
+ else:
+ div_is_residual = 1.0
+ raise ValueError(
+ f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}"
+ )
+ if init_div_is_residual is not False:
+ if verbose > 1:
+ warnings.warn(
+ f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. "
+ + f"Set `init_div_is_residual: false` in init config to disable this."
+ )
+ if isinstance(module, nn.Linear):
+ if hasattr(module, "_fused"):
+ fused_init_helper_(module, init_fn_)
+ else:
+ init_fn_(module.weight)
+ if module.bias is not None:
+ torch.nn.init.zeros_(module.bias)
+ if init_div_is_residual is not False and getattr(module, "_is_residual", False):
+ with torch.no_grad():
+ module.weight.div_(div_is_residual)
+ elif isinstance(module, nn.Embedding):
+ if emb_init_std is not None:
+ std = emb_init_std
+ if std == 0:
+ warnings.warn(f"Embedding layer initialized to 0.")
+ emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
+ if verbose > 1:
+ warnings.warn(
+ f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}."
+ )
+ elif emb_init_uniform_lim is not None:
+ lim = emb_init_uniform_lim
+ if isinstance(lim, Sequence):
+ if len(lim) > 2:
+ raise ValueError(
+ f"Uniform init requires a min and a max limit. User input: {lim}."
+ )
+ if lim[0] == lim[1]:
+ warnings.warn(f"Embedding layer initialized to {lim[0]}.")
+ else:
+ if lim == 0:
+ warnings.warn(f"Embedding layer initialized to 0.")
+ lim = [-lim, lim]
+ (a, b) = lim
+ emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
+ if verbose > 1:
+ warnings.warn(
+ f"Embedding layer initialized using uniform distribution in range {lim}."
+ )
+ else:
+ emb_init_fn_ = init_fn_
+ emb_init_fn_(module.weight)
+ elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
+ if verbose > 1:
+ warnings.warn(
+ f"Norm weights are set to 1. If norm layer has a bias it is initialized to 0."
+ )
+ if hasattr(module, "weight") and module.weight is not None:
+ torch.nn.init.ones_(module.weight)
+ if hasattr(module, "bias") and module.bias is not None:
+ torch.nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.MultiheadAttention):
+ if module._qkv_same_embed_dim:
+ assert module.in_proj_weight is not None
+ assert (
+ module.q_proj_weight is None
+ and module.k_proj_weight is None
+ and (module.v_proj_weight is None)
+ )
+ assert d_model is not None
+ _d = d_model
+ splits = (0, _d, 2 * _d, 3 * _d)
+ for s, e in zip(splits[:-1], splits[1:]):
+ init_fn_(module.in_proj_weight[s:e])
+ else:
+ assert (
+ module.q_proj_weight is not None
+ and module.k_proj_weight is not None
+ and (module.v_proj_weight is not None)
+ )
+ assert module.in_proj_weight is None
+ init_fn_(module.q_proj_weight)
+ init_fn_(module.k_proj_weight)
+ init_fn_(module.v_proj_weight)
+ if module.in_proj_bias is not None:
+ torch.nn.init.zeros_(module.in_proj_bias)
+ if module.bias_k is not None:
+ torch.nn.init.zeros_(module.bias_k)
+ if module.bias_v is not None:
+ torch.nn.init.zeros_(module.bias_v)
+ init_fn_(module.out_proj.weight)
+ if init_div_is_residual is not False and getattr(
+ module.out_proj, "_is_residual", False
+ ):
+ with torch.no_grad():
+ module.out_proj.weight.div_(div_is_residual)
+ if module.out_proj.bias is not None:
+ torch.nn.init.zeros_(module.out_proj.bias)
+ else:
+ for _ in module.parameters(recurse=False):
+ raise NotImplementedError(
+ f"{module.__class__.__name__} parameters are not initialized by param_init_fn."
+ )
+
+
+def _normal_init_(std, mean=0.0):
+ return partial(torch.nn.init.normal_, mean=mean, std=std)
+
+
+def _normal_param_init_fn_(
+ module: nn.Module,
+ std: float,
+ n_layers: int,
+ d_model: Optional[int] = None,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ verbose: int = 0,
+ **kwargs,
+):
+ del kwargs
+ init_fn_ = _normal_init_(std=std)
+ if verbose > 1:
+ warnings.warn(f"Using torch.nn.init.normal_ init fn mean=0.0, std={std}")
+ generic_param_init_fn_(
+ module=module,
+ init_fn_=init_fn_,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=init_div_is_residual,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+def baseline_param_init_fn_(
+ module: nn.Module,
+ init_std: float,
+ n_layers: int,
+ d_model: Optional[int] = None,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ verbose: int = 0,
+ **kwargs,
+):
+ del kwargs
+ if init_std is None:
+ raise ValueError(
+ "You must set model.init_config['init_std'] to a float value to use the default initialization scheme."
+ )
+ _normal_param_init_fn_(
+ module=module,
+ std=init_std,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=init_div_is_residual,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+def small_param_init_fn_(
+ module: nn.Module,
+ n_layers: int,
+ d_model: int,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ verbose: int = 0,
+ **kwargs,
+):
+ del kwargs
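+ # "Small init": std = sqrt(2 / (5 * d_model)), the scheme the GPT-NeoX initialization below builds on (Nguyen & Salazar, 2019).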
+ std = math.sqrt(2 / (5 * d_model))
+ _normal_param_init_fn_(
+ module=module,
+ std=std,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=init_div_is_residual,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+def neox_param_init_fn_(
+ module: nn.Module,
+ n_layers: int,
+ d_model: int,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ verbose: int = 0,
+ **kwargs,
+):
+ """From section 2.3.1 of GPT-NeoX-20B:
+
+ An Open-Source Autoregressive Language Model, Black et al. (2022);
+ see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
+ and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
+ """
+ del kwargs
+ residual_div = n_layers / math.sqrt(10)
+ if verbose > 1:
+ warnings.warn(f"setting init_div_is_residual to {residual_div}")
+ small_param_init_fn_(
+ module=module,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=residual_div,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+def kaiming_uniform_param_init_fn_(
+ module: nn.Module,
+ n_layers: int,
+ d_model: Optional[int] = None,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ init_gain: float = 0,
+ fan_mode: str = "fan_in",
+ init_nonlinearity: str = "leaky_relu",
+ verbose: int = 0,
+ **kwargs,
+):
+ del kwargs
+ if verbose > 1:
+ warnings.warn(
+ f"Using nn.init.kaiming_uniform_ init fn with parameters: "
+ + f"a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}"
+ )
+ kaiming_uniform_ = partial(
+ nn.init.kaiming_uniform_,
+ a=init_gain,
+ mode=fan_mode,
+ nonlinearity=init_nonlinearity,
+ )
+ generic_param_init_fn_(
+ module=module,
+ init_fn_=kaiming_uniform_,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=init_div_is_residual,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+def kaiming_normal_param_init_fn_(
+ module: nn.Module,
+ n_layers: int,
+ d_model: Optional[int] = None,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ init_gain: float = 0,
+ fan_mode: str = "fan_in",
+ init_nonlinearity: str = "leaky_relu",
+ verbose: int = 0,
+ **kwargs,
+):
+ del kwargs
+ if verbose > 1:
+ warnings.warn(
+ f"Using nn.init.kaiming_normal_ init fn with parameters: "
+ + f"a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}"
+ )
+ kaiming_normal_ = partial(
+ torch.nn.init.kaiming_normal_,
+ a=init_gain,
+ mode=fan_mode,
+ nonlinearity=init_nonlinearity,
+ )
+ generic_param_init_fn_(
+ module=module,
+ init_fn_=kaiming_normal_,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=init_div_is_residual,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+def xavier_uniform_param_init_fn_(
+ module: nn.Module,
+ n_layers: int,
+ d_model: Optional[int] = None,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ init_gain: float = 0,
+ verbose: int = 0,
+ **kwargs,
+):
+ del kwargs
+ xavier_uniform_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
+ if verbose > 1:
+ warnings.warn(
+ f"Using torch.nn.init.xavier_uniform_ init fn with parameters: "
+ + f"gain={init_gain}"
+ )
+ generic_param_init_fn_(
+ module=module,
+ init_fn_=xavier_uniform_,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=init_div_is_residual,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+def xavier_normal_param_init_fn_(
+ module: nn.Module,
+ n_layers: int,
+ d_model: Optional[int] = None,
+ init_div_is_residual: Union[int, float, str, bool] = True,
+ emb_init_std: Optional[float] = None,
+ emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
+ init_gain: float = 0,
+ verbose: int = 0,
+ **kwargs,
+):
+ xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
+ if verbose > 1:
+ warnings.warn(
+ f"Using torch.nn.init.xavier_normal_ init fn with parameters: "
+ + f"gain={init_gain}"
+ )
+ generic_param_init_fn_(
+ module=module,
+ init_fn_=xavier_normal_,
+ d_model=d_model,
+ n_layers=n_layers,
+ init_div_is_residual=init_div_is_residual,
+ emb_init_std=emb_init_std,
+ emb_init_uniform_lim=emb_init_uniform_lim,
+ verbose=verbose,
+ )
+
+
+MODEL_INIT_REGISTRY = {
+ "default_": torch_default_param_init_fn_,
+ "baseline_": baseline_param_init_fn_,
+ "kaiming_uniform_": kaiming_uniform_param_init_fn_,
+ "kaiming_normal_": kaiming_normal_param_init_fn_,
+ "neox_init_": neox_param_init_fn_,
+ "small_init_": small_param_init_fn_,
+ "xavier_uniform_": xavier_uniform_param_init_fn_,
+ "xavier_normal_": xavier_normal_param_init_fn_,
+}
diff --git a/model/llava/model/llava_arch.py b/model/llava/model/llava_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..049ea7e6541b12ca921bfaf8a0944351bf062e62
--- /dev/null
+++ b/model/llava/model/llava_arch.py
@@ -0,0 +1,398 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from abc import ABC, abstractmethod
+
+import torch
+import torch.nn as nn
+
+# from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_PATCH_TOKEN, IGNORE_INDEX,
+ IMAGE_TOKEN_INDEX)
+
+from .multimodal_encoder.builder import build_vision_tower
+
+
+class LlavaMetaModel:
+ def __init__(self, config):
+ super(LlavaMetaModel, self).__init__(config)
+
+ if hasattr(config, "mm_vision_tower"):
+ self.vision_tower = build_vision_tower(config, delay_load=True)
+ self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)
+
+ def get_vision_tower(self):
+ vision_tower = getattr(self, "vision_tower", None)
+ if type(vision_tower) is list:
+ vision_tower = vision_tower[0]
+ return vision_tower
+
+ def initialize_vision_modules(self, model_args, fsdp=None):
+ vision_tower = model_args.vision_tower
+ mm_vision_select_layer = model_args.mm_vision_select_layer
+ mm_vision_select_feature = model_args.mm_vision_select_feature
+ pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
+
+ self.config.mm_vision_tower = vision_tower
+
+ vision_tower = build_vision_tower(model_args)
+
+ if fsdp is not None and len(fsdp) > 0:
+ self.vision_tower = [vision_tower]
+ else:
+ self.vision_tower = vision_tower
+
+ self.config.use_mm_proj = True
+ self.config.mm_hidden_size = vision_tower.hidden_size
+ self.config.mm_vision_select_layer = mm_vision_select_layer
+ self.config.mm_vision_select_feature = mm_vision_select_feature
+
+ if not hasattr(self, "mm_projector"):
+ self.mm_projector = nn.Linear(
+ self.config.mm_hidden_size, self.config.hidden_size
+ )
+
+ if pretrain_mm_mlp_adapter is not None:
+ mm_projector_weights = torch.load(
+ pretrain_mm_mlp_adapter, map_location="cpu"
+ )
+
+ def get_w(weights, keyword):
+ return {
+ k.split(keyword + ".")[1]: v
+ for k, v in weights.items()
+ if keyword in k
+ }
+
+ self.mm_projector.load_state_dict(
+ get_w(mm_projector_weights, "mm_projector")
+ )
+
+
+class LlavaMetaForCausalLM(ABC):
+ @abstractmethod
+ def get_model(self):
+ pass
+
+ def get_vision_tower(self):
+ return self.get_model().get_vision_tower()
+
+ def encode_images(self, images):
+ image_features = self.get_model().get_vision_tower()(images)
+ image_features = self.get_model().mm_projector(image_features)
+ return image_features
+
+ def prepare_inputs_labels_for_multimodal(
+ self, input_ids, attention_mask, past_key_values, labels, images
+ ):
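+ # Splice vision features into the token stream: every IMAGE_TOKEN_INDEX placeholder is replaced by that image's projected patch embeddings, those positions are labeled IGNORE_INDEX, and samples are padded to a common length with the attention mask extended to match.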
+ vision_tower = self.get_vision_tower()
+ if vision_tower is None or images is None or input_ids.shape[1] == 1:
+ if (
+ past_key_values is not None
+ and vision_tower is not None
+ and images is not None
+ and input_ids.shape[1] == 1
+ ):
+ attention_mask = torch.ones(
+ (attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1),
+ dtype=attention_mask.dtype,
+ device=attention_mask.device,
+ )
+ return input_ids, attention_mask, past_key_values, None, labels
+
+ if type(images) is list or images.ndim == 5:
+ concat_images = torch.cat([image for image in images], dim=0)
+ image_features = self.encode_images(concat_images)
+ split_sizes = [image.shape[0] for image in images]
+ image_features = torch.split(image_features, split_sizes, dim=0)
+ image_features = [x.flatten(0, 1) for x in image_features]
+ else:
+ image_features = self.encode_images(images)
+
+ new_input_embeds = []
+ new_labels = [] if labels is not None else None
+ cur_image_idx = 0
+ for batch_idx, cur_input_ids in enumerate(input_ids):
+ if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
+ # multimodal LLM, but the current sample is not multimodal
+ cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)
+ cur_input_embeds = (
+ cur_input_embeds
+ + (
+ 0.0 * self.get_model().mm_projector(vision_tower.dummy_feature)
+ ).sum()
+ )
+ new_input_embeds.append(cur_input_embeds)
+ if labels is not None:
+ new_labels.append(labels[batch_idx])
+ cur_image_idx += 1
+ continue
+ image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
+ cur_new_input_embeds = []
+ if labels is not None:
+ cur_labels = labels[batch_idx]
+ cur_new_labels = []
+ assert cur_labels.shape == cur_input_ids.shape
+ while image_token_indices.numel() > 0:
+ cur_image_features = image_features[cur_image_idx]
+ image_token_start = image_token_indices[0]
+ if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
+ self.config, "mm_use_im_start_end", False
+ ):
+ cur_new_input_embeds.append(
+ self.get_model()
+ .embed_tokens(cur_input_ids[: image_token_start - 1])
+ .detach()
+ )
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(
+ cur_input_ids[image_token_start - 1 : image_token_start]
+ )
+ )
+ cur_new_input_embeds.append(cur_image_features)
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(
+ cur_input_ids[image_token_start + 1 : image_token_start + 2]
+ )
+ )
+ if labels is not None:
+ cur_new_labels.append(cur_labels[:image_token_start])
+ cur_new_labels.append(
+ torch.full(
+ (cur_image_features.shape[0],),
+ IGNORE_INDEX,
+ device=labels.device,
+ dtype=labels.dtype,
+ )
+ )
+ cur_new_labels.append(
+ cur_labels[image_token_start : image_token_start + 1]
+ )
+ cur_labels = cur_labels[image_token_start + 2 :]
+ elif getattr(self.config, "mm_use_im_start_end", False):
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(cur_input_ids[:image_token_start])
+ )
+ cur_new_input_embeds.append(cur_image_features)
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(
+ cur_input_ids[image_token_start + 1 : image_token_start + 2]
+ )
+ )
+ if labels is not None:
+ cur_new_labels.append(cur_labels[:image_token_start])
+ cur_new_labels.append(
+ torch.full(
+ (cur_image_features.shape[0],),
+ IGNORE_INDEX,
+ device=labels.device,
+ dtype=labels.dtype,
+ )
+ )
+ cur_new_labels.append(
+ cur_labels[image_token_start + 1 : image_token_start + 2]
+ )
+ cur_labels = cur_labels[image_token_start + 2 :]
+ else:
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(cur_input_ids[:image_token_start])
+ )
+ cur_new_input_embeds.append(cur_image_features)
+ if labels is not None:
+ cur_new_labels.append(cur_labels[:image_token_start])
+ cur_new_labels.append(
+ torch.full(
+ (cur_image_features.shape[0],),
+ IGNORE_INDEX,
+ device=labels.device,
+ dtype=labels.dtype,
+ )
+ )
+ cur_labels = cur_labels[image_token_start + 1 :]
+ cur_image_idx += 1
+ if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
+ self.config, "mm_use_im_start_end", False
+ ):
+ cur_input_ids = cur_input_ids[image_token_start + 2 :]
+ elif getattr(self.config, "mm_use_im_start_end", False):
+ cur_input_ids = cur_input_ids[image_token_start + 2 :]
+ else:
+ cur_input_ids = cur_input_ids[image_token_start + 1 :]
+ image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
+ if cur_input_ids.numel() > 0:
+ if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(
+ self.config, "mm_use_im_start_end", False
+ ):
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(cur_input_ids).detach()
+ )
+ elif getattr(self.config, "mm_use_im_start_end", False):
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(cur_input_ids)
+ )
+ else:
+ cur_new_input_embeds.append(
+ self.get_model().embed_tokens(cur_input_ids)
+ )
+ if labels is not None:
+ cur_new_labels.append(cur_labels)
+ cur_new_input_embeds = [
+ x.to(device=self.device) for x in cur_new_input_embeds
+ ]
+ cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
+ new_input_embeds.append(cur_new_input_embeds)
+ if labels is not None:
+ cur_new_labels = torch.cat(cur_new_labels, dim=0)
+ new_labels.append(cur_new_labels)
+
+ if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
+ max_len = max(x.shape[0] for x in new_input_embeds)
+
+ new_input_embeds_align = []
+ for cur_new_embed in new_input_embeds:
+ cur_new_embed = torch.cat(
+ (
+ cur_new_embed,
+ torch.zeros(
+ (max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]),
+ dtype=cur_new_embed.dtype,
+ device=cur_new_embed.device,
+ ),
+ ),
+ dim=0,
+ )
+ new_input_embeds_align.append(cur_new_embed)
+ new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
+
+ if labels is not None:
+ new_labels_align = []
+ _new_labels = new_labels
+ for cur_new_label in new_labels:
+ cur_new_label = torch.cat(
+ (
+ cur_new_label,
+ torch.full(
+ (max_len - cur_new_label.shape[0],),
+ IGNORE_INDEX,
+ dtype=cur_new_label.dtype,
+ device=cur_new_label.device,
+ ),
+ ),
+ dim=0,
+ )
+ new_labels_align.append(cur_new_label)
+ new_labels = torch.stack(new_labels_align, dim=0)
+
+ if attention_mask is not None:
+ new_attention_mask = []
+ for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(
+ attention_mask, _new_labels, new_labels
+ ):
+ new_attn_mask_pad_left = torch.full(
+ (cur_new_labels.shape[0] - labels.shape[1],),
+ True,
+ dtype=attention_mask.dtype,
+ device=attention_mask.device,
+ )
+ new_attn_mask_pad_right = torch.full(
+ (cur_new_labels_align.shape[0] - cur_new_labels.shape[0],),
+ False,
+ dtype=attention_mask.dtype,
+ device=attention_mask.device,
+ )
+ cur_new_attention_mask = torch.cat(
+ (
+ new_attn_mask_pad_left,
+ cur_attention_mask,
+ new_attn_mask_pad_right,
+ ),
+ dim=0,
+ )
+ new_attention_mask.append(cur_new_attention_mask)
+ attention_mask = torch.stack(new_attention_mask, dim=0)
+ assert attention_mask.shape == new_labels.shape
+ else:
+ new_input_embeds = torch.stack(new_input_embeds, dim=0)
+ if labels is not None:
+ new_labels = torch.stack(new_labels, dim=0)
+
+ if attention_mask is not None:
+ new_attn_mask_pad_left = torch.full(
+ (
+ attention_mask.shape[0],
+ new_input_embeds.shape[1] - input_ids.shape[1],
+ ),
+ True,
+ dtype=attention_mask.dtype,
+ device=attention_mask.device,
+ )
+ attention_mask = torch.cat(
+ (new_attn_mask_pad_left, attention_mask), dim=1
+ )
+ assert attention_mask.shape == new_input_embeds.shape[:2]
+
+ return None, attention_mask, past_key_values, new_input_embeds, new_labels
+
+ # def initialize_vision_tokenizer(self, model_args, tokenizer):
+ def initialize_vision_tokenizer(self, model_args, num_new_tokens):
+ # if model_args.mm_use_im_patch_token:
+ # tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
+ # self.resize_token_embeddings(len(tokenizer))
+
+ if model_args.mm_use_im_start_end:
+ # num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
+ # self.resize_token_embeddings(len(tokenizer))
+
+ # if num_new_tokens > 0:
+ # input_embeddings = self.get_input_embeddings().weight.data
+ # output_embeddings = self.get_output_embeddings().weight.data
+
+ # input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
+ # dim=0, keepdim=True)
+ # output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
+ # dim=0, keepdim=True)
+
+ # input_embeddings[-num_new_tokens:] = input_embeddings_avg
+ # output_embeddings[-num_new_tokens:] = output_embeddings_avg
+
+ if model_args.tune_mm_mlp_adapter:
+ for p in self.get_input_embeddings().parameters():
+ p.requires_grad = True
+ for p in self.get_output_embeddings().parameters():
+ p.requires_grad = False
+
+ if model_args.pretrain_mm_mlp_adapter:
+ mm_projector_weights = torch.load(
+ model_args.pretrain_mm_mlp_adapter, map_location="cpu"
+ )
+ embed_tokens_weight = mm_projector_weights["model.embed_tokens.weight"]
+ # The commented-out block above no longer defines input_embeddings, so fetch the embedding matrix here before it is used below.
+ input_embeddings = self.get_input_embeddings().weight.data
+ assert num_new_tokens == 2
+ if input_embeddings.shape == embed_tokens_weight.shape:
+ input_embeddings[-num_new_tokens:] = embed_tokens_weight[
+ -num_new_tokens:
+ ]
+ elif embed_tokens_weight.shape[0] == num_new_tokens:
+ input_embeddings[-num_new_tokens:] = embed_tokens_weight
+ else:
+ raise ValueError(
+ f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}."
+ )
+ elif model_args.mm_use_im_patch_token:
+ if model_args.tune_mm_mlp_adapter:
+ for p in self.get_input_embeddings().parameters():
+ p.requires_grad = False
+ for p in self.get_output_embeddings().parameters():
+ p.requires_grad = False
diff --git a/model/llava/model/make_delta.py b/model/llava/model/make_delta.py
new file mode 100644
index 0000000000000000000000000000000000000000..26d73d2474e2da7f62955c6685c8812d6d94f6ad
--- /dev/null
+++ b/model/llava/model/make_delta.py
@@ -0,0 +1,63 @@
+"""
+Usage:
+python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta
+"""
+import argparse
+
+import torch
+from llava.model.utils import auto_upgrade
+from tqdm import tqdm
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+
+def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
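+ # Store target minus base for every shared weight so the delta can be redistributed separately from the base model; rows added on top of the base shape (e.g. new tokens) are left untouched.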
+ print("Loading base model")
+ base = AutoModelForCausalLM.from_pretrained(
+ base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+
+ print("Loading target model")
+ auto_upgrade(target_model_path)
+ target = AutoModelForCausalLM.from_pretrained(
+ target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True
+ )
+
+ print("Calculating delta")
+ for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
+ if name not in base.state_dict():
+ assert name in [
+ "model.mm_projector.weight",
+ "model.mm_projector.bias",
+ ], f"{name} not in base model"
+ continue
+ if param.data.shape == base.state_dict()[name].shape:
+ param.data -= base.state_dict()[name]
+ else:
+ assert name in [
+ "model.embed_tokens.weight",
+ "lm_head.weight",
+ ], f"{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}"
+ bparam = base.state_dict()[name]
+ param.data[: bparam.shape[0], : bparam.shape[1]] -= bparam
+
+ print("Saving delta")
+ if hub_repo_id:
+ kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
+ else:
+ kwargs = {}
+ target.save_pretrained(delta_path, **kwargs)
+ target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
+ target_tokenizer.save_pretrained(delta_path, **kwargs)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--base-model-path", type=str, required=True)
+ parser.add_argument("--target-model-path", type=str, required=True)
+ parser.add_argument("--delta-path", type=str, required=True)
+ parser.add_argument("--hub-repo-id", type=str, default=None)
+ args = parser.parse_args()
+
+ make_delta(
+ args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id
+ )
diff --git a/model/llava/model/multimodal_encoder/__pycache__/builder.cpython-39.pyc b/model/llava/model/multimodal_encoder/__pycache__/builder.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b4e763f9408b37e12f50a09e872c39df951aae24
Binary files /dev/null and b/model/llava/model/multimodal_encoder/__pycache__/builder.cpython-39.pyc differ
diff --git a/model/llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-39.pyc b/model/llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33dc1695aab6a815d6ec8b18dd8d37832d51450b
Binary files /dev/null and b/model/llava/model/multimodal_encoder/__pycache__/clip_encoder.cpython-39.pyc differ
diff --git a/model/llava/model/multimodal_encoder/builder.py b/model/llava/model/multimodal_encoder/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..087faa85779eb991e73adf17e75e387070c3e313
--- /dev/null
+++ b/model/llava/model/multimodal_encoder/builder.py
@@ -0,0 +1,17 @@
+from .clip_encoder import CLIPVisionTower
+
+
+def build_vision_tower(vision_tower_cfg, **kwargs):
+ vision_tower = getattr(
+ vision_tower_cfg,
+ "mm_vision_tower",
+ getattr(vision_tower_cfg, "vision_tower", None),
+ )
+ if (
+ vision_tower.startswith("openai")
+ or vision_tower.startswith("laion")
+ or "clip" in vision_tower
+ ):
+ return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
+
+ raise ValueError(f"Unknown vision tower: {vision_tower}")
diff --git a/model/llava/model/multimodal_encoder/clip_encoder.py b/model/llava/model/multimodal_encoder/clip_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..793b70b5f3ceb02d382d3e39b9eda8adc8b01540
--- /dev/null
+++ b/model/llava/model/multimodal_encoder/clip_encoder.py
@@ -0,0 +1,87 @@
+import torch
+import torch.nn as nn
+from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
+
+
+class CLIPVisionTower(nn.Module):
+ def __init__(self, vision_tower, args, delay_load=False):
+ super().__init__()
+
+ self.is_loaded = False
+
+ self.vision_tower_name = vision_tower
+ self.select_layer = args.mm_vision_select_layer
+ self.select_feature = getattr(args, "mm_vision_select_feature", "patch")
+
+ if not delay_load:
+ self.load_model()
+ else:
+ self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)
+
+ def load_model(self):
+ self.image_processor = CLIPImageProcessor.from_pretrained(
+ self.vision_tower_name
+ )
+ self.vision_tower = CLIPVisionModel.from_pretrained(
+ self.vision_tower_name, low_cpu_mem_usage=True
+ )
+ self.vision_tower.requires_grad_(False)
+ self.is_loaded = True
+
+ def feature_select(self, image_forward_outs):
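+ # Take one hidden layer of the CLIP vision tower (select_layer); "patch" drops the leading CLS token, "cls_patch" keeps it.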
+ image_features = image_forward_outs.hidden_states[self.select_layer]
+ if self.select_feature == "patch":
+ image_features = image_features[:, 1:]
+ elif self.select_feature == "cls_patch":
+ image_features = image_features
+ else:
+ raise ValueError(f"Unexpected select feature: {self.select_feature}")
+ return image_features
+
+ @torch.no_grad()
+ def forward(self, images):
+ if type(images) is list:
+ image_features = []
+ for image in images:
+ image_forward_out = self.vision_tower(
+ image.to(device=self.device, dtype=self.dtype).unsqueeze(0),
+ output_hidden_states=True,
+ )
+ image_feature = self.feature_select(image_forward_out).to(image.dtype)
+ image_features.append(image_feature)
+ else:
+ image_forward_outs = self.vision_tower(
+ images.to(device=self.device, dtype=self.dtype),
+ output_hidden_states=True,
+ )
+ image_features = self.feature_select(image_forward_outs).to(images.dtype)
+
+ torch.cuda.empty_cache()
+ return image_features
+
+ @property
+ def dummy_feature(self):
+ return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
+
+ @property
+ def dtype(self):
+ return self.vision_tower.dtype
+
+ @property
+ def device(self):
+ return self.vision_tower.device
+
+ @property
+ def config(self):
+ if self.is_loaded:
+ return self.vision_tower.config
+ else:
+ return self.cfg_only
+
+ @property
+ def hidden_size(self):
+ return self.config.hidden_size
+
+ @property
+ def num_patches(self):
+ return (self.config.image_size // self.config.patch_size) ** 2
diff --git a/model/llava/model/utils.py b/model/llava/model/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ecd82db4710fedf82f51283be32744f99684cc3
--- /dev/null
+++ b/model/llava/model/utils.py
@@ -0,0 +1,24 @@
+from transformers import AutoConfig
+
+
+def auto_upgrade(config):
+ cfg = AutoConfig.from_pretrained(config)
+ if "llava" in config and "llava" not in cfg.model_type:
+ assert cfg.model_type == "llama"
+ print(
+ "You are using newer LLaVA code base, while the checkpoint of v0 is from older code base."
+ )
+ print(
+ "You must upgrade the checkpoint to the new code base (this can be done automatically)."
+ )
+ confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
+ if confirm.lower() in ["y", "yes"]:
+ print("Upgrading checkpoint...")
+ assert len(cfg.architectures) == 1
+ setattr(cfg.__class__, "model_type", "llava")
+ cfg.architectures[0] = "LlavaLlamaForCausalLM"
+ cfg.save_pretrained(config)
+ print("Checkpoint upgraded.")
+ else:
+ print("Checkpoint upgrade aborted.")
+ exit(1)
diff --git a/model/llava/train/llama_flash_attn_monkey_patch.py b/model/llava/train/llama_flash_attn_monkey_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..312aa87696be6464aa7fc77dd3c2daf7fbaaa94c
--- /dev/null
+++ b/model/llava/train/llama_flash_attn_monkey_patch.py
@@ -0,0 +1,126 @@
+import logging
+from typing import List, Optional, Tuple
+
+import torch
+import transformers
+from einops import rearrange
+from torch import nn
+from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
+
+try:
+ from flash_attn.flash_attn_interface import \
+ flash_attn_unpadded_qkvpacked_func
+except ImportError:
+ from flash_attn.flash_attn_interface import (
+ flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func,
+ )
+
+from flash_attn.bert_padding import pad_input, unpad_input
+
+
+def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel
+
+ attention_mask: [bsz, q_len]
+ """
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = (
+ self.q_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ key_states = (
+ self.k_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ value_states = (
+ self.v_proj(hidden_states)
+ .view(bsz, q_len, self.num_heads, self.head_dim)
+ .transpose(1, 2)
+ )
+ # [bsz, q_len, nh, hd]
+ # [bsz, nh, q_len, hd]
+
+ kv_seq_len = key_states.shape[-2]
+ assert past_key_value is None, "past_key_value is not supported"
+
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(
+ query_states, key_states, cos, sin, position_ids
+ )
+ # [bsz, nh, t, hd]
+ assert not output_attentions, "output_attentions is not supported"
+ assert not use_cache, "use_cache is not supported"
+
+ # Flash attention codes from
+ # https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attention.py
+
+ # transform the data into the format required by flash attention
+ qkv = torch.stack(
+ [query_states, key_states, value_states], dim=2
+ ) # [bsz, nh, 3, q_len, hd]
+ qkv = qkv.transpose(1, 3) # [bsz, q_len, 3, nh, hd]
+ # We have disabled _prepare_decoder_attention_mask in LlamaModel
+ # the attention_mask should be the same as the key_padding_mask
+ key_padding_mask = attention_mask
+
+ if key_padding_mask is None:
+ qkv = rearrange(qkv, "b s ... -> (b s) ...")
+ max_s = q_len
+ cu_q_lens = torch.arange(
+ 0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
+ )
+ output = flash_attn_unpadded_qkvpacked_func(
+ qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
+ )
+ output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
+ else:
+ nheads = qkv.shape[-2]
+ x = rearrange(qkv, "b s three h d -> b s (three h d)")
+ x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
+ x_unpad = rearrange(
+ x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads
+ )
+ output_unpad = flash_attn_unpadded_qkvpacked_func(
+ x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
+ )
+ output = rearrange(
+ pad_input(
+ rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len
+ ),
+ "b s (h d) -> b s h d",
+ h=nheads,
+ )
+ return self.o_proj(rearrange(output, "b s h d -> b s (h d)")), None, None
+
+
+# Disable the transformation of the attention mask in LlamaModel as the flash attention
+# requires the attention mask to be the same as the key_padding_mask
+def _prepare_decoder_attention_mask(
+ self, attention_mask, input_shape, inputs_embeds, past_key_values_length
+):
+ # [bsz, seq_len]
+ return attention_mask
+
+
+def replace_llama_attn_with_flash_attn():
+ cuda_major, cuda_minor = torch.cuda.get_device_capability()
+ if cuda_major < 8:
+ logging.warning(
+ "Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward."
+ "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593"
+ )
+ transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
+ _prepare_decoder_attention_mask
+ )
+ transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
diff --git a/model/llava/train/llava_trainer.py b/model/llava/train/llava_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4fd9397533abeb902b368825e224f1ae320985b
--- /dev/null
+++ b/model/llava/train/llava_trainer.py
@@ -0,0 +1,67 @@
+import os
+from typing import Optional
+
+import torch
+from transformers import Trainer
+
+
+def maybe_zero_3(param, ignore_status=False, name=None):
+ from deepspeed import zero
+ from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
+
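+ # Parameters partitioned by DeepSpeed ZeRO-3 expose ds_id/ds_status; gather the full tensor before cloning it to CPU.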
+ if hasattr(param, "ds_id"):
+ if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
+ if not ignore_status:
+ print(name, "no ignore status")
+ with zero.GatheredParameters([param]):
+ param = param.data.detach().cpu().clone()
+ else:
+ param = param.detach().cpu().clone()
+ return param
+
+
+def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
+ to_return = {
+ k: t
+ for k, t in named_params
+ if any(key_match in k for key_match in keys_to_match)
+ }
+ to_return = {
+ k: maybe_zero_3(v, ignore_status=True, name=k).cpu()
+ for k, v in to_return.items()
+ }
+ return to_return
+
+
+class LLaVATrainer(Trainer):
+ def _save_checkpoint(self, model, trial, metrics=None):
+ if getattr(self.args, "tune_mm_mlp_adapter", False):
+ from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
+
+ checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
+
+ run_dir = self._get_output_dir(trial=trial)
+ output_dir = os.path.join(run_dir, checkpoint_folder)
+
+ # Only save Adapter
+ keys_to_match = ["mm_projector"]
+ if getattr(self.args, "use_im_start_end", False):
+ keys_to_match.extend(["embed_tokens", "embed_in"])
+
+ weight_to_save = get_mm_adapter_state_maybe_zero_3(
+ self.model.named_parameters(), keys_to_match
+ )
+
+ if self.args.local_rank == 0 or self.args.local_rank == -1:
+ self.model.config.save_pretrained(output_dir)
+ torch.save(
+ weight_to_save, os.path.join(output_dir, f"mm_projector.bin")
+ )
+ else:
+ super(LLaVATrainer, self)._save_checkpoint(model, trial, metrics)
+
+ def _save(self, output_dir: Optional[str] = None, state_dict=None):
+ if getattr(self.args, "tune_mm_mlp_adapter", False):
+ pass
+ else:
+ super(LLaVATrainer, self)._save(output_dir, state_dict)
diff --git a/model/llava/train/train.py b/model/llava/train/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9ed3f8f5c310be51c8a0ef427deb8ae63854eb4
--- /dev/null
+++ b/model/llava/train/train.py
@@ -0,0 +1,1038 @@
+# Adapted from https://github.com/lm-sys/FastChat. Below is the original copyright:
+# Adapted from tatsu-lab@stanford_alpaca. Below is the original copyright:
+# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import json
+import logging
+import os
+import pathlib
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Sequence
+
+import torch
+import transformers
+from llava import conversation as conversation_lib
+from llava.constants import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IGNORE_INDEX,
+ IMAGE_TOKEN_INDEX)
+from llava.mm_utils import tokenizer_image_token
+from llava.model import *
+from llava.train.llava_trainer import LLaVATrainer
+from PIL import Image
+from torch.utils.data import Dataset
+
+local_rank = None
+
+
+def rank0_print(*args):
+ if local_rank == 0:
+ print(*args)
+
+
+@dataclass
+class ModelArguments:
+ model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
+ version: Optional[str] = field(default="v0")
+ freeze_backbone: bool = field(default=False)
+ tune_mm_mlp_adapter: bool = field(default=False)
+ vision_tower: Optional[str] = field(default=None)
+ mm_vision_select_layer: Optional[int] = field(
+ default=-1
+ ) # default to the last layer
+ pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
+ mm_use_im_start_end: bool = field(default=False)
+ mm_use_im_patch_token: bool = field(default=True)
+ mm_vision_select_feature: Optional[str] = field(default="patch")
+
+
+@dataclass
+class DataArguments:
+ data_path: str = field(
+ default=None, metadata={"help": "Path to the training data."}
+ )
+ lazy_preprocess: bool = False
+ is_multimodal: bool = False
+ image_folder: Optional[str] = field(default=None)
+ image_aspect_ratio: str = "square"
+ image_grid_pinpoints: Optional[str] = field(default=None)
+
+
+@dataclass
+class TrainingArguments(transformers.TrainingArguments):
+ cache_dir: Optional[str] = field(default=None)
+ optim: str = field(default="adamw_torch")
+ remove_unused_columns: bool = field(default=False)
+ freeze_mm_mlp_adapter: bool = field(default=False)
+ mpt_attn_impl: Optional[str] = field(default="triton")
+ model_max_length: int = field(
+ default=512,
+ metadata={
+ "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
+ },
+ )
+ double_quant: bool = field(
+ default=True,
+ metadata={
+ "help": "Compress the quantization statistics through double quantization."
+ },
+ )
+ quant_type: str = field(
+ default="nf4",
+ metadata={
+ "help": "Quantization data type to use. Should be one of `fp4` or `nf4`."
+ },
+ )
+ bits: int = field(default=16, metadata={"help": "How many bits to use."})
+ lora_enable: bool = False
+ lora_r: int = 64
+ lora_alpha: int = 16
+ lora_dropout: float = 0.05
+ lora_weight_path: str = ""
+ lora_bias: str = "none"
+
+
+def maybe_zero_3(param, ignore_status=False, name=None):
+ from deepspeed import zero
+ from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
+
+ if hasattr(param, "ds_id"):
+ if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
+ if not ignore_status:
+ logging.warning(
+ f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}"
+ )
+ with zero.GatheredParameters([param]):
+ param = param.data.detach().cpu().clone()
+ else:
+ param = param.detach().cpu().clone()
+ return param
+
+
+# Borrowed from peft.utils.get_peft_model_state_dict
+def get_peft_state_maybe_zero_3(named_params, bias):
+ if bias == "none":
+ to_return = {k: t for k, t in named_params if "lora_" in k}
+ elif bias == "all":
+ to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
+ elif bias == "lora_only":
+ to_return = {}
+ maybe_lora_bias = {}
+ lora_bias_names = set()
+ for k, t in named_params:
+ if "lora_" in k:
+ to_return[k] = t
+ bias_name = k.split("lora_")[0] + "bias"
+ lora_bias_names.add(bias_name)
+ elif "bias" in k:
+ maybe_lora_bias[k] = t
+        for k, t in maybe_lora_bias.items():
+            if k in lora_bias_names:
+                to_return[k] = t
+ else:
+ raise NotImplementedError
+ to_return = {k: maybe_zero_3(v, name=k) for k, v in to_return.items()}
+ return to_return
+
+
+def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
+ to_return = {k: t for k, t in named_params if "lora_" not in k}
+ if require_grad_only:
+ to_return = {k: t for k, t in to_return.items() if t.requires_grad}
+ to_return = {
+ k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()
+ }
+ return to_return
+
+
+def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
+ to_return = {
+ k: t
+ for k, t in named_params
+ if any(key_match in k for key_match in keys_to_match)
+ }
+ to_return = {
+ k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()
+ }
+ return to_return
+
+
+def find_all_linear_names(model):
+ cls = torch.nn.Linear
+ lora_module_names = set()
+ for name, module in model.named_modules():
+ if isinstance(module, cls):
+ names = name.split(".")
+ lora_module_names.add(names[0] if len(names) == 1 else names[-1])
+
+ if "lm_head" in lora_module_names: # needed for 16-bit
+ lora_module_names.remove("lm_head")
+ return list(lora_module_names)
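+
+
+# Illustrative note: for a LLaMA-style backbone this typically returns names such
+# as ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
+# which are then passed to LoraConfig(target_modules=...) in train() below.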
+
+
+def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
+ """Collects the state dict and dump to disk."""
+
+ if getattr(trainer.args, "tune_mm_mlp_adapter", False):
+ # Only save Adapter
+ keys_to_match = ["mm_projector"]
+ if getattr(trainer.args, "use_im_start_end", False):
+ keys_to_match.extend(["embed_tokens", "embed_in"])
+
+ weight_to_save = get_mm_adapter_state_maybe_zero_3(
+ trainer.model.named_parameters(), keys_to_match
+ )
+ trainer.model.config.save_pretrained(output_dir)
+
+ current_folder = output_dir.split("/")[-1]
+ parent_folder = os.path.dirname(output_dir)
+ if trainer.args.local_rank == 0 or trainer.args.local_rank == -1:
+ if current_folder.startswith("checkpoint-"):
+ mm_projector_folder = os.path.join(parent_folder, "mm_projector")
+ os.makedirs(mm_projector_folder, exist_ok=True)
+ torch.save(
+ weight_to_save,
+ os.path.join(mm_projector_folder, f"{current_folder}.bin"),
+ )
+ else:
+ torch.save(
+                    weight_to_save, os.path.join(output_dir, "mm_projector.bin")
+ )
+ return
+
+ if trainer.deepspeed:
+ torch.cuda.synchronize()
+ trainer.save_model(output_dir)
+ return
+
+ state_dict = trainer.model.state_dict()
+ if trainer.args.should_save:
+ cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
+ del state_dict
+ trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
+
+
+def smart_tokenizer_and_embedding_resize(
+ special_tokens_dict: Dict,
+ tokenizer: transformers.PreTrainedTokenizer,
+ model: transformers.PreTrainedModel,
+):
+ """Resize tokenizer and embedding.
+
+ Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
+ """
+ num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
+ model.resize_token_embeddings(len(tokenizer))
+
+ if num_new_tokens > 0:
+ input_embeddings = model.get_input_embeddings().weight.data
+ output_embeddings = model.get_output_embeddings().weight.data
+
+ input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
+ dim=0, keepdim=True
+ )
+ output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
+ dim=0, keepdim=True
+ )
+
+ input_embeddings[-num_new_tokens:] = input_embeddings_avg
+ output_embeddings[-num_new_tokens:] = output_embeddings_avg
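+
+
+# Usage sketch, mirroring the v0 branch of train() below: add a [PAD] token and
+# initialize its embedding rows to the mean of the existing ones.
+#
+#     smart_tokenizer_and_embedding_resize(
+#         special_tokens_dict=dict(pad_token="[PAD]"),
+#         tokenizer=tokenizer,
+#         model=model,
+#     )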
+
+
+def _tokenize_fn(
+ strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer
+) -> Dict:
+ """Tokenize a list of strings."""
+ tokenized_list = [
+ tokenizer(
+ text,
+ return_tensors="pt",
+ padding="longest",
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ )
+ for text in strings
+ ]
+ input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
+ input_ids_lens = labels_lens = [
+ tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
+ for tokenized in tokenized_list
+ ]
+ return dict(
+ input_ids=input_ids,
+ labels=labels,
+ input_ids_lens=input_ids_lens,
+ labels_lens=labels_lens,
+ )
+
+
+def _mask_targets(target, tokenized_lens, speakers):
+ # cur_idx = 0
+ cur_idx = tokenized_lens[0]
+ tokenized_lens = tokenized_lens[1:]
+ target[:cur_idx] = IGNORE_INDEX
+ for tokenized_len, speaker in zip(tokenized_lens, speakers):
+ if speaker == "human":
+ target[cur_idx + 2 : cur_idx + tokenized_len] = IGNORE_INDEX
+ cur_idx += tokenized_len
+
+
+def _add_speaker_and_signal(header, source, get_conversation=True):
+ """Add speaker and start/end signal on each round."""
+ BEGIN_SIGNAL = "### "
+ END_SIGNAL = "\n"
+ conversation = header
+ for sentence in source:
+ from_str = sentence["from"]
+ if from_str.lower() == "human":
+ from_str = conversation_lib.default_conversation.roles[0]
+ elif from_str.lower() == "gpt":
+ from_str = conversation_lib.default_conversation.roles[1]
+ else:
+ from_str = "unknown"
+ sentence["value"] = (
+ BEGIN_SIGNAL + from_str + ": " + sentence["value"] + END_SIGNAL
+ )
+ if get_conversation:
+ conversation += sentence["value"]
+ conversation += BEGIN_SIGNAL
+ return conversation
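+
+
+# Worked example (illustrative): with the default roles ("Human", "Assistant"),
+# a two-turn source renders roughly as
+#
+#     {system prompt}
+#
+#     ### Human: <instruction>
+#     ### Assistant: <answer>
+#     ###
+#
+# _mask_targets() above then sets the header and every human span to IGNORE_INDEX,
+# so the loss is computed only on assistant tokens.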
+
+
+def preprocess_multimodal(sources: Sequence[str], data_args: DataArguments) -> Dict:
+ is_multimodal = data_args.is_multimodal
+ if not is_multimodal:
+ return sources
+
+ for source in sources:
+ for sentence in source:
+ if DEFAULT_IMAGE_TOKEN in sentence["value"]:
+ sentence["value"] = (
+ sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "").strip()
+ )
+ sentence["value"] = DEFAULT_IMAGE_TOKEN + "\n" + sentence["value"]
+ sentence["value"] = sentence["value"].strip()
+ if "mmtag" in conversation_lib.default_conversation.version:
+ sentence["value"] = sentence["value"].replace(
+ DEFAULT_IMAGE_TOKEN,
+ "" + DEFAULT_IMAGE_TOKEN + "",
+ )
+ replace_token = DEFAULT_IMAGE_TOKEN
+ if data_args.mm_use_im_start_end:
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
+ )
+ sentence["value"] = sentence["value"].replace(
+ DEFAULT_IMAGE_TOKEN, replace_token
+ )
+
+ return sources
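+
+
+# Example of the rewrite above (illustrative strings): "What can I grasp here?\n<image>"
+# becomes "<image>\nWhat can I grasp here?", and with mm_use_im_start_end enabled the
+# image token is further wrapped as "<im_start><image><im_end>\nWhat can I grasp here?".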
+
+
+def preprocess_llama_2(
+ sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False
+) -> Dict:
+ conv = conversation_lib.default_conversation.copy()
+ roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
+
+ # Apply prompt templates
+ conversations = []
+ for i, source in enumerate(sources):
+ if roles[source[0]["from"]] != conv.roles[0]:
+ # Skip the first one if it is not from human
+ source = source[1:]
+
+ conv.messages = []
+ for j, sentence in enumerate(source):
+ role = roles[sentence["from"]]
+ assert role == conv.roles[j % 2], f"{i}"
+ conv.append_message(role, sentence["value"])
+ conversations.append(conv.get_prompt())
+
+ # Tokenize conversations
+
+ if has_image:
+ input_ids = torch.stack(
+ [
+ tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ for prompt in conversations
+ ],
+ dim=0,
+ )
+ else:
+ input_ids = tokenizer(
+ conversations,
+ return_tensors="pt",
+ padding="longest",
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ ).input_ids
+
+ targets = input_ids.clone()
+
+ assert conv.sep_style == conversation_lib.SeparatorStyle.LLAMA_2
+
+ # Mask targets
+ sep = "[/INST] "
+ for conversation, target in zip(conversations, targets):
+ total_len = int(target.ne(tokenizer.pad_token_id).sum())
+
+ rounds = conversation.split(conv.sep2)
+ cur_len = 1
+ target[:cur_len] = IGNORE_INDEX
+ for i, rou in enumerate(rounds):
+ if rou == "":
+ break
+
+ parts = rou.split(sep)
+ if len(parts) != 2:
+ break
+ parts[0] += sep
+
+ if has_image:
+ round_len = len(tokenizer_image_token(rou, tokenizer))
+ instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
+ else:
+ round_len = len(tokenizer(rou).input_ids)
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 2
+
+ target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
+
+ cur_len += round_len
+ target[cur_len:] = IGNORE_INDEX
+
+ if cur_len < tokenizer.model_max_length:
+ if cur_len != total_len:
+ target[:] = IGNORE_INDEX
+ print(
+ f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
+ f" (ignored)"
+ )
+
+ return dict(
+ input_ids=input_ids,
+ labels=targets,
+ )
+
+
+def preprocess_v1(
+ sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False
+) -> Dict:
+ conv = conversation_lib.default_conversation.copy()
+ roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
+
+ # Apply prompt templates
+ conversations = []
+ for i, source in enumerate(sources):
+ if roles[source[0]["from"]] != conv.roles[0]:
+ # Skip the first one if it is not from human
+ source = source[1:]
+
+ conv.messages = []
+ for j, sentence in enumerate(source):
+ role = roles[sentence["from"]]
+ assert role == conv.roles[j % 2], f"{i}"
+ conv.append_message(role, sentence["value"])
+ conversations.append(conv.get_prompt())
+
+ # Tokenize conversations
+
+ if has_image:
+ input_ids = torch.stack(
+ [
+ tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ for prompt in conversations
+ ],
+ dim=0,
+ )
+ else:
+ input_ids = tokenizer(
+ conversations,
+ return_tensors="pt",
+ padding="longest",
+ max_length=tokenizer.model_max_length,
+ truncation=True,
+ ).input_ids
+
+ targets = input_ids.clone()
+
+ assert conv.sep_style == conversation_lib.SeparatorStyle.TWO
+
+ # Mask targets
+ sep = conv.sep + conv.roles[1] + ": "
+ for conversation, target in zip(conversations, targets):
+ total_len = int(target.ne(tokenizer.pad_token_id).sum())
+
+ rounds = conversation.split(conv.sep2)
+ cur_len = 1
+ target[:cur_len] = IGNORE_INDEX
+ for i, rou in enumerate(rounds):
+ if rou == "":
+ break
+
+ parts = rou.split(sep)
+ if len(parts) != 2:
+ break
+ parts[0] += sep
+
+ if has_image:
+ round_len = len(tokenizer_image_token(rou, tokenizer))
+ instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
+ else:
+ round_len = len(tokenizer(rou).input_ids)
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 2
+
+ target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
+
+ cur_len += round_len
+ target[cur_len:] = IGNORE_INDEX
+
+ if cur_len < tokenizer.model_max_length:
+ if cur_len != total_len:
+ target[:] = IGNORE_INDEX
+ print(
+ f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
+ f" (ignored)"
+ )
+
+ return dict(
+ input_ids=input_ids,
+ labels=targets,
+ )
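+
+
+# Masking example (illustrative): with the "vicuna_v1" template (roles USER and
+# ASSISTANT, sep2 "</s>"), a round such as
+#
+#     "USER: <image>\nWhere should I grasp this? ASSISTANT: The mug handle.</s>"
+#
+# keeps labels only for "The mug handle.</s>"; everything up to and including
+# " ASSISTANT: " is replaced with IGNORE_INDEX.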
+
+
+def preprocess_mpt(
+ sources,
+ tokenizer: transformers.PreTrainedTokenizer,
+) -> Dict:
+ conv = conversation_lib.default_conversation.copy()
+ roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
+
+ # Apply prompt templates
+ conversations = []
+ for i, source in enumerate(sources):
+ if roles[source[0]["from"]] != conv.roles[0]:
+ # Skip the first one if it is not from human
+ source = source[1:]
+
+ conv.messages = []
+ for j, sentence in enumerate(source):
+ role = roles[sentence["from"]]
+ assert role == conv.roles[j % 2], f"{i}"
+ conv.append_message(role, sentence["value"])
+ conversations.append(conv.get_prompt())
+
+ # Tokenize conversations
+ input_ids = torch.stack(
+ [
+ tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ for prompt in conversations
+ ],
+ dim=0,
+ )
+ targets = input_ids.clone()
+ assert conv.sep_style == conversation_lib.SeparatorStyle.MPT
+
+ # Mask targets
+ sep = conv.sep + conv.roles[1]
+ for conversation, target in zip(conversations, targets):
+ total_len = int(target.ne(tokenizer.pad_token_id).sum())
+
+ rounds = conversation.split(conv.sep)
+ re_rounds = [conv.sep.join(rounds[:3])] # system + user + gpt
+ for conv_idx in range(3, len(rounds), 2):
+ re_rounds.append(
+ conv.sep.join(rounds[conv_idx : conv_idx + 2])
+ ) # user + gpt
+ cur_len = 0
+ target[:cur_len] = IGNORE_INDEX
+ for i, rou in enumerate(re_rounds):
+ if rou == "":
+ break
+
+ parts = rou.split(sep)
+ if len(parts) != 2:
+ break
+ parts[0] += sep
+ round_len = len(tokenizer_image_token(rou, tokenizer)) + len(
+ tokenizer_image_token(conv.sep, tokenizer)
+ )
+ instruction_len = len(tokenizer_image_token(parts[0], tokenizer))
+ target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
+
+ cur_len += round_len
+ target[cur_len:] = IGNORE_INDEX
+
+ if cur_len < tokenizer.model_max_length:
+ if cur_len != total_len:
+ target[:] = IGNORE_INDEX
+ print(
+ f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
+ f" (ignored)"
+ )
+
+ return dict(
+ input_ids=input_ids,
+ labels=targets,
+ )
+
+
+def preprocess_plain(
+ sources: Sequence[str],
+ tokenizer: transformers.PreTrainedTokenizer,
+) -> Dict:
+ # add end signal and concatenate together
+ conversations = []
+ for source in sources:
+ assert len(source) == 2
+ assert DEFAULT_IMAGE_TOKEN in source[0]["value"]
+ source[0]["value"] = DEFAULT_IMAGE_TOKEN
+ conversation = (
+ source[0]["value"]
+ + source[1]["value"]
+ + conversation_lib.default_conversation.sep
+ )
+ conversations.append(conversation)
+ # tokenize conversations
+ input_ids = [
+ tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ for prompt in conversations
+ ]
+ targets = copy.deepcopy(input_ids)
+ for target, source in zip(targets, sources):
+ tokenized_len = len(tokenizer_image_token(source[0]["value"], tokenizer))
+ target[:tokenized_len] = IGNORE_INDEX
+
+ return dict(input_ids=input_ids, labels=targets)
+
+
+def preprocess(
+ sources: Sequence[str],
+ tokenizer: transformers.PreTrainedTokenizer,
+ has_image: bool = False,
+) -> Dict:
+ """
+ Given a list of sources, each is a conversation list. This transform:
+    1. Add signal '### ' at the beginning of each sentence, with end signal '\n';
+ 2. Concatenate conversations together;
+ 3. Tokenize the concatenated conversation;
+ 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
+ """
+ if (
+ conversation_lib.default_conversation.sep_style
+ == conversation_lib.SeparatorStyle.PLAIN
+ ):
+ return preprocess_plain(sources, tokenizer)
+ if (
+ conversation_lib.default_conversation.sep_style
+ == conversation_lib.SeparatorStyle.LLAMA_2
+ ):
+ return preprocess_llama_2(sources, tokenizer, has_image=has_image)
+ if conversation_lib.default_conversation.version.startswith("v1"):
+ return preprocess_v1(sources, tokenizer, has_image=has_image)
+ if conversation_lib.default_conversation.version == "mpt":
+ return preprocess_mpt(sources, tokenizer)
+ # add end signal and concatenate together
+ conversations = []
+ for source in sources:
+ header = f"{conversation_lib.default_conversation.system}\n\n"
+ conversation = _add_speaker_and_signal(header, source)
+ conversations.append(conversation)
+
+ # tokenize conversations
+ def get_tokenize_len(prompts):
+ return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]
+
+ if has_image:
+ input_ids = [
+ tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ for prompt in conversations
+ ]
+ else:
+ conversations_tokenized = _tokenize_fn(conversations, tokenizer)
+ input_ids = conversations_tokenized["input_ids"]
+
+ targets = copy.deepcopy(input_ids)
+ for target, source in zip(targets, sources):
+ if has_image:
+ tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
+ else:
+ tokenized_lens = _tokenize_fn(
+ [header] + [s["value"] for s in source], tokenizer
+ )["input_ids_lens"]
+ speakers = [sentence["from"] for sentence in source]
+ _mask_targets(target, tokenized_lens, speakers)
+
+ return dict(input_ids=input_ids, labels=targets)
+
+
+class LazySupervisedDataset(Dataset):
+ """Dataset for supervised fine-tuning."""
+
+ def __init__(
+ self,
+ data_path: str,
+ tokenizer: transformers.PreTrainedTokenizer,
+ data_args: DataArguments,
+ ):
+ super(LazySupervisedDataset, self).__init__()
+ list_data_dict = json.load(open(data_path, "r"))
+
+ rank0_print("Formatting inputs...Skip in lazy mode")
+ self.tokenizer = tokenizer
+ self.list_data_dict = list_data_dict
+ self.data_args = data_args
+
+ def __len__(self):
+ return len(self.list_data_dict)
+
+ def __getitem__(self, i) -> Dict[str, torch.Tensor]:
+ sources = self.list_data_dict[i]
+ if isinstance(i, int):
+ sources = [sources]
+ assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME
+ if "image" in sources[0]:
+ image_file = self.list_data_dict[i]["image"]
+ image_folder = self.data_args.image_folder
+ processor = self.data_args.image_processor
+ image = Image.open(os.path.join(image_folder, image_file)).convert("RGB")
+ if self.data_args.image_aspect_ratio == "pad":
+
+ def expand2square(pil_img, background_color):
+ width, height = pil_img.size
+ if width == height:
+ return pil_img
+ elif width > height:
+ result = Image.new(
+ pil_img.mode, (width, width), background_color
+ )
+ result.paste(pil_img, (0, (width - height) // 2))
+ return result
+ else:
+ result = Image.new(
+ pil_img.mode, (height, height), background_color
+ )
+ result.paste(pil_img, ((height - width) // 2, 0))
+ return result
+
+ image = expand2square(
+ image, tuple(int(x * 255) for x in processor.image_mean)
+ )
+ image = processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+ else:
+ image = processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+ sources = preprocess_multimodal(
+ copy.deepcopy([e["conversations"] for e in sources]), self.data_args
+ )
+ else:
+ sources = copy.deepcopy([e["conversations"] for e in sources])
+ data_dict = preprocess(
+ sources, self.tokenizer, has_image=("image" in self.list_data_dict[i])
+ )
+ if isinstance(i, int):
+ data_dict = dict(
+ input_ids=data_dict["input_ids"][0], labels=data_dict["labels"][0]
+ )
+
+ # image exist in the data
+ if "image" in self.list_data_dict[i]:
+ data_dict["image"] = image
+ elif self.data_args.is_multimodal:
+ # image does not exist in the data, but the model is multimodal
+ crop_size = self.data_args.image_processor.crop_size
+ data_dict["image"] = torch.zeros(3, crop_size["height"], crop_size["width"])
+ return data_dict
+
+
+@dataclass
+class DataCollatorForSupervisedDataset(object):
+ """Collate examples for supervised fine-tuning."""
+
+ tokenizer: transformers.PreTrainedTokenizer
+
+ def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
+ input_ids, labels = tuple(
+ [instance[key] for instance in instances] for key in ("input_ids", "labels")
+ )
+ input_ids = torch.nn.utils.rnn.pad_sequence(
+ input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
+ )
+ labels = torch.nn.utils.rnn.pad_sequence(
+ labels, batch_first=True, padding_value=IGNORE_INDEX
+ )
+ input_ids = input_ids[:, : self.tokenizer.model_max_length]
+ labels = labels[:, : self.tokenizer.model_max_length]
+ batch = dict(
+ input_ids=input_ids,
+ labels=labels,
+ attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
+ )
+
+ if "image" in instances[0]:
+ images = [instance["image"] for instance in instances]
+ if all(x is not None and x.shape == images[0].shape for x in images):
+ batch["images"] = torch.stack(images)
+ else:
+ batch["images"] = images
+
+ return batch
+
+
+def make_supervised_data_module(
+ tokenizer: transformers.PreTrainedTokenizer, data_args
+) -> Dict:
+ """Make dataset and collator for supervised fine-tuning."""
+ train_dataset = LazySupervisedDataset(
+ tokenizer=tokenizer, data_path=data_args.data_path, data_args=data_args
+ )
+ data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
+ return dict(
+ train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator
+ )
+
+
+def train():
+ global local_rank
+
+ parser = transformers.HfArgumentParser(
+ (ModelArguments, DataArguments, TrainingArguments)
+ )
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+ local_rank = training_args.local_rank
+ compute_dtype = (
+ torch.float16
+ if training_args.fp16
+ else (torch.bfloat16 if training_args.bf16 else torch.float32)
+ )
+
+ bnb_model_from_pretrained_args = {}
+ if training_args.bits in [4, 8]:
+ from transformers import BitsAndBytesConfig
+
+ bnb_model_from_pretrained_args.update(
+ dict(
+ device_map={"": training_args.device},
+ load_in_4bit=training_args.bits == 4,
+ load_in_8bit=training_args.bits == 8,
+ quantization_config=BitsAndBytesConfig(
+ load_in_4bit=training_args.bits == 4,
+ load_in_8bit=training_args.bits == 8,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=compute_dtype,
+ bnb_4bit_use_double_quant=training_args.double_quant,
+ bnb_4bit_quant_type=training_args.quant_type, # {'fp4', 'nf4'}
+ ),
+ )
+ )
+
+ if model_args.vision_tower is not None:
+ if "mpt" in model_args.model_name_or_path:
+ config = transformers.AutoConfig.from_pretrained(
+ model_args.model_name_or_path, trust_remote_code=True
+ )
+ config.attn_config["attn_impl"] = training_args.mpt_attn_impl
+ model = LlavaMPTForCausalLM.from_pretrained(
+ model_args.model_name_or_path,
+ config=config,
+ cache_dir=training_args.cache_dir,
+ **bnb_model_from_pretrained_args,
+ )
+ else:
+ model = LlavaLlamaForCausalLM.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ **bnb_model_from_pretrained_args,
+ )
+ else:
+ model = transformers.LlamaForCausalLM.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ **bnb_model_from_pretrained_args,
+ )
+ model.config.use_cache = False
+
+ if model_args.freeze_backbone:
+ model.model.requires_grad_(False)
+
+ if training_args.bits in [4, 8]:
+ from peft import prepare_model_for_kbit_training
+
+ model.config.torch_dtype = (
+ torch.float32
+ if training_args.fp16
+ else (torch.bfloat16 if training_args.bf16 else torch.float32)
+ )
+ model = prepare_model_for_kbit_training(
+ model, use_gradient_checkpointing=training_args.gradient_checkpointing
+ )
+
+ if training_args.gradient_checkpointing:
+ if hasattr(model, "enable_input_require_grads"):
+ model.enable_input_require_grads()
+ else:
+
+ def make_inputs_require_grad(module, input, output):
+ output.requires_grad_(True)
+
+ model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+ if training_args.lora_enable:
+ from peft import LoraConfig, get_peft_model
+
+ lora_config = LoraConfig(
+ r=training_args.lora_r,
+ lora_alpha=training_args.lora_alpha,
+ target_modules=find_all_linear_names(model),
+ lora_dropout=training_args.lora_dropout,
+ bias=training_args.lora_bias,
+ task_type="CAUSAL_LM",
+ )
+ if training_args.bits == 16:
+ if training_args.bf16:
+ model.to(torch.bfloat16)
+ if training_args.fp16:
+ model.to(torch.float16)
+ rank0_print("Adding LoRA adapters...")
+ model = get_peft_model(model, lora_config)
+
+ if "mpt" in model_args.model_name_or_path:
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ model_max_length=training_args.model_max_length,
+ padding_side="right",
+ )
+ else:
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ model_args.model_name_or_path,
+ cache_dir=training_args.cache_dir,
+ model_max_length=training_args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+
+ if model_args.version == "v0":
+ if tokenizer.pad_token is None:
+ smart_tokenizer_and_embedding_resize(
+ special_tokens_dict=dict(pad_token="[PAD]"),
+ tokenizer=tokenizer,
+ model=model,
+ )
+ elif model_args.version == "v0.5":
+ tokenizer.pad_token = tokenizer.unk_token
+ else:
+ tokenizer.pad_token = tokenizer.unk_token
+ if model_args.version in conversation_lib.conv_templates:
+ conversation_lib.default_conversation = conversation_lib.conv_templates[
+ model_args.version
+ ]
+ else:
+ conversation_lib.default_conversation = conversation_lib.conv_templates[
+ "vicuna_v1"
+ ]
+
+ if model_args.vision_tower is not None:
+ model.get_model().initialize_vision_modules(
+ model_args=model_args, fsdp=training_args.fsdp
+ )
+
+ vision_tower = model.get_vision_tower()
+ vision_tower.to(dtype=torch.float16, device=training_args.device)
+
+ data_args.image_processor = vision_tower.image_processor
+ data_args.is_multimodal = True
+
+ model.config.image_aspect_ratio = data_args.image_aspect_ratio
+ model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
+
+ model.config.tune_mm_mlp_adapter = (
+ training_args.tune_mm_mlp_adapter
+ ) = model_args.tune_mm_mlp_adapter
+ if model_args.tune_mm_mlp_adapter:
+ model.requires_grad_(False)
+ for p in model.get_model().mm_projector.parameters():
+ p.requires_grad = True
+
+ model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter
+ if training_args.freeze_mm_mlp_adapter:
+ for p in model.get_model().mm_projector.parameters():
+ p.requires_grad = False
+
+ if training_args.bits in [4, 8]:
+ model.get_model().mm_projector.to(
+ dtype=compute_dtype, device=training_args.device
+ )
+
+ model.config.mm_use_im_start_end = (
+ data_args.mm_use_im_start_end
+ ) = model_args.mm_use_im_start_end
+ training_args.use_im_start_end = model_args.mm_use_im_start_end
+ model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
+ model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)
+
+ if training_args.bits in [4, 8]:
+ from peft.tuners.lora import LoraLayer
+
+ for name, module in model.named_modules():
+ if isinstance(module, LoraLayer):
+ if training_args.bf16:
+ module = module.to(torch.bfloat16)
+ if "norm" in name:
+ module = module.to(torch.float32)
+ if "lm_head" in name or "embed_tokens" in name:
+ if hasattr(module, "weight"):
+ if training_args.bf16 and module.weight.dtype == torch.float32:
+ module = module.to(torch.bfloat16)
+
+ data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
+ trainer = LLaVATrainer(
+ model=model, tokenizer=tokenizer, args=training_args, **data_module
+ )
+
+ if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
+ trainer.train(resume_from_checkpoint=True)
+ else:
+ trainer.train()
+ trainer.save_state()
+
+ model.config.use_cache = True
+
+ if training_args.lora_enable:
+ state_dict = get_peft_state_maybe_zero_3(
+ model.named_parameters(), training_args.lora_bias
+ )
+ non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
+ model.named_parameters()
+ )
+ if training_args.local_rank == 0 or training_args.local_rank == -1:
+ model.config.save_pretrained(training_args.output_dir)
+ model.save_pretrained(training_args.output_dir, state_dict=state_dict)
+ torch.save(
+ non_lora_state_dict,
+ os.path.join(training_args.output_dir, "non_lora_trainables.bin"),
+ )
+ else:
+ safe_save_model_for_hf_trainer(
+ trainer=trainer, output_dir=training_args.output_dir
+ )
+
+
+if __name__ == "__main__":
+ train()
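+
+
+# Illustrative launch command (paths and hyperparameters are placeholders, not a
+# prescribed configuration; every flag maps to a dataclass field above or to a
+# standard transformers.TrainingArguments field):
+#
+#   torchrun --nproc_per_node=8 model/llava/train/train.py \
+#       --model_name_or_path ./ckpts/vicuna-7b \
+#       --version v1 \
+#       --data_path ./data/llava_instruct.json \
+#       --image_folder ./data/images \
+#       --vision_tower openai/clip-vit-large-patch14 \
+#       --tune_mm_mlp_adapter True \
+#       --bf16 True \
+#       --output_dir ./output/llava-mm-adapter \
+#       --per_device_train_batch_size 16 \
+#       --num_train_epochs 1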
diff --git a/model/llava/train/train_mem.py b/model/llava/train/train_mem.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3940cf7fea248d055a9cb333a08ebca0f782885
--- /dev/null
+++ b/model/llava/train/train_mem.py
@@ -0,0 +1,14 @@
+# Adapted from https://github.com/lm-sys/FastChat. Below is the original copyright:
+# Adapted from tatsu-lab@stanford_alpaca. Below is the original copyright:
+# Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.
+
+# Need to call this before importing transformers.
+from llava.train.llama_flash_attn_monkey_patch import \
+ replace_llama_attn_with_flash_attn
+
+replace_llama_attn_with_flash_attn()
+
+from llava.train.train import train
+
+if __name__ == "__main__":
+ train()
diff --git a/model/llava/utils.py b/model/llava/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a2d5fd533ded77352f5548a0ed027b700365ea4
--- /dev/null
+++ b/model/llava/utils.py
@@ -0,0 +1,134 @@
+import datetime
+import json
+import logging
+import logging.handlers
+import os
+import sys
+
+import requests
+from llava.constants import LOGDIR
+
+server_error_msg = (
+ "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
+)
+moderation_msg = (
+ "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."
+)
+
+handler = None
+
+
+def build_logger(logger_name, logger_filename):
+ global handler
+
+ formatter = logging.Formatter(
+ fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ )
+
+ # Set the format of root handlers
+ if not logging.getLogger().handlers:
+ logging.basicConfig(level=logging.INFO)
+ logging.getLogger().handlers[0].setFormatter(formatter)
+
+ # Redirect stdout and stderr to loggers
+ stdout_logger = logging.getLogger("stdout")
+ stdout_logger.setLevel(logging.INFO)
+ sl = StreamToLogger(stdout_logger, logging.INFO)
+ sys.stdout = sl
+
+ stderr_logger = logging.getLogger("stderr")
+ stderr_logger.setLevel(logging.ERROR)
+ sl = StreamToLogger(stderr_logger, logging.ERROR)
+ sys.stderr = sl
+
+ # Get logger
+ logger = logging.getLogger(logger_name)
+ logger.setLevel(logging.INFO)
+
+ # Add a file handler for all loggers
+ if handler is None:
+ os.makedirs(LOGDIR, exist_ok=True)
+ filename = os.path.join(LOGDIR, logger_filename)
+ handler = logging.handlers.TimedRotatingFileHandler(
+ filename, when="D", utc=True
+ )
+ handler.setFormatter(formatter)
+
+ for name, item in logging.root.manager.loggerDict.items():
+ if isinstance(item, logging.Logger):
+ item.addHandler(handler)
+
+ return logger
+
+
+class StreamToLogger(object):
+ """
+ Fake file-like stream object that redirects writes to a logger instance.
+ """
+
+ def __init__(self, logger, log_level=logging.INFO):
+ self.terminal = sys.stdout
+ self.logger = logger
+ self.log_level = log_level
+ self.linebuf = ""
+
+ def __getattr__(self, attr):
+ return getattr(self.terminal, attr)
+
+ def write(self, buf):
+ temp_linebuf = self.linebuf + buf
+ self.linebuf = ""
+ for line in temp_linebuf.splitlines(True):
+ # From the io.TextIOWrapper docs:
+ # On output, if newline is None, any '\n' characters written
+ # are translated to the system default line separator.
+ # By default sys.stdout.write() expects '\n' newlines and then
+ # translates them so this is still cross platform.
+ if line[-1] == "\n":
+ self.logger.log(self.log_level, line.rstrip())
+ else:
+ self.linebuf += line
+
+ def flush(self):
+ if self.linebuf != "":
+ self.logger.log(self.log_level, self.linebuf.rstrip())
+ self.linebuf = ""
+
+
+def disable_torch_init():
+ """
+ Disable the redundant torch default initialization to accelerate model creation.
+ """
+ import torch
+
+ setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
+ setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
+
+
+def violates_moderation(text):
+ """
+ Check whether the text violates OpenAI moderation API.
+ """
+ url = "https://api.openai.com/v1/moderations"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"],
+ }
+    text = text.replace("\n", "")
+    # json.dumps escapes quotes and backslashes in the input text safely.
+    data = json.dumps({"input": text}).encode("utf-8")
+ try:
+ ret = requests.post(url, headers=headers, data=data, timeout=5)
+ flagged = ret.json()["results"][0]["flagged"]
+ except requests.exceptions.RequestException as e:
+ flagged = False
+ except KeyError as e:
+ flagged = False
+
+ return flagged
+
+
+def pretty_print_semaphore(semaphore):
+ if semaphore is None:
+ return "None"
+ return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"
diff --git a/model/segment_anything/__init__.py b/model/segment_anything/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e66218b2edd8754f1546ad1dca8b604ce891c365
--- /dev/null
+++ b/model/segment_anything/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from .automatic_mask_generator import SamAutomaticMaskGenerator
+from .build_sam import (build_sam, build_sam_vit_b, build_sam_vit_h,
+ build_sam_vit_l, sam_model_registry)
+from .predictor import SamPredictor
diff --git a/model/segment_anything/__pycache__/__init__.cpython-39.pyc b/model/segment_anything/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f075522e70bfb61b378c353320fc277652f579c
Binary files /dev/null and b/model/segment_anything/__pycache__/__init__.cpython-39.pyc differ
diff --git a/model/segment_anything/__pycache__/automatic_mask_generator.cpython-39.pyc b/model/segment_anything/__pycache__/automatic_mask_generator.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff1a779c202bf20f02d7a2912e36c9ad9152534d
Binary files /dev/null and b/model/segment_anything/__pycache__/automatic_mask_generator.cpython-39.pyc differ
diff --git a/model/segment_anything/__pycache__/build_sam.cpython-39.pyc b/model/segment_anything/__pycache__/build_sam.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fded98301b99113f15493dc02a623dc6e0c848bb
Binary files /dev/null and b/model/segment_anything/__pycache__/build_sam.cpython-39.pyc differ
diff --git a/model/segment_anything/__pycache__/predictor.cpython-39.pyc b/model/segment_anything/__pycache__/predictor.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d990398cc09ec1af474f3861bf23964af57f048c
Binary files /dev/null and b/model/segment_anything/__pycache__/predictor.cpython-39.pyc differ
diff --git a/model/segment_anything/automatic_mask_generator.py b/model/segment_anything/automatic_mask_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa4bc4f0324cf7f91ded55a0993b51deeec41537
--- /dev/null
+++ b/model/segment_anything/automatic_mask_generator.py
@@ -0,0 +1,372 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Any, Dict, List, Optional, Tuple
+
+import numpy as np
+import torch
+from torchvision.ops.boxes import batched_nms, box_area # type: ignore
+
+from .modeling import Sam
+from .predictor import SamPredictor
+from .utils.amg import (MaskData, area_from_rle, batch_iterator,
+ batched_mask_to_box, box_xyxy_to_xywh,
+ build_all_layer_point_grids, calculate_stability_score,
+ coco_encode_rle, generate_crop_boxes,
+ is_box_near_crop_edge, mask_to_rle_pytorch,
+ remove_small_regions, rle_to_mask, uncrop_boxes_xyxy,
+ uncrop_masks, uncrop_points)
+
+
+class SamAutomaticMaskGenerator:
+ def __init__(
+ self,
+ model: Sam,
+ points_per_side: Optional[int] = 32,
+ points_per_batch: int = 64,
+ pred_iou_thresh: float = 0.88,
+ stability_score_thresh: float = 0.95,
+ stability_score_offset: float = 1.0,
+ box_nms_thresh: float = 0.7,
+ crop_n_layers: int = 0,
+ crop_nms_thresh: float = 0.7,
+ crop_overlap_ratio: float = 512 / 1500,
+ crop_n_points_downscale_factor: int = 1,
+ point_grids: Optional[List[np.ndarray]] = None,
+ min_mask_region_area: int = 0,
+ output_mode: str = "binary_mask",
+ ) -> None:
+ """
+ Using a SAM model, generates masks for the entire image.
+ Generates a grid of point prompts over the image, then filters
+ low quality and duplicate masks. The default settings are chosen
+ for SAM with a ViT-H backbone.
+
+ Arguments:
+ model (Sam): The SAM model to use for mask prediction.
+ points_per_side (int or None): The number of points to be sampled
+ along one side of the image. The total number of points is
+ points_per_side**2. If None, 'point_grids' must provide explicit
+ point sampling.
+ points_per_batch (int): Sets the number of points run simultaneously
+ by the model. Higher numbers may be faster but use more GPU memory.
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
+ model's predicted mask quality.
+ stability_score_thresh (float): A filtering threshold in [0,1], using
+ the stability of the mask under changes to the cutoff used to binarize
+ the model's mask predictions.
+ stability_score_offset (float): The amount to shift the cutoff when
+                calculating the stability score.
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
+ suppression to filter duplicate masks.
+ crop_n_layers (int): If >0, mask prediction will be run again on
+ crops of the image. Sets the number of layers to run, where each
+ layer has 2**i_layer number of image crops.
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
+ suppression to filter duplicate masks between different crops.
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
+ In the first crop layer, crops will overlap by this fraction of
+ the image length. Later layers with more crops scale down this overlap.
+ crop_n_points_downscale_factor (int): The number of points-per-side
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
+ point_grids (list(np.ndarray) or None): A list over explicit grids
+ of points used for sampling, normalized to [0,1]. The nth grid in the
+ list is used in the nth crop layer. Exclusive with points_per_side.
+ min_mask_region_area (int): If >0, postprocessing will be applied
+ to remove disconnected regions and holes in masks with area smaller
+ than min_mask_region_area. Requires opencv.
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
+ For large resolutions, 'binary_mask' may consume large amounts of
+ memory.
+ """
+
+ assert (points_per_side is None) != (
+ point_grids is None
+ ), "Exactly one of points_per_side or point_grid must be provided."
+ if points_per_side is not None:
+ self.point_grids = build_all_layer_point_grids(
+ points_per_side,
+ crop_n_layers,
+ crop_n_points_downscale_factor,
+ )
+ elif point_grids is not None:
+ self.point_grids = point_grids
+ else:
+ raise ValueError("Can't have both points_per_side and point_grid be None.")
+
+ assert output_mode in [
+ "binary_mask",
+ "uncompressed_rle",
+ "coco_rle",
+ ], f"Unknown output_mode {output_mode}."
+ if output_mode == "coco_rle":
+ from pycocotools import \
+ mask as mask_utils # type: ignore # noqa: F401
+
+ if min_mask_region_area > 0:
+ import cv2 # type: ignore # noqa: F401
+
+ self.predictor = SamPredictor(model)
+ self.points_per_batch = points_per_batch
+ self.pred_iou_thresh = pred_iou_thresh
+ self.stability_score_thresh = stability_score_thresh
+ self.stability_score_offset = stability_score_offset
+ self.box_nms_thresh = box_nms_thresh
+ self.crop_n_layers = crop_n_layers
+ self.crop_nms_thresh = crop_nms_thresh
+ self.crop_overlap_ratio = crop_overlap_ratio
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
+ self.min_mask_region_area = min_mask_region_area
+ self.output_mode = output_mode
+
+ @torch.no_grad()
+ def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
+ """
+ Generates masks for the given image.
+
+ Arguments:
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
+
+ Returns:
+ list(dict(str, any)): A list over records for masks. Each record is
+ a dict containing the following keys:
+ segmentation (dict(str, any) or np.ndarray): The mask. If
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
+ is a dictionary containing the RLE.
+ bbox (list(float)): The box around the mask, in XYWH format.
+ area (int): The area in pixels of the mask.
+ predicted_iou (float): The model's own prediction of the mask's
+ quality. This is filtered by the pred_iou_thresh parameter.
+ point_coords (list(list(float))): The point coordinates input
+ to the model to generate this mask.
+ stability_score (float): A measure of the mask's quality. This
+ is filtered on using the stability_score_thresh parameter.
+ crop_box (list(float)): The crop of the image used to generate
+ the mask, given in XYWH format.
+ """
+
+ # Generate masks
+ mask_data = self._generate_masks(image)
+
+ # Filter small disconnected regions and holes in masks
+ if self.min_mask_region_area > 0:
+ mask_data = self.postprocess_small_regions(
+ mask_data,
+ self.min_mask_region_area,
+ max(self.box_nms_thresh, self.crop_nms_thresh),
+ )
+
+ # Encode masks
+ if self.output_mode == "coco_rle":
+ mask_data["segmentations"] = [
+ coco_encode_rle(rle) for rle in mask_data["rles"]
+ ]
+ elif self.output_mode == "binary_mask":
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
+ else:
+ mask_data["segmentations"] = mask_data["rles"]
+
+ # Write mask records
+ curr_anns = []
+ for idx in range(len(mask_data["segmentations"])):
+ ann = {
+ "segmentation": mask_data["segmentations"][idx],
+ "area": area_from_rle(mask_data["rles"][idx]),
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
+ "point_coords": [mask_data["points"][idx].tolist()],
+ "stability_score": mask_data["stability_score"][idx].item(),
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
+ }
+ curr_anns.append(ann)
+
+ return curr_anns
+
+ def _generate_masks(self, image: np.ndarray) -> MaskData:
+ orig_size = image.shape[:2]
+ crop_boxes, layer_idxs = generate_crop_boxes(
+ orig_size, self.crop_n_layers, self.crop_overlap_ratio
+ )
+
+ # Iterate over image crops
+ data = MaskData()
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
+ data.cat(crop_data)
+
+ # Remove duplicate masks between crops
+ if len(crop_boxes) > 1:
+ # Prefer masks from smaller crops
+ scores = 1 / box_area(data["crop_boxes"])
+ scores = scores.to(data["boxes"].device)
+ keep_by_nms = batched_nms(
+ data["boxes"].float(),
+ scores,
+ torch.zeros_like(data["boxes"][:, 0]), # categories
+ iou_threshold=self.crop_nms_thresh,
+ )
+ data.filter(keep_by_nms)
+
+ data.to_numpy()
+ return data
+
+ def _process_crop(
+ self,
+ image: np.ndarray,
+ crop_box: List[int],
+ crop_layer_idx: int,
+ orig_size: Tuple[int, ...],
+ ) -> MaskData:
+ # Crop the image and calculate embeddings
+ x0, y0, x1, y1 = crop_box
+ cropped_im = image[y0:y1, x0:x1, :]
+ cropped_im_size = cropped_im.shape[:2]
+ self.predictor.set_image(cropped_im)
+
+ # Get points for this crop
+ points_scale = np.array(cropped_im_size)[None, ::-1]
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
+
+ # Generate masks for this crop in batches
+ data = MaskData()
+ for (points,) in batch_iterator(self.points_per_batch, points_for_image):
+ batch_data = self._process_batch(
+ points, cropped_im_size, crop_box, orig_size
+ )
+ data.cat(batch_data)
+ del batch_data
+ self.predictor.reset_image()
+
+ # Remove duplicates within this crop.
+ keep_by_nms = batched_nms(
+ data["boxes"].float(),
+ data["iou_preds"],
+ torch.zeros_like(data["boxes"][:, 0]), # categories
+ iou_threshold=self.box_nms_thresh,
+ )
+ data.filter(keep_by_nms)
+
+ # Return to the original image frame
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
+ data["points"] = uncrop_points(data["points"], crop_box)
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
+
+ return data
+
+ def _process_batch(
+ self,
+ points: np.ndarray,
+ im_size: Tuple[int, ...],
+ crop_box: List[int],
+ orig_size: Tuple[int, ...],
+ ) -> MaskData:
+ orig_h, orig_w = orig_size
+
+ # Run model on this batch
+ transformed_points = self.predictor.transform.apply_coords(points, im_size)
+ in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
+ in_labels = torch.ones(
+ in_points.shape[0], dtype=torch.int, device=in_points.device
+ )
+ masks, iou_preds, _ = self.predictor.predict_torch(
+ in_points[:, None, :],
+ in_labels[:, None],
+ multimask_output=True,
+ return_logits=True,
+ )
+
+ # Serialize predictions and store in MaskData
+ data = MaskData(
+ masks=masks.flatten(0, 1),
+ iou_preds=iou_preds.flatten(0, 1),
+ points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
+ )
+ del masks
+
+ # Filter by predicted IoU
+ if self.pred_iou_thresh > 0.0:
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
+ data.filter(keep_mask)
+
+ # Calculate stability score
+ data["stability_score"] = calculate_stability_score(
+ data["masks"],
+ self.predictor.model.mask_threshold,
+ self.stability_score_offset,
+ )
+ if self.stability_score_thresh > 0.0:
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
+ data.filter(keep_mask)
+
+ # Threshold masks and calculate boxes
+ data["masks"] = data["masks"] > self.predictor.model.mask_threshold
+ data["boxes"] = batched_mask_to_box(data["masks"])
+
+ # Filter boxes that touch crop boundaries
+ keep_mask = ~is_box_near_crop_edge(
+ data["boxes"], crop_box, [0, 0, orig_w, orig_h]
+ )
+ if not torch.all(keep_mask):
+ data.filter(keep_mask)
+
+ # Compress to RLE
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
+ del data["masks"]
+
+ return data
+
+ @staticmethod
+ def postprocess_small_regions(
+ mask_data: MaskData, min_area: int, nms_thresh: float
+ ) -> MaskData:
+ """
+ Removes small disconnected regions and holes in masks, then reruns
+ box NMS to remove any new duplicates.
+
+ Edits mask_data in place.
+
+ Requires open-cv as a dependency.
+ """
+ if len(mask_data["rles"]) == 0:
+ return mask_data
+
+ # Filter small disconnected regions and holes
+ new_masks = []
+ scores = []
+ for rle in mask_data["rles"]:
+ mask = rle_to_mask(rle)
+
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
+ unchanged = not changed
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
+ unchanged = unchanged and not changed
+
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
+ # Give score=0 to changed masks and score=1 to unchanged masks
+ # so NMS will prefer ones that didn't need postprocessing
+ scores.append(float(unchanged))
+
+ # Recalculate boxes and remove any new duplicates
+ masks = torch.cat(new_masks, dim=0)
+ boxes = batched_mask_to_box(masks)
+ keep_by_nms = batched_nms(
+ boxes.float(),
+ torch.as_tensor(scores),
+ torch.zeros_like(boxes[:, 0]), # categories
+ iou_threshold=nms_thresh,
+ )
+
+ # Only recalculate RLEs for masks that have changed
+ for i_mask in keep_by_nms:
+ if scores[i_mask] == 0.0:
+ mask_torch = masks[i_mask].unsqueeze(0)
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
+ mask_data.filter(keep_by_nms)
+
+ return mask_data
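+
+
+# Usage sketch (illustrative; the checkpoint filename and image path are placeholders):
+#
+#     import cv2
+#     from model.segment_anything import SamAutomaticMaskGenerator, sam_model_registry
+#
+#     sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
+#     sam.to("cuda")
+#     generator = SamAutomaticMaskGenerator(sam, points_per_side=32)
+#     image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
+#     masks = generator.generate(image)  # dicts with "segmentation", "bbox", "area", ...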
diff --git a/model/segment_anything/build_sam.py b/model/segment_anything/build_sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..788d25ad5a6fd32c112201301b320f5884d6e8e8
--- /dev/null
+++ b/model/segment_anything/build_sam.py
@@ -0,0 +1,108 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from functools import partial
+
+import torch
+
+from .modeling import (ImageEncoderViT, MaskDecoder, PromptEncoder, Sam,
+ TwoWayTransformer)
+
+
+def build_sam_vit_h(checkpoint=None):
+ return _build_sam(
+ encoder_embed_dim=1280,
+ encoder_depth=32,
+ encoder_num_heads=16,
+ encoder_global_attn_indexes=[7, 15, 23, 31],
+ checkpoint=checkpoint,
+ )
+
+
+build_sam = build_sam_vit_h
+
+
+def build_sam_vit_l(checkpoint=None):
+ return _build_sam(
+ encoder_embed_dim=1024,
+ encoder_depth=24,
+ encoder_num_heads=16,
+ encoder_global_attn_indexes=[5, 11, 17, 23],
+ checkpoint=checkpoint,
+ )
+
+
+def build_sam_vit_b(checkpoint=None):
+ return _build_sam(
+ encoder_embed_dim=768,
+ encoder_depth=12,
+ encoder_num_heads=12,
+ encoder_global_attn_indexes=[2, 5, 8, 11],
+ checkpoint=checkpoint,
+ )
+
+
+sam_model_registry = {
+ "default": build_sam_vit_h,
+ "vit_h": build_sam_vit_h,
+ "vit_l": build_sam_vit_l,
+ "vit_b": build_sam_vit_b,
+}
+
+
+def _build_sam(
+ encoder_embed_dim,
+ encoder_depth,
+ encoder_num_heads,
+ encoder_global_attn_indexes,
+ checkpoint=None,
+):
+ prompt_embed_dim = 256
+ image_size = 1024
+ vit_patch_size = 16
+ image_embedding_size = image_size // vit_patch_size
+ sam = Sam(
+ image_encoder=ImageEncoderViT(
+ depth=encoder_depth,
+ embed_dim=encoder_embed_dim,
+ img_size=image_size,
+ mlp_ratio=4,
+ norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
+ num_heads=encoder_num_heads,
+ patch_size=vit_patch_size,
+ qkv_bias=True,
+ use_rel_pos=True,
+ global_attn_indexes=encoder_global_attn_indexes,
+ window_size=14,
+ out_chans=prompt_embed_dim,
+ ),
+ prompt_encoder=PromptEncoder(
+ embed_dim=prompt_embed_dim,
+ image_embedding_size=(image_embedding_size, image_embedding_size),
+ input_image_size=(image_size, image_size),
+ mask_in_chans=16,
+ ),
+ mask_decoder=MaskDecoder(
+ num_multimask_outputs=3,
+ transformer=TwoWayTransformer(
+ depth=2,
+ embedding_dim=prompt_embed_dim,
+ mlp_dim=2048,
+ num_heads=8,
+ ),
+ transformer_dim=prompt_embed_dim,
+ iou_head_depth=3,
+ iou_head_hidden_dim=256,
+ ),
+ pixel_mean=[123.675, 116.28, 103.53],
+ pixel_std=[58.395, 57.12, 57.375],
+ )
+ sam.eval()
+ if checkpoint is not None:
+ with open(checkpoint, "rb") as f:
+ state_dict = torch.load(f)
+ sam.load_state_dict(state_dict, strict=False)
+ return sam
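+
+
+# Usage sketch (checkpoint filename is a placeholder): the registry maps a variant
+# name to its builder.
+#
+#     from model.segment_anything import sam_model_registry
+#
+#     sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")
+#     sam.to("cuda")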
diff --git a/model/segment_anything/modeling/__init__.py b/model/segment_anything/modeling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..088af386e5b45d14e99d11dec132821ddba5df39
--- /dev/null
+++ b/model/segment_anything/modeling/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from .image_encoder import ImageEncoderViT
+from .mask_decoder import MaskDecoder
+from .prompt_encoder import PromptEncoder
+from .sam import Sam
+from .transformer import TwoWayTransformer
diff --git a/model/segment_anything/modeling/__pycache__/__init__.cpython-39.pyc b/model/segment_anything/modeling/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e091e2e181f5892edd3343487eed09b9e8b13c3
Binary files /dev/null and b/model/segment_anything/modeling/__pycache__/__init__.cpython-39.pyc differ
diff --git a/model/segment_anything/modeling/__pycache__/common.cpython-39.pyc b/model/segment_anything/modeling/__pycache__/common.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cbac8c3fc868c89f06178bd6ee8396486820bb8
Binary files /dev/null and b/model/segment_anything/modeling/__pycache__/common.cpython-39.pyc differ
diff --git a/model/segment_anything/modeling/__pycache__/image_encoder.cpython-39.pyc b/model/segment_anything/modeling/__pycache__/image_encoder.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7922e2eda5a6033d7aad2c0157eb68535026facc
Binary files /dev/null and b/model/segment_anything/modeling/__pycache__/image_encoder.cpython-39.pyc differ
diff --git a/model/segment_anything/modeling/__pycache__/mask_decoder.cpython-39.pyc b/model/segment_anything/modeling/__pycache__/mask_decoder.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bdfb05f1923112928e7115762401bf25d6494b9
Binary files /dev/null and b/model/segment_anything/modeling/__pycache__/mask_decoder.cpython-39.pyc differ
diff --git a/model/segment_anything/modeling/__pycache__/prompt_encoder.cpython-39.pyc b/model/segment_anything/modeling/__pycache__/prompt_encoder.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..393b6c403c7072ad8a18e0711f700880eff8e67c
Binary files /dev/null and b/model/segment_anything/modeling/__pycache__/prompt_encoder.cpython-39.pyc differ
diff --git a/model/segment_anything/modeling/__pycache__/sam.cpython-39.pyc b/model/segment_anything/modeling/__pycache__/sam.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29b82bd2f8728c457310207239f8ec27fba8208d
Binary files /dev/null and b/model/segment_anything/modeling/__pycache__/sam.cpython-39.pyc differ
diff --git a/model/segment_anything/modeling/__pycache__/transformer.cpython-39.pyc b/model/segment_anything/modeling/__pycache__/transformer.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56193f3963267f57d4afdb623f5ed9d4a6a648bc
Binary files /dev/null and b/model/segment_anything/modeling/__pycache__/transformer.cpython-39.pyc differ
diff --git a/model/segment_anything/modeling/common.py b/model/segment_anything/modeling/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8727816d4861a2d0c7c367879951d1d4fa791fb
--- /dev/null
+++ b/model/segment_anything/modeling/common.py
@@ -0,0 +1,43 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Type
+
+import torch
+import torch.nn as nn
+
+
+class MLPBlock(nn.Module):
+ def __init__(
+ self,
+ embedding_dim: int,
+ mlp_dim: int,
+ act: Type[nn.Module] = nn.GELU,
+ ) -> None:
+ super().__init__()
+ self.lin1 = nn.Linear(embedding_dim, mlp_dim)
+ self.lin2 = nn.Linear(mlp_dim, embedding_dim)
+ self.act = act()
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ return self.lin2(self.act(self.lin1(x)))
+
+
+# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
+# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
+class LayerNorm2d(nn.Module):
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(num_channels))
+ self.bias = nn.Parameter(torch.zeros(num_channels))
+ self.eps = eps
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ u = x.mean(1, keepdim=True)
+ s = (x - u).pow(2).mean(1, keepdim=True)
+ x = (x - u) / torch.sqrt(s + self.eps)
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
+ return x
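+
+
+# --- Editor's note: minimal smoke test (an illustrative sketch, not part of the
+# upstream SAM code). Running `python -m model.segment_anything.modeling.common`
+# checks that LayerNorm2d normalizes over the channel dim of an NCHW tensor.
+if __name__ == "__main__":
+    t = torch.randn(2, 8, 4, 4)
+    norm = LayerNorm2d(8)
+    out = norm(t)
+    # With the default weight=1 / bias=0, per-pixel channel stats are ~0 mean, ~1 std.
+    print(out.shape, out.mean(dim=1).abs().max().item())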
diff --git a/model/segment_anything/modeling/image_encoder.py b/model/segment_anything/modeling/image_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..b472a3d6b7a609134afe18d7f8740e0c01a56842
--- /dev/null
+++ b/model/segment_anything/modeling/image_encoder.py
@@ -0,0 +1,426 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Optional, Tuple, Type
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .common import LayerNorm2d, MLPBlock
+
+
+# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
+class ImageEncoderViT(nn.Module):
+ def __init__(
+ self,
+ img_size: int = 1024,
+ patch_size: int = 16,
+ in_chans: int = 3,
+ embed_dim: int = 768,
+ depth: int = 12,
+ num_heads: int = 12,
+ mlp_ratio: float = 4.0,
+ out_chans: int = 256,
+ qkv_bias: bool = True,
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
+ act_layer: Type[nn.Module] = nn.GELU,
+ use_abs_pos: bool = True,
+ use_rel_pos: bool = False,
+ rel_pos_zero_init: bool = True,
+ window_size: int = 0,
+ global_attn_indexes: Tuple[int, ...] = (),
+ ) -> None:
+ """
+ Args:
+ img_size (int): Input image size.
+ patch_size (int): Patch size.
+ in_chans (int): Number of input image channels.
+ embed_dim (int): Patch embedding dimension.
+ depth (int): Depth of ViT.
+ num_heads (int): Number of attention heads in each ViT block.
+            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+            out_chans (int): Number of output channels produced by the neck.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+ norm_layer (nn.Module): Normalization layer.
+ act_layer (nn.Module): Activation layer.
+ use_abs_pos (bool): If True, use absolute positional embeddings.
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ window_size (int): Window size for window attention blocks.
+ global_attn_indexes (list): Indexes for blocks using global attention.
+ """
+ super().__init__()
+ self.img_size = img_size
+ self.embed_dim = embed_dim
+ self.out_chans = out_chans
+
+ self.patch_embed = PatchEmbed(
+ kernel_size=(patch_size, patch_size),
+ stride=(patch_size, patch_size),
+ in_chans=in_chans,
+ embed_dim=embed_dim,
+ )
+
+ self.pos_embed: Optional[nn.Parameter] = None
+ if use_abs_pos:
+ # Initialize absolute positional embedding with pretrain image size.
+ self.pos_embed = nn.Parameter(
+ torch.zeros(
+ 1, img_size // patch_size, img_size // patch_size, embed_dim
+ )
+ )
+
+ self.blocks = nn.ModuleList()
+ for i in range(depth):
+ block = Block(
+ dim=embed_dim,
+ num_heads=num_heads,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ norm_layer=norm_layer,
+ act_layer=act_layer,
+ use_rel_pos=use_rel_pos,
+ rel_pos_zero_init=rel_pos_zero_init,
+ window_size=window_size if i not in global_attn_indexes else 0,
+ input_size=(img_size // patch_size, img_size // patch_size),
+ )
+ self.blocks.append(block)
+
+ self.neck = nn.Sequential(
+ nn.Conv2d(
+ embed_dim,
+ out_chans,
+ kernel_size=1,
+ bias=False,
+ ),
+ LayerNorm2d(out_chans),
+ nn.Conv2d(
+ out_chans,
+ out_chans,
+ kernel_size=3,
+ padding=1,
+ bias=False,
+ ),
+ LayerNorm2d(out_chans),
+ )
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.patch_embed(x)
+ if self.pos_embed is not None:
+ x = x + self.pos_embed
+
+ for blk in self.blocks:
+ x = blk(x)
+
+ dtype = x.dtype
+ if dtype == torch.float16: # prevent overflow
+ with torch.autocast(device_type="cuda", dtype=torch.float32):
+ x = self.neck(x.permute(0, 3, 1, 2))
+ x = x.to(dtype)
+ else:
+ x = self.neck(x.permute(0, 3, 1, 2))
+ return x
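+
+    # Editor's note (illustrative, not part of upstream SAM): with the defaults
+    # above (img_size=1024, patch_size=16, out_chans=256), forward maps
+    #   x: (B, 3, 1024, 1024) --patch_embed--> (B, 64, 64, embed_dim)
+    #     --blocks--> (B, 64, 64, embed_dim) --neck--> (B, 256, 64, 64),
+    # i.e. a 64x64 grid of 256-d embeddings that the SAM mask decoder consumes.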
+
+
+class Block(nn.Module):
+ """Transformer blocks with support of window attention and residual propagation blocks"""
+
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int,
+ mlp_ratio: float = 4.0,
+ qkv_bias: bool = True,
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
+ act_layer: Type[nn.Module] = nn.GELU,
+ use_rel_pos: bool = False,
+ rel_pos_zero_init: bool = True,
+ window_size: int = 0,
+ input_size: Optional[Tuple[int, int]] = None,
+ ) -> None:
+ """
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads in each ViT block.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+ norm_layer (nn.Module): Normalization layer.
+ act_layer (nn.Module): Activation layer.
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ window_size (int): Window size for window attention blocks. If it equals 0, then
+ use global attention.
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
+ positional parameter size.
+ """
+ super().__init__()
+ self.norm1 = norm_layer(dim)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads,
+ qkv_bias=qkv_bias,
+ use_rel_pos=use_rel_pos,
+ rel_pos_zero_init=rel_pos_zero_init,
+ input_size=input_size if window_size == 0 else (window_size, window_size),
+ )
+
+ self.norm2 = norm_layer(dim)
+ self.mlp = MLPBlock(
+ embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer
+ )
+
+ self.window_size = window_size
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ shortcut = x
+ x = self.norm1(x)
+ # Window partition
+ if self.window_size > 0:
+ H, W = x.shape[1], x.shape[2]
+ x, pad_hw = window_partition(x, self.window_size)
+
+ x = self.attn(x)
+ # Reverse window partition
+ if self.window_size > 0:
+ x = window_unpartition(x, self.window_size, pad_hw, (H, W))
+
+ x = shortcut + x
+ x = x + self.mlp(self.norm2(x))
+
+ return x
+
+
+class Attention(nn.Module):
+ """Multi-head Attention block with relative position embeddings."""
+
+ def __init__(
+ self,
+ dim: int,
+ num_heads: int = 8,
+ qkv_bias: bool = True,
+ use_rel_pos: bool = False,
+ rel_pos_zero_init: bool = True,
+ input_size: Optional[Tuple[int, int]] = None,
+ ) -> None:
+ """
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
+            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
+ positional parameter size.
+ """
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = head_dim**-0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.proj = nn.Linear(dim, dim)
+
+ self.use_rel_pos = use_rel_pos
+ if self.use_rel_pos:
+ assert (
+ input_size is not None
+ ), "Input size must be provided if using relative positional encoding."
+ # initialize relative positional embeddings
+ self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
+ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ B, H, W, _ = x.shape
+ # qkv with shape (3, B, nHead, H * W, C)
+ qkv = (
+ self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+ )
+ # q, k, v with shape (B * nHead, H * W, C)
+ q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
+
+ attn = (q * self.scale) @ k.transpose(-2, -1)
+
+ if self.use_rel_pos:
+ attn = add_decomposed_rel_pos(
+ attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)
+ )
+
+ attn = attn.softmax(dim=-1)
+ x = (
+ (attn @ v)
+ .view(B, self.num_heads, H, W, -1)
+ .permute(0, 2, 3, 1, 4)
+ .reshape(B, H, W, -1)
+ )
+ x = self.proj(x)
+
+ return x
+
+
+def window_partition(
+ x: torch.Tensor, window_size: int
+) -> Tuple[torch.Tensor, Tuple[int, int]]:
+ """
+ Partition into non-overlapping windows with padding if needed.
+ Args:
+ x (tensor): input tokens with [B, H, W, C].
+ window_size (int): window size.
+
+ Returns:
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
+ (Hp, Wp): padded height and width before partition
+ """
+ B, H, W, C = x.shape
+
+ pad_h = (window_size - H % window_size) % window_size
+ pad_w = (window_size - W % window_size) % window_size
+ if pad_h > 0 or pad_w > 0:
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
+ Hp, Wp = H + pad_h, W + pad_w
+
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
+ windows = (
+ x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ )
+ return windows, (Hp, Wp)
+
+
+def window_unpartition(
+ windows: torch.Tensor,
+ window_size: int,
+ pad_hw: Tuple[int, int],
+ hw: Tuple[int, int],
+) -> torch.Tensor:
+ """
+    Window unpartition into original sequences, removing any padding.
+ Args:
+ windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
+ window_size (int): window size.
+ pad_hw (Tuple): padded height and width (Hp, Wp).
+ hw (Tuple): original height and width (H, W) before padding.
+
+ Returns:
+ x: unpartitioned sequences with [B, H, W, C].
+ """
+ Hp, Wp = pad_hw
+ H, W = hw
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
+ x = windows.view(
+ B, Hp // window_size, Wp // window_size, window_size, window_size, -1
+ )
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
+
+ if Hp > H or Wp > W:
+ x = x[:, :H, :W, :].contiguous()
+ return x
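+
+
+# Editor's note (illustrative sketch, not part of upstream SAM): window_partition
+# and window_unpartition are inverses up to the zero padding, e.g.
+#   >>> x = torch.randn(1, 30, 30, 8)
+#   >>> windows, pad_hw = window_partition(x, window_size=14)  # pads 30 -> 42
+#   >>> windows.shape
+#   torch.Size([9, 14, 14, 8])
+#   >>> torch.allclose(window_unpartition(windows, 14, pad_hw, (30, 30)), x)
+#   True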
+
+
+def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
+ """
+ Get relative positional embeddings according to the relative positions of
+ query and key sizes.
+ Args:
+ q_size (int): size of query q.
+ k_size (int): size of key k.
+ rel_pos (Tensor): relative position embeddings (L, C).
+
+ Returns:
+ Extracted positional embeddings according to relative positions.
+ """
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
+ # Interpolate rel pos if needed.
+ if rel_pos.shape[0] != max_rel_dist:
+ # Interpolate rel pos.
+ rel_pos_resized = F.interpolate(
+ rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
+ size=max_rel_dist,
+ mode="linear",
+ )
+ rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
+ else:
+ rel_pos_resized = rel_pos
+
+ # Scale the coords with short length if shapes for q and k are different.
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
+ k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
+
+ return rel_pos_resized[relative_coords.long()]
+
+
+def add_decomposed_rel_pos(
+ attn: torch.Tensor,
+ q: torch.Tensor,
+ rel_pos_h: torch.Tensor,
+ rel_pos_w: torch.Tensor,
+ q_size: Tuple[int, int],
+ k_size: Tuple[int, int],
+) -> torch.Tensor:
+ """
+ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
+ https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
+ Args:
+ attn (Tensor): attention map.
+ q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
+ rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
+ rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
+ q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
+ k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
+
+ Returns:
+ attn (Tensor): attention map with added relative positional embeddings.
+ """
+ q_h, q_w = q_size
+ k_h, k_w = k_size
+ Rh = get_rel_pos(q_h, k_h, rel_pos_h)
+ Rw = get_rel_pos(q_w, k_w, rel_pos_w)
+
+ B, _, dim = q.shape
+ r_q = q.reshape(B, q_h, q_w, dim)
+ rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
+ rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
+
+ attn = (
+ attn.view(B, q_h, q_w, k_h, k_w)
+ + rel_h[:, :, :, :, None]
+ + rel_w[:, :, :, None, :]
+ ).view(B, q_h * q_w, k_h * k_w)
+
+ return attn
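+
+
+# Editor's note (summary, not part of upstream SAM): add_decomposed_rel_pos adds
+# the MViTv2-style decomposed bias
+#   attn[b, (h, w), (kh, kw)] += q[b, h, w, :] . Rh[h, kh, :] + q[b, h, w, :] . Rw[w, kw, :]
+# so the relative-position tables stay O(H + W) in size (rel_pos_h and rel_pos_w)
+# instead of a full O(H * W) table over 2D offsets.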
+
+
+class PatchEmbed(nn.Module):
+ """
+ Image to Patch Embedding.
+ """
+
+ def __init__(
+ self,
+ kernel_size: Tuple[int, int] = (16, 16),
+ stride: Tuple[int, int] = (16, 16),
+ padding: Tuple[int, int] = (0, 0),
+ in_chans: int = 3,
+ embed_dim: int = 768,
+ ) -> None:
+ """
+ Args:
+ kernel_size (Tuple): kernel size of the projection layer.
+ stride (Tuple): stride of the projection layer.
+ padding (Tuple): padding size of the projection layer.
+ in_chans (int): Number of input image channels.
+ embed_dim (int): Patch embedding dimension.
+ """
+ super().__init__()
+
+ self.proj = nn.Conv2d(
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
+ )
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.proj(x)
+ # B C H W -> B H W C
+ x = x.permute(0, 2, 3, 1)
+ return x
diff --git a/model/segment_anything/modeling/mask_decoder.py b/model/segment_anything/modeling/mask_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb104ea48ae3dfa94b3622bfd7d2556f8c59f43d
--- /dev/null
+++ b/model/segment_anything/modeling/mask_decoder.py
@@ -0,0 +1,191 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import List, Tuple, Type
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .common import LayerNorm2d
+
+
+class MaskDecoder(nn.Module):
+ def __init__(
+ self,
+ *,
+ transformer_dim: int,
+ transformer: nn.Module,
+ num_multimask_outputs: int = 3,
+ activation: Type[nn.Module] = nn.GELU,
+ iou_head_depth: int = 3,
+ iou_head_hidden_dim: int = 256,
+ ) -> None:
+ """
+ Predicts masks given an image and prompt embeddings, using a
+ transformer architecture.
+
+ Arguments:
+ transformer_dim (int): the channel dimension of the transformer
+ transformer (nn.Module): the transformer used to predict masks
+ num_multimask_outputs (int): the number of masks to predict
+ when disambiguating masks
+ activation (nn.Module): the type of activation to use when
+ upscaling masks
+ iou_head_depth (int): the depth of the MLP used to predict
+ mask quality
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
+ used to predict mask quality
+ """
+ super().__init__()
+ self.transformer_dim = transformer_dim
+ self.transformer = transformer
+
+ self.num_multimask_outputs = num_multimask_outputs
+
+ self.iou_token = nn.Embedding(1, transformer_dim)
+ self.num_mask_tokens = num_multimask_outputs + 1
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
+
+ self.output_upscaling = nn.Sequential(
+ nn.ConvTranspose2d(
+ transformer_dim, transformer_dim // 4, kernel_size=2, stride=2
+ ),
+ LayerNorm2d(transformer_dim // 4),
+ activation(),
+ nn.ConvTranspose2d(
+ transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2
+ ),
+ activation(),
+ )
+ self.output_hypernetworks_mlps = nn.ModuleList(
+ [
+ MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
+ for i in range(self.num_mask_tokens)
+ ]
+ )
+
+ self.iou_prediction_head = MLP(
+ transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
+ )
+
+ def forward(
+ self,
+ image_embeddings: torch.Tensor,
+ image_pe: torch.Tensor,
+ sparse_prompt_embeddings: torch.Tensor,
+ dense_prompt_embeddings: torch.Tensor,
+ multimask_output: bool,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Predict masks given image and prompt embeddings.
+
+ Arguments:
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
+ multimask_output (bool): Whether to return multiple masks or a single
+ mask.
+
+ Returns:
+ torch.Tensor: batched predicted masks
+ torch.Tensor: batched predictions of mask quality
+ """
+ masks, iou_pred = self.predict_masks(
+ image_embeddings=image_embeddings,
+ image_pe=image_pe,
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
+ dense_prompt_embeddings=dense_prompt_embeddings,
+ )
+
+ # Select the correct mask or masks for output
+ if multimask_output:
+ mask_slice = slice(1, None)
+ else:
+ mask_slice = slice(0, 1)
+ masks = masks[:, mask_slice, :, :]
+ iou_pred = iou_pred[:, mask_slice]
+
+ # Prepare output
+ return masks, iou_pred
+
+ def predict_masks(
+ self,
+ image_embeddings: torch.Tensor,
+ image_pe: torch.Tensor,
+ sparse_prompt_embeddings: torch.Tensor,
+ dense_prompt_embeddings: torch.Tensor,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """Predicts masks. See 'forward' for more details."""
+ # Concatenate output tokens
+ output_tokens = torch.cat(
+ [self.iou_token.weight, self.mask_tokens.weight], dim=0
+ )
+ output_tokens = output_tokens.unsqueeze(0).expand(
+ sparse_prompt_embeddings.size(0), -1, -1
+ )
+
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
+
+ # image_embeddings: [1, C, H, W], tokens: [B, N, C]
+ # dense_prompt_embeddings: [B, C, H, W]
+ # Expand per-image data in batch direction to be per-mask
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
+ src = src + dense_prompt_embeddings
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
+ b, c, h, w = src.shape
+
+ # Run the transformer
+ hs, src = self.transformer(src, pos_src, tokens)
+ iou_token_out = hs[:, 0, :]
+ mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
+
+ # Upscale mask embeddings and predict masks using the mask tokens
+ src = src.transpose(1, 2).view(b, c, h, w)
+ upscaled_embedding = self.output_upscaling(src)
+ hyper_in_list: List[torch.Tensor] = []
+ for i in range(self.num_mask_tokens):
+ hyper_in_list.append(
+ self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])
+ )
+ hyper_in = torch.stack(hyper_in_list, dim=1)
+ b, c, h, w = upscaled_embedding.shape
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(
+ b, self.num_mask_tokens, h, w
+ )
+
+ # Generate mask quality predictions
+ iou_pred = self.iou_prediction_head(iou_token_out)
+
+ return masks, iou_pred
+
+
+# Lightly adapted from
+# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
+class MLP(nn.Module):
+ def __init__(
+ self,
+ input_dim: int,
+ hidden_dim: int,
+ output_dim: int,
+ num_layers: int,
+ sigmoid_output: bool = False,
+ ) -> None:
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
+ )
+ self.sigmoid_output = sigmoid_output
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+ if self.sigmoid_output:
+ x = F.sigmoid(x)
+ return x
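+
+
+# Editor's note (illustrative sketch, not part of upstream SAM): with SAM's
+# defaults (transformer_dim=256 and a TwoWayTransformer as `transformer`), a
+# typical call looks like
+#   >>> masks, iou_pred = decoder(
+#   ...     image_embeddings=feat,            # (1, 256, 64, 64) from the image encoder
+#   ...     image_pe=pe,                      # (1, 256, 64, 64) dense positional encoding
+#   ...     sparse_prompt_embeddings=sparse,  # (B, N, 256) from the prompt encoder
+#   ...     dense_prompt_embeddings=dense,    # (B, 256, 64, 64)
+#   ...     multimask_output=False,
+#   ... )
+# and returns low-res mask logits of shape (B, 1, 256, 256) plus iou_pred of (B, 1).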
diff --git a/model/segment_anything/modeling/prompt_encoder.py b/model/segment_anything/modeling/prompt_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..16bc3a45e75f154453ed0724c70ce8daa0324c81
--- /dev/null
+++ b/model/segment_anything/modeling/prompt_encoder.py
@@ -0,0 +1,238 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Any, Optional, Tuple, Type
+
+import numpy as np
+import torch
+from torch import nn
+
+from .common import LayerNorm2d
+
+
+class PromptEncoder(nn.Module):
+ def __init__(
+ self,
+ embed_dim: int,
+ image_embedding_size: Tuple[int, int],
+ input_image_size: Tuple[int, int],
+ mask_in_chans: int,
+ activation: Type[nn.Module] = nn.GELU,
+ ) -> None:
+ """
+ Encodes prompts for input to SAM's mask decoder.
+
+ Arguments:
+ embed_dim (int): The prompts' embedding dimension
+ image_embedding_size (tuple(int, int)): The spatial size of the
+ image embedding, as (H, W).
+            input_image_size (tuple(int, int)): The padded size of the image as input
+ to the image encoder, as (H, W).
+ mask_in_chans (int): The number of hidden channels used for
+ encoding input masks.
+ activation (nn.Module): The activation to use when encoding
+ input masks.
+ """
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.input_image_size = input_image_size
+ self.image_embedding_size = image_embedding_size
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
+
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
+ point_embeddings = [
+ nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)
+ ]
+ self.point_embeddings = nn.ModuleList(point_embeddings)
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
+
+ self.mask_input_size = (
+ 4 * image_embedding_size[0],
+ 4 * image_embedding_size[1],
+ )
+ self.mask_downscaling = nn.Sequential(
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
+ LayerNorm2d(mask_in_chans // 4),
+ activation(),
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
+ LayerNorm2d(mask_in_chans),
+ activation(),
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
+ )
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
+
+ def get_dense_pe(self) -> torch.Tensor:
+ """
+ Returns the positional encoding used to encode point prompts,
+ applied to a dense set of points the shape of the image encoding.
+
+ Returns:
+ torch.Tensor: Positional encoding with shape
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
+ """
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
+
+ def _embed_points(
+ self,
+ points: torch.Tensor,
+ labels: torch.Tensor,
+ pad: bool,
+ ) -> torch.Tensor:
+ """Embeds point prompts."""
+ points = points + 0.5 # Shift to center of pixel
+ if pad:
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
+ points = torch.cat([points, padding_point], dim=1)
+ labels = torch.cat([labels, padding_label], dim=1)
+ point_embedding = self.pe_layer.forward_with_coords(
+ points, self.input_image_size
+ )
+ point_embedding[labels == -1] = 0.0
+ point_embedding[labels == -1] += self.not_a_point_embed.weight
+ point_embedding[labels == 0] += self.point_embeddings[0].weight
+ point_embedding[labels == 1] += self.point_embeddings[1].weight
+ return point_embedding
+
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
+ """Embeds box prompts."""
+ boxes = boxes + 0.5 # Shift to center of pixel
+ coords = boxes.reshape(-1, 2, 2)
+ corner_embedding = self.pe_layer.forward_with_coords(
+ coords, self.input_image_size
+ )
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
+ return corner_embedding
+
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
+ """Embeds mask inputs."""
+ mask_embedding = self.mask_downscaling(masks)
+ return mask_embedding
+
+ def _get_batch_size(
+ self,
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
+ boxes: Optional[torch.Tensor],
+ masks: Optional[torch.Tensor],
+ text_embeds: Optional[torch.Tensor],
+ ) -> int:
+ """
+ Gets the batch size of the output given the batch size of the input prompts.
+ """
+ if points is not None:
+ return points[0].shape[0]
+ elif boxes is not None:
+ return boxes.shape[0]
+ elif masks is not None:
+ return masks.shape[0]
+ elif text_embeds is not None:
+ return text_embeds.shape[0]
+ else:
+ return 1
+
+ def _get_device(self) -> torch.device:
+ return self.point_embeddings[0].weight.device
+
+ def forward(
+ self,
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
+ boxes: Optional[torch.Tensor],
+ masks: Optional[torch.Tensor],
+ text_embeds: Optional[torch.Tensor],
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Embeds different types of prompts, returning both sparse and dense
+ embeddings.
+
+ Arguments:
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
+ and labels to embed.
+ boxes (torch.Tensor or none): boxes to embed
+            masks (torch.Tensor or none): masks to embed
+            text_embeds (torch.Tensor or none): text embeddings to append to the
+              sparse prompt embeddings
+
+ Returns:
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
+ BxNx(embed_dim), where N is determined by the number of input points
+ and boxes.
+ torch.Tensor: dense embeddings for the masks, in the shape
+ Bx(embed_dim)x(embed_H)x(embed_W)
+ """
+ bs = self._get_batch_size(points, boxes, masks, text_embeds)
+ sparse_embeddings = torch.empty(
+ (bs, 0, self.embed_dim), device=self._get_device()
+ )
+ if points is not None:
+ coords, labels = points
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
+ if boxes is not None:
+ box_embeddings = self._embed_boxes(boxes)
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
+
+ if text_embeds is not None:
+ sparse_embeddings = torch.cat([sparse_embeddings, text_embeds], dim=1)
+
+ if masks is not None:
+ dense_embeddings = self._embed_masks(masks)
+ else:
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
+ bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
+ )
+
+ return sparse_embeddings, dense_embeddings
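+
+    # Editor's note (illustrative, not part of upstream SAM): the `text_embeds`
+    # argument is an AffordanceVLM/LISA-style extension of the original prompt
+    # encoder. A (B, 1, embed_dim) text embedding (e.g. a projected [SEG]-style
+    # token; an assumption about how this repo drives it) is simply appended to
+    # the sparse prompts, so with SAM's usual embed_dim=256, a 64x64 embedding
+    # grid, and no points/boxes/masks:
+    #   >>> sparse, dense = prompt_encoder(points=None, boxes=None, masks=None,
+    #   ...                                text_embeds=text_embeds)
+    #   >>> sparse.shape, dense.shape
+    #   (torch.Size([B, 1, 256]), torch.Size([B, 256, 64, 64]))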
+
+
+class PositionEmbeddingRandom(nn.Module):
+ """
+ Positional encoding using random spatial frequencies.
+ """
+
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
+ super().__init__()
+ if scale is None or scale <= 0.0:
+ scale = 1.0
+ self.register_buffer(
+ "positional_encoding_gaussian_matrix",
+ scale * torch.randn((2, num_pos_feats)),
+ )
+
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
+ """Positionally encode points that are normalized to [0,1]."""
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
+ coords = 2 * coords - 1
+
+ if coords.dtype != self.positional_encoding_gaussian_matrix.dtype:
+ coords = coords.to(self.positional_encoding_gaussian_matrix.dtype)
+
+ coords = coords @ self.positional_encoding_gaussian_matrix
+ coords = 2 * np.pi * coords
+ # outputs d_1 x ... x d_n x C shape
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
+
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
+ """Generate positional encoding for a grid of the specified size."""
+ h, w = size
+ device: Any = self.positional_encoding_gaussian_matrix.device
+ grid = torch.ones(
+ (h, w), device=device, dtype=self.positional_encoding_gaussian_matrix.dtype
+ )
+ y_embed = grid.cumsum(dim=0) - 0.5
+ x_embed = grid.cumsum(dim=1) - 0.5
+ y_embed = y_embed / h
+ x_embed = x_embed / w
+
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
+ return pe.permute(2, 0, 1) # C x H x W
+
+ def forward_with_coords(
+ self, coords_input: torch.Tensor, image_size: Tuple[int, int]
+ ) -> torch.Tensor:
+ """Positionally encode points that are not normalized to [0,1]."""
+ coords = coords_input.clone()
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
+ return self._pe_encoding(coords.to(torch.float)) # B x N x C
diff --git a/model/segment_anything/modeling/sam.py b/model/segment_anything/modeling/sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1d82cac3cc1deea45171fd9360dfd7fa25e457a
--- /dev/null
+++ b/model/segment_anything/modeling/sam.py
@@ -0,0 +1,184 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Any, Dict, List, Tuple
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .image_encoder import ImageEncoderViT
+from .mask_decoder import MaskDecoder
+from .prompt_encoder import PromptEncoder
+
+
+class Sam(nn.Module):
+ mask_threshold: float = 0.0
+ image_format: str = "RGB"
+
+ def __init__(
+ self,
+ image_encoder: ImageEncoderViT,
+ prompt_encoder: PromptEncoder,
+ mask_decoder: MaskDecoder,
+ pixel_mean: List[float] = [123.675, 116.28, 103.53],
+ pixel_std: List[float] = [58.395, 57.12, 57.375],
+ ) -> None:
+ """
+ SAM predicts object masks from an image and input prompts.
+
+ Arguments:
+ image_encoder (ImageEncoderViT): The backbone used to encode the
+ image into image embeddings that allow for efficient mask prediction.
+ prompt_encoder (PromptEncoder): Encodes various types of input prompts.
+ mask_decoder (MaskDecoder): Predicts masks from the image embeddings
+ and encoded prompts.
+ pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
+ pixel_std (list(float)): Std values for normalizing pixels in the input image.
+ """
+ super().__init__()
+ self.image_encoder = image_encoder
+ self.prompt_encoder = prompt_encoder
+ self.mask_decoder = mask_decoder
+ self.register_buffer(
+ "pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False
+ )
+ self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
+
+ @property
+ def device(self) -> Any:
+ return self.pixel_mean.device
+
+ @torch.no_grad()
+ def forward(
+ self,
+ batched_input: List[Dict[str, Any]],
+ multimask_output: bool,
+ ) -> List[Dict[str, torch.Tensor]]:
+ """
+ Predicts masks end-to-end from provided images and prompts.
+ If prompts are not known in advance, using SamPredictor is
+ recommended over calling the model directly.
+
+ Arguments:
+ batched_input (list(dict)): A list over input images, each a
+ dictionary with the following keys. A prompt key can be
+ excluded if it is not present.
+ 'image': The image as a torch tensor in 3xHxW format,
+ already transformed for input to the model.
+ 'original_size': (tuple(int, int)) The original size of
+ the image before transformation, as (H, W).
+ 'point_coords': (torch.Tensor) Batched point prompts for
+ this image, with shape BxNx2. Already transformed to the
+ input frame of the model.
+ 'point_labels': (torch.Tensor) Batched labels for point prompts,
+ with shape BxN.
+ 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
+ Already transformed to the input frame of the model.
+ 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
+ in the form Bx1xHxW.
+ multimask_output (bool): Whether the model should predict multiple
+ disambiguating masks, or return a single mask.
+
+ Returns:
+ (list(dict)): A list over input images, where each element is
+            a dictionary with the following keys.
+ 'masks': (torch.Tensor) Batched binary mask predictions,
+ with shape BxCxHxW, where B is the number of input prompts,
+ C is determined by multimask_output, and (H, W) is the
+ original size of the image.
+ 'iou_predictions': (torch.Tensor) The model's predictions
+ of mask quality, in shape BxC.
+ 'low_res_logits': (torch.Tensor) Low resolution logits with
+ shape BxCxHxW, where H=W=256. Can be passed as mask input
+ to subsequent iterations of prediction.
+ """
+ input_images = torch.stack(
+ [self.preprocess(x["image"]) for x in batched_input], dim=0
+ )
+ image_embeddings = self.image_encoder(input_images)
+
+ outputs = []
+ for image_record, curr_embedding in zip(batched_input, image_embeddings):
+ if "point_coords" in image_record:
+ points = (image_record["point_coords"], image_record["point_labels"])
+ else:
+ points = None
+ sparse_embeddings, dense_embeddings = self.prompt_encoder(
+ points=points,
+ boxes=image_record.get("boxes", None),
+ masks=image_record.get("mask_inputs", None),
+ )
+ low_res_masks, iou_predictions = self.mask_decoder(
+ image_embeddings=curr_embedding.unsqueeze(0),
+ image_pe=self.prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ )
+ masks = self.postprocess_masks(
+ low_res_masks,
+ input_size=image_record["image"].shape[-2:],
+ original_size=image_record["original_size"],
+ )
+ masks = masks > self.mask_threshold
+ outputs.append(
+ {
+ "masks": masks,
+ "iou_predictions": iou_predictions,
+ "low_res_logits": low_res_masks,
+ }
+ )
+ return outputs
+
+ def postprocess_masks(
+ self,
+ masks: torch.Tensor,
+ input_size: Tuple[int, ...],
+ original_size: Tuple[int, ...],
+ ) -> torch.Tensor:
+ """
+ Remove padding and upscale masks to the original image size.
+
+ Arguments:
+ masks (torch.Tensor): Batched masks from the mask_decoder,
+ in BxCxHxW format.
+ input_size (tuple(int, int)): The size of the image input to the
+ model, in (H, W) format. Used to remove padding.
+ original_size (tuple(int, int)): The original size of the image
+ before resizing for input to the model, in (H, W) format.
+
+ Returns:
+ (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
+ is given by original_size.
+ """
+
+ dtype = masks.dtype
+
+ masks = F.interpolate(
+ masks.float(),
+ (self.image_encoder.img_size, self.image_encoder.img_size),
+ mode="bilinear",
+ align_corners=False,
+ )
+ # masks = masks.to(dtype)
+ masks = masks[..., : input_size[0], : input_size[1]]
+ masks = F.interpolate(
+ masks, original_size, mode="bilinear", align_corners=False
+ )
+ return masks
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.image_encoder.img_size - h
+ padw = self.image_encoder.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
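+
+
+# Editor's note (illustrative sketch, not part of upstream SAM): a minimal
+# end-to-end call, assuming `sam` was built by a build_sam-style constructor and
+# `image` is a 3xHxW tensor already resized with ResizeLongestSide (see
+# predictor.py for the usual preprocessing path):
+#   >>> batched_input = [{
+#   ...     "image": image,
+#   ...     "original_size": (orig_h, orig_w),
+#   ...     "point_coords": torch.tensor([[[500.0, 375.0]]], device=sam.device),
+#   ...     "point_labels": torch.tensor([[1]], device=sam.device),
+#   ... }]
+#   >>> out = sam(batched_input, multimask_output=True)
+#   >>> out[0]["masks"].shape
+#   torch.Size([1, 3, orig_h, orig_w])   # 1 prompt x 3 candidate boolean masks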
diff --git a/model/segment_anything/modeling/transformer.py b/model/segment_anything/modeling/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c511e4ff35cc91132b09edd788c96f9a5768161
--- /dev/null
+++ b/model/segment_anything/modeling/transformer.py
@@ -0,0 +1,242 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from typing import Tuple, Type
+
+import torch
+from torch import Tensor, nn
+
+from .common import MLPBlock
+
+
+class TwoWayTransformer(nn.Module):
+ def __init__(
+ self,
+ depth: int,
+ embedding_dim: int,
+ num_heads: int,
+ mlp_dim: int,
+ activation: Type[nn.Module] = nn.ReLU,
+ attention_downsample_rate: int = 2,
+ ) -> None:
+ """
+ A transformer decoder that attends to an input image using
+ queries whose positional embedding is supplied.
+
+ Args:
+ depth (int): number of layers in the transformer
+ embedding_dim (int): the channel dimension for the input embeddings
+ num_heads (int): the number of heads for multihead attention. Must
+ divide embedding_dim
+ mlp_dim (int): the channel dimension internal to the MLP block
+ activation (nn.Module): the activation to use in the MLP block
+ """
+ super().__init__()
+ self.depth = depth
+ self.embedding_dim = embedding_dim
+ self.num_heads = num_heads
+ self.mlp_dim = mlp_dim
+ self.layers = nn.ModuleList()
+
+ for i in range(depth):
+ self.layers.append(
+ TwoWayAttentionBlock(
+ embedding_dim=embedding_dim,
+ num_heads=num_heads,
+ mlp_dim=mlp_dim,
+ activation=activation,
+ attention_downsample_rate=attention_downsample_rate,
+ skip_first_layer_pe=(i == 0),
+ )
+ )
+
+ self.final_attn_token_to_image = Attention(
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
+ )
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
+
+ def forward(
+ self,
+ image_embedding: Tensor,
+ image_pe: Tensor,
+ point_embedding: Tensor,
+ ) -> Tuple[Tensor, Tensor]:
+ """
+ Args:
+ image_embedding (torch.Tensor): image to attend to. Should be shape
+ B x embedding_dim x h x w for any h and w.
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
+ have the same shape as image_embedding.
+ point_embedding (torch.Tensor): the embedding to add to the query points.
+ Must have shape B x N_points x embedding_dim for any N_points.
+
+ Returns:
+ torch.Tensor: the processed point_embedding
+ torch.Tensor: the processed image_embedding
+ """
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
+ bs, c, h, w = image_embedding.shape
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
+
+ # Prepare queries
+ queries = point_embedding
+ keys = image_embedding
+
+ # Apply transformer blocks and final layernorm
+ for layer in self.layers:
+ queries, keys = layer(
+ queries=queries,
+ keys=keys,
+ query_pe=point_embedding,
+ key_pe=image_pe,
+ )
+
+ # Apply the final attention layer from the points to the image
+ q = queries + point_embedding
+ k = keys + image_pe
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
+ queries = queries + attn_out
+ queries = self.norm_final_attn(queries)
+
+ return queries, keys
+
+
+class TwoWayAttentionBlock(nn.Module):
+ def __init__(
+ self,
+ embedding_dim: int,
+ num_heads: int,
+ mlp_dim: int = 2048,
+ activation: Type[nn.Module] = nn.ReLU,
+ attention_downsample_rate: int = 2,
+ skip_first_layer_pe: bool = False,
+ ) -> None:
+ """
+ A transformer block with four layers: (1) self-attention of sparse
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
+ inputs.
+
+ Arguments:
+ embedding_dim (int): the channel dimension of the embeddings
+ num_heads (int): the number of heads in the attention layers
+ mlp_dim (int): the hidden dimension of the mlp block
+ activation (nn.Module): the activation of the mlp block
+ skip_first_layer_pe (bool): skip the PE on the first layer
+ """
+ super().__init__()
+ self.self_attn = Attention(embedding_dim, num_heads)
+ self.norm1 = nn.LayerNorm(embedding_dim)
+
+ self.cross_attn_token_to_image = Attention(
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
+ )
+ self.norm2 = nn.LayerNorm(embedding_dim)
+
+ self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
+ self.norm3 = nn.LayerNorm(embedding_dim)
+
+ self.norm4 = nn.LayerNorm(embedding_dim)
+ self.cross_attn_image_to_token = Attention(
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
+ )
+
+ self.skip_first_layer_pe = skip_first_layer_pe
+
+ def forward(
+ self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
+ ) -> Tuple[Tensor, Tensor]:
+ # Self attention block
+ if self.skip_first_layer_pe:
+ queries = self.self_attn(q=queries, k=queries, v=queries)
+ else:
+ q = queries + query_pe
+ attn_out = self.self_attn(q=q, k=q, v=queries)
+ queries = queries + attn_out
+ queries = self.norm1(queries)
+
+ # Cross attention block, tokens attending to image embedding
+ q = queries + query_pe
+ k = keys + key_pe
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
+ queries = queries + attn_out
+ queries = self.norm2(queries)
+
+ # MLP block
+ mlp_out = self.mlp(queries)
+ queries = queries + mlp_out
+ queries = self.norm3(queries)
+
+ # Cross attention block, image embedding attending to tokens
+ q = queries + query_pe
+ k = keys + key_pe
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
+ keys = keys + attn_out
+ keys = self.norm4(keys)
+
+ return queries, keys
+
+
+class Attention(nn.Module):
+ """
+ An attention layer that allows for downscaling the size of the embedding
+ after projection to queries, keys, and values.
+ """
+
+ def __init__(
+ self,
+ embedding_dim: int,
+ num_heads: int,
+ downsample_rate: int = 1,
+ ) -> None:
+ super().__init__()
+ self.embedding_dim = embedding_dim
+ self.internal_dim = embedding_dim // downsample_rate
+ self.num_heads = num_heads
+ assert (
+ self.internal_dim % num_heads == 0
+ ), "num_heads must divide embedding_dim."
+
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
+ self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
+ self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
+
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
+ b, n, c = x.shape
+ x = x.reshape(b, n, num_heads, c // num_heads)
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
+
+ def _recombine_heads(self, x: Tensor) -> Tensor:
+ b, n_heads, n_tokens, c_per_head = x.shape
+ x = x.transpose(1, 2)
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
+
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
+ # Input projections
+ q = self.q_proj(q)
+ k = self.k_proj(k)
+ v = self.v_proj(v)
+
+ # Separate into heads
+ q = self._separate_heads(q, self.num_heads)
+ k = self._separate_heads(k, self.num_heads)
+ v = self._separate_heads(v, self.num_heads)
+
+ # Attention
+ _, _, _, c_per_head = q.shape
+ attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
+ attn = attn / math.sqrt(c_per_head)
+ attn = torch.softmax(attn, dim=-1)
+
+ # Get output
+ out = attn @ v
+ out = self._recombine_heads(out)
+ out = self.out_proj(out)
+
+ return out
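+
+
+# Editor's note (illustrative, not part of upstream SAM): with downsample_rate=2
+# and embedding_dim=256, the q/k/v projections map 256 -> 128 before the head
+# split, so the cross-attention inside TwoWayTransformer runs at half width:
+#   >>> attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
+#   >>> attn(q=torch.randn(2, 5, 256),
+#   ...      k=torch.randn(2, 4096, 256),
+#   ...      v=torch.randn(2, 4096, 256)).shape
+#   torch.Size([2, 5, 256])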
diff --git a/model/segment_anything/predictor.py b/model/segment_anything/predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf52d81c2ef2e81b87e574fc935e88749ae3ebf6
--- /dev/null
+++ b/model/segment_anything/predictor.py
@@ -0,0 +1,284 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Optional, Tuple
+
+import numpy as np
+import torch
+
+from .modeling import Sam
+from .utils.transforms import ResizeLongestSide
+
+
+class SamPredictor:
+ def __init__(
+ self,
+ sam_model: Sam,
+ ) -> None:
+ """
+ Uses SAM to calculate the image embedding for an image, and then
+ allow repeated, efficient mask prediction given prompts.
+
+ Arguments:
+ sam_model (Sam): The model to use for mask prediction.
+ """
+ super().__init__()
+ self.model = sam_model
+ self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
+ self.reset_image()
+
+ def set_image(
+ self,
+ image: np.ndarray,
+ image_format: str = "RGB",
+ ) -> None:
+ """
+ Calculates the image embeddings for the provided image, allowing
+ masks to be predicted with the 'predict' method.
+
+ Arguments:
+ image (np.ndarray): The image for calculating masks. Expects an
+ image in HWC uint8 format, with pixel values in [0, 255].
+ image_format (str): The color format of the image, in ['RGB', 'BGR'].
+ """
+ assert image_format in [
+ "RGB",
+ "BGR",
+ ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
+ if image_format != self.model.image_format:
+ image = image[..., ::-1]
+
+ # Transform the image to the form expected by the model
+ input_image = self.transform.apply_image(image)
+ input_image_torch = torch.as_tensor(input_image, device=self.device)
+ input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[
+ None, :, :, :
+ ]
+
+ self.set_torch_image(input_image_torch, image.shape[:2])
+
+ @torch.no_grad()
+ def set_torch_image(
+ self,
+ transformed_image: torch.Tensor,
+ original_image_size: Tuple[int, ...],
+ ) -> None:
+ """
+ Calculates the image embeddings for the provided image, allowing
+ masks to be predicted with the 'predict' method. Expects the input
+ image to be already transformed to the format expected by the model.
+
+ Arguments:
+ transformed_image (torch.Tensor): The input image, with shape
+ 1x3xHxW, which has been transformed with ResizeLongestSide.
+ original_image_size (tuple(int, int)): The size of the image
+ before transformation, in (H, W) format.
+ """
+ assert (
+ len(transformed_image.shape) == 4
+ and transformed_image.shape[1] == 3
+ and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
+ ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
+ self.reset_image()
+
+ self.original_size = original_image_size
+ self.input_size = tuple(transformed_image.shape[-2:])
+ input_image = self.model.preprocess(transformed_image)
+ self.features = self.model.image_encoder(input_image)
+ self.is_image_set = True
+
+ def predict(
+ self,
+ point_coords: Optional[np.ndarray] = None,
+ point_labels: Optional[np.ndarray] = None,
+ box: Optional[np.ndarray] = None,
+ mask_input: Optional[np.ndarray] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ """
+ Predict masks for the given input prompts, using the currently set image.
+
+ Arguments:
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
+ model. Each point is in (X,Y) in pixels.
+ point_labels (np.ndarray or None): A length N array of labels for the
+ point prompts. 1 indicates a foreground point and 0 indicates a
+ background point.
+            box (np.ndarray or None): A length 4 array giving a box prompt to the
+ model, in XYXY format.
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
+ coming from a previous prediction iteration. Has form 1xHxW, where
+ for SAM, H=W=256.
+ multimask_output (bool): If true, the model will return three masks.
+ For ambiguous input prompts (such as a single click), this will often
+ produce better masks than a single prediction. If only a single
+ mask is needed, the model's predicted quality score can be used
+ to select the best mask. For non-ambiguous prompts, such as multiple
+ input prompts, multimask_output=False can give better results.
+ return_logits (bool): If true, returns un-thresholded masks logits
+ instead of a binary mask.
+
+ Returns:
+ (np.ndarray): The output masks in CxHxW format, where C is the
+ number of masks, and (H, W) is the original image size.
+ (np.ndarray): An array of length C containing the model's
+ predictions for the quality of each mask.
+ (np.ndarray): An array of shape CxHxW, where C is the number
+ of masks and H=W=256. These low resolution logits can be passed to
+ a subsequent iteration as mask input.
+ """
+ if not self.is_image_set:
+ raise RuntimeError(
+ "An image must be set with .set_image(...) before mask prediction."
+ )
+
+ # Transform input prompts
+ coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
+ if point_coords is not None:
+ assert (
+ point_labels is not None
+ ), "point_labels must be supplied if point_coords is supplied."
+ point_coords = self.transform.apply_coords(point_coords, self.original_size)
+ coords_torch = torch.as_tensor(
+ point_coords, dtype=torch.float, device=self.device
+ )
+ labels_torch = torch.as_tensor(
+ point_labels, dtype=torch.int, device=self.device
+ )
+ coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
+ if box is not None:
+ box = self.transform.apply_boxes(box, self.original_size)
+ box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
+ box_torch = box_torch[None, :]
+ if mask_input is not None:
+ mask_input_torch = torch.as_tensor(
+ mask_input, dtype=torch.float, device=self.device
+ )
+ mask_input_torch = mask_input_torch[None, :, :, :]
+
+ masks, iou_predictions, low_res_masks = self.predict_torch(
+ coords_torch,
+ labels_torch,
+ box_torch,
+ mask_input_torch,
+ multimask_output,
+ return_logits=return_logits,
+ )
+
+ masks_np = masks[0].detach().cpu().numpy()
+ iou_predictions_np = iou_predictions[0].detach().cpu().numpy()
+ low_res_masks_np = low_res_masks[0].detach().cpu().numpy()
+ return masks_np, iou_predictions_np, low_res_masks_np
+
+ @torch.no_grad()
+ def predict_torch(
+ self,
+ point_coords: Optional[torch.Tensor],
+ point_labels: Optional[torch.Tensor],
+ boxes: Optional[torch.Tensor] = None,
+ mask_input: Optional[torch.Tensor] = None,
+ multimask_output: bool = True,
+ return_logits: bool = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Predict masks for the given input prompts, using the currently set image.
+ Input prompts are batched torch tensors and are expected to already be
+ transformed to the input frame using ResizeLongestSide.
+
+ Arguments:
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
+ model. Each point is in (X,Y) in pixels.
+ point_labels (torch.Tensor or None): A BxN array of labels for the
+ point prompts. 1 indicates a foreground point and 0 indicates a
+ background point.
+            boxes (torch.Tensor or None): A Bx4 array giving box prompts to the
+                model, in XYXY format.
+            mask_input (torch.Tensor or None): A low resolution mask input to the model, typically
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
+ for SAM, H=W=256. Masks returned by a previous iteration of the
+ predict method do not need further transformation.
+ multimask_output (bool): If true, the model will return three masks.
+ For ambiguous input prompts (such as a single click), this will often
+ produce better masks than a single prediction. If only a single
+ mask is needed, the model's predicted quality score can be used
+ to select the best mask. For non-ambiguous prompts, such as multiple
+ input prompts, multimask_output=False can give better results.
+ return_logits (bool): If true, returns un-thresholded masks logits
+ instead of a binary mask.
+
+ Returns:
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
+ number of masks, and (H, W) is the original image size.
+ (torch.Tensor): An array of shape BxC containing the model's
+ predictions for the quality of each mask.
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
+ of masks and H=W=256. These low res logits can be passed to
+ a subsequent iteration as mask input.
+ """
+ if not self.is_image_set:
+ raise RuntimeError(
+ "An image must be set with .set_image(...) before mask prediction."
+ )
+
+ if point_coords is not None:
+ points = (point_coords, point_labels)
+ else:
+ points = None
+
+ # Embed prompts
+ sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
+ points=points,
+ boxes=boxes,
+ masks=mask_input,
+ )
+
+ # Predict masks
+ low_res_masks, iou_predictions = self.model.mask_decoder(
+ image_embeddings=self.features,
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embeddings,
+ dense_prompt_embeddings=dense_embeddings,
+ multimask_output=multimask_output,
+ )
+
+ # Upscale the masks to the original image resolution
+ masks = self.model.postprocess_masks(
+ low_res_masks, self.input_size, self.original_size
+ )
+
+ if not return_logits:
+ masks = masks > self.model.mask_threshold
+
+ return masks, iou_predictions, low_res_masks
+
+ def get_image_embedding(self) -> torch.Tensor:
+ """
+ Returns the image embeddings for the currently set image, with
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
+ """
+ if not self.is_image_set:
+ raise RuntimeError(
+ "An image must be set with .set_image(...) to generate an embedding."
+ )
+ assert (
+ self.features is not None
+ ), "Features must exist if an image has been set."
+ return self.features
+
+ @property
+ def device(self) -> torch.device:
+ return self.model.device
+
+ def reset_image(self) -> None:
+ """Resets the currently set image."""
+ self.is_image_set = False
+ self.features = None
+ self.orig_h = None
+ self.orig_w = None
+ self.input_h = None
+ self.input_w = None
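+
+
+# Editor's note (illustrative sketch, not part of upstream SAM): typical usage,
+# assuming `sam` is a loaded Sam model and `image` is an HxWx3 uint8 RGB array:
+#   >>> predictor = SamPredictor(sam)
+#   >>> predictor.set_image(image)
+#   >>> masks, scores, low_res = predictor.predict(
+#   ...     point_coords=np.array([[500, 375]]),
+#   ...     point_labels=np.array([1]),
+#   ...     multimask_output=True,
+#   ... )
+#   >>> masks.shape
+#   (3, H, W)   # boolean masks; pick the best one with scores.argmax()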
diff --git a/model/segment_anything/utils/__init__.py b/model/segment_anything/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5277f46157403e47fd830fc519144b97ef69d4ae
--- /dev/null
+++ b/model/segment_anything/utils/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/model/segment_anything/utils/__pycache__/__init__.cpython-39.pyc b/model/segment_anything/utils/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db944fd33a556071e8fe35e296df32d5a616c686
Binary files /dev/null and b/model/segment_anything/utils/__pycache__/__init__.cpython-39.pyc differ
diff --git a/model/segment_anything/utils/__pycache__/amg.cpython-39.pyc b/model/segment_anything/utils/__pycache__/amg.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b473817b12efe1913046862076d9bf156dcd5c7
Binary files /dev/null and b/model/segment_anything/utils/__pycache__/amg.cpython-39.pyc differ
diff --git a/model/segment_anything/utils/__pycache__/transforms.cpython-39.pyc b/model/segment_anything/utils/__pycache__/transforms.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74767101e505114ecc914c1b042e8600c04e9f6e
Binary files /dev/null and b/model/segment_anything/utils/__pycache__/transforms.cpython-39.pyc differ
diff --git a/model/segment_anything/utils/amg.py b/model/segment_anything/utils/amg.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c3bc5d789049076a2404b1b2477110cebc32fb2
--- /dev/null
+++ b/model/segment_anything/utils/amg.py
@@ -0,0 +1,346 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+from copy import deepcopy
+from itertools import product
+from typing import Any, Dict, Generator, ItemsView, List, Tuple
+
+import numpy as np
+import torch
+
+
+class MaskData:
+ """
+ A structure for storing masks and their related data in batched format.
+ Implements basic filtering and concatenation.
+ """
+
+ def __init__(self, **kwargs) -> None:
+ for v in kwargs.values():
+ assert isinstance(
+ v, (list, np.ndarray, torch.Tensor)
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
+ self._stats = dict(**kwargs)
+
+ def __setitem__(self, key: str, item: Any) -> None:
+ assert isinstance(
+ item, (list, np.ndarray, torch.Tensor)
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
+ self._stats[key] = item
+
+ def __delitem__(self, key: str) -> None:
+ del self._stats[key]
+
+ def __getitem__(self, key: str) -> Any:
+ return self._stats[key]
+
+ def items(self) -> ItemsView[str, Any]:
+ return self._stats.items()
+
+ def filter(self, keep: torch.Tensor) -> None:
+ for k, v in self._stats.items():
+ if v is None:
+ self._stats[k] = None
+ elif isinstance(v, torch.Tensor):
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
+ elif isinstance(v, np.ndarray):
+ self._stats[k] = v[keep.detach().cpu().numpy()]
+ elif isinstance(v, list) and keep.dtype == torch.bool:
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
+ elif isinstance(v, list):
+ self._stats[k] = [v[i] for i in keep]
+ else:
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
+
+ def cat(self, new_stats: "MaskData") -> None:
+ for k, v in new_stats.items():
+ if k not in self._stats or self._stats[k] is None:
+ self._stats[k] = deepcopy(v)
+ elif isinstance(v, torch.Tensor):
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
+ elif isinstance(v, np.ndarray):
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
+ elif isinstance(v, list):
+ self._stats[k] = self._stats[k] + deepcopy(v)
+ else:
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
+
+ def to_numpy(self) -> None:
+ for k, v in self._stats.items():
+ if isinstance(v, torch.Tensor):
+ self._stats[k] = v.detach().cpu().numpy()
+
+
+def is_box_near_crop_edge(
+ boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
+) -> torch.Tensor:
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
+ return torch.any(near_crop_edge, dim=1)
+
+
+def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
+ box_xywh = deepcopy(box_xyxy)
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
+ return box_xywh
+
+
+def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
+ assert len(args) > 0 and all(
+ len(a) == len(args[0]) for a in args
+ ), "Batched iteration must have inputs of all the same size."
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
+ for b in range(n_batches):
+ yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
+
+
+def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
+ """
+ Encodes masks to an uncompressed RLE, in the format expected by
+ pycoco tools.
+ """
+ # Put in fortran order and flatten h,w
+ b, h, w = tensor.shape
+ tensor = tensor.permute(0, 2, 1).flatten(1)
+
+ # Compute change indices
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
+ change_indices = diff.nonzero()
+
+ # Encode run length
+ out = []
+ for i in range(b):
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
+ cur_idxs = torch.cat(
+ [
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
+ cur_idxs + 1,
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
+ ]
+ )
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
+ counts = [] if tensor[i, 0] == 0 else [0]
+ counts.extend(btw_idxs.detach().cpu().tolist())
+ out.append({"size": [h, w], "counts": counts})
+ return out
+
+
+def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
+ """Compute a binary mask from an uncompressed RLE."""
+ h, w = rle["size"]
+ mask = np.empty(h * w, dtype=bool)
+ idx = 0
+ parity = False
+ for count in rle["counts"]:
+ mask[idx : idx + count] = parity
+ idx += count
+ parity ^= True
+ mask = mask.reshape(w, h)
+ return mask.transpose() # Put in C order
+
+
+def area_from_rle(rle: Dict[str, Any]) -> int:
+ return sum(rle["counts"][1::2])
+
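+# Illustrative round trip through the RLE helpers above (not part of the original
+# code; values assume a single 2x2 boolean mask):
+#   m = torch.tensor([[[False, True], [False, True]]])
+#   rle = mask_to_rle_pytorch(m)[0]              # {"size": [2, 2], "counts": [2, 2]}
+#   area_from_rle(rle)                           # 2 foreground pixels
+#   (rle_to_mask(rle) == m[0].numpy()).all()     # True -- decoding inverts encoding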
+
+def calculate_stability_score(
+ masks: torch.Tensor, mask_threshold: float, threshold_offset: float
+) -> torch.Tensor:
+ """
+ Computes the stability score for a batch of masks. The stability
+ score is the IoU between the binary masks obtained by thresholding
+ the predicted mask logits at high and low values.
+ """
+ # One mask is always contained inside the other.
+ # Save memory by preventing unnecessary cast to torch.int64
+ intersections = (
+ (masks > (mask_threshold + threshold_offset))
+ .sum(-1, dtype=torch.int16)
+ .sum(-1, dtype=torch.int32)
+ )
+ unions = (
+ (masks > (mask_threshold - threshold_offset))
+ .sum(-1, dtype=torch.int16)
+ .sum(-1, dtype=torch.int32)
+ )
+ return intersections / unions
+
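+# Worked example (illustrative): with mask_threshold=0.0 and threshold_offset=1.0,
+# a 1x1x4 logit map [5., 5., -5., 0.5] gives intersections=2 (values > 1.0) and
+# unions=3 (values > -1.0), so the stability score is 2/3; confident logits far
+# from the threshold push the score toward 1.0.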
+
+def build_point_grid(n_per_side: int) -> np.ndarray:
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
+ offset = 1 / (2 * n_per_side)
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
+ return points
+
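+# Shape check (illustrative): build_point_grid(2) returns four (x, y) points,
+# [[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]], i.e. a 2x2 grid
+# centered inside [0, 1] x [0, 1].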
+
+def build_all_layer_point_grids(
+ n_per_side: int, n_layers: int, scale_per_layer: int
+) -> List[np.ndarray]:
+ """Generates point grids for all crop layers."""
+ points_by_layer = []
+ for i in range(n_layers + 1):
+ n_points = int(n_per_side / (scale_per_layer**i))
+ points_by_layer.append(build_point_grid(n_points))
+ return points_by_layer
+
+
+def generate_crop_boxes(
+ im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
+) -> Tuple[List[List[int]], List[int]]:
+ """
+ Generates a list of crop boxes of different sizes. The ith layer
+ has (2**i)**2 boxes.
+ """
+ crop_boxes, layer_idxs = [], []
+ im_h, im_w = im_size
+ short_side = min(im_h, im_w)
+
+ # Original image
+ crop_boxes.append([0, 0, im_w, im_h])
+ layer_idxs.append(0)
+
+ def crop_len(orig_len, n_crops, overlap):
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
+
+ for i_layer in range(n_layers):
+ n_crops_per_side = 2 ** (i_layer + 1)
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
+
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
+
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
+
+ # Crops in XYXY format
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
+ crop_boxes.append(box)
+ layer_idxs.append(i_layer + 1)
+
+ return crop_boxes, layer_idxs
+
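+# Worked example (illustrative): generate_crop_boxes((600, 800), n_layers=1,
+# overlap_ratio=0.25) yields 5 XYXY boxes -- the full image [0, 0, 800, 600]
+# plus a 2x2 grid of 475x375 crops overlapping by 150 px in each direction,
+# with layer_idxs [0, 1, 1, 1, 1].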
+
+def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
+ x0, y0, _, _ = crop_box
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
+ # Check if boxes has a channel dimension
+ if len(boxes.shape) == 3:
+ offset = offset.unsqueeze(1)
+ return boxes + offset
+
+
+def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
+ x0, y0, _, _ = crop_box
+ offset = torch.tensor([[x0, y0]], device=points.device)
+ # Check if points has a channel dimension
+ if len(points.shape) == 3:
+ offset = offset.unsqueeze(1)
+ return points + offset
+
+
+def uncrop_masks(
+ masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
+) -> torch.Tensor:
+ x0, y0, x1, y1 = crop_box
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
+ return masks
+ # Coordinate transform masks
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
+ return torch.nn.functional.pad(masks, pad, value=0)
+
+
+def remove_small_regions(
+ mask: np.ndarray, area_thresh: float, mode: str
+) -> Tuple[np.ndarray, bool]:
+ """
+ Removes small disconnected regions and holes in a mask. Returns the
+ mask and an indicator of if the mask has been modified.
+ """
+ import cv2 # type: ignore
+
+ assert mode in ["holes", "islands"]
+ correct_holes = mode == "holes"
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
+ sizes = stats[:, -1][1:] # Row 0 is background label
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
+ if len(small_regions) == 0:
+ return mask, False
+ fill_labels = [0] + small_regions
+ if not correct_holes:
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
+ # If every region is below threshold, keep largest
+ if len(fill_labels) == 0:
+ fill_labels = [int(np.argmax(sizes)) + 1]
+ mask = np.isin(regions, fill_labels)
+ return mask, True
+
+
+def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
+ from pycocotools import mask as mask_utils # type: ignore
+
+ h, w = uncompressed_rle["size"]
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
+ return rle
+
+
+def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
+ """
+ Calculates boxes in XYXY format around masks. Returns [0,0,0,0] for
+ an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
+ """
+ # torch.max below raises an error on empty inputs, just skip in this case
+ if torch.numel(masks) == 0:
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
+
+ # Normalize shape to CxHxW
+ shape = masks.shape
+ h, w = shape[-2:]
+ if len(shape) > 2:
+ masks = masks.flatten(0, -3)
+ else:
+ masks = masks.unsqueeze(0)
+
+ # Get top and bottom edges
+ in_height, _ = torch.max(masks, dim=-1)
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
+ in_height_coords = in_height_coords + h * (~in_height)
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
+
+ # Get left and right edges
+ in_width, _ = torch.max(masks, dim=-2)
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
+ in_width_coords = in_width_coords + w * (~in_width)
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
+
+ # If the mask is empty the right edge will be to the left of the left edge.
+ # Replace these boxes with [0, 0, 0, 0]
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
+ out = out * (~empty_filter).unsqueeze(-1)
+
+ # Return to original shape
+ if len(shape) > 2:
+ out = out.reshape(*shape[:-2], 4)
+ else:
+ out = out[0]
+
+ return out
diff --git a/model/segment_anything/utils/onnx.py b/model/segment_anything/utils/onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..3521208f620aeef707707037d027c0156d940cdf
--- /dev/null
+++ b/model/segment_anything/utils/onnx.py
@@ -0,0 +1,157 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Tuple
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+from ..modeling import Sam
+from .amg import calculate_stability_score
+
+
+class SamOnnxModel(nn.Module):
+ """
+ This model should not be called directly, but is used in ONNX export.
+ It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
+ with some functions modified to enable model tracing. Also supports extra
+ options controlling what information is returned. See the ONNX export script for details.
+ """
+
+ def __init__(
+ self,
+ model: Sam,
+ return_single_mask: bool,
+ use_stability_score: bool = False,
+ return_extra_metrics: bool = False,
+ ) -> None:
+ super().__init__()
+ self.mask_decoder = model.mask_decoder
+ self.model = model
+ self.img_size = model.image_encoder.img_size
+ self.return_single_mask = return_single_mask
+ self.use_stability_score = use_stability_score
+ self.stability_score_offset = 1.0
+ self.return_extra_metrics = return_extra_metrics
+
+ @staticmethod
+ def resize_longest_image_size(
+ input_image_size: torch.Tensor, longest_side: int
+ ) -> torch.Tensor:
+ input_image_size = input_image_size.to(torch.float32)
+ scale = longest_side / torch.max(input_image_size)
+ transformed_size = scale * input_image_size
+ transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
+ return transformed_size
+
+ def _embed_points(
+ self, point_coords: torch.Tensor, point_labels: torch.Tensor
+ ) -> torch.Tensor:
+ point_coords = point_coords + 0.5
+ point_coords = point_coords / self.img_size
+ point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
+ point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
+
+ point_embedding = point_embedding * (point_labels != -1)
+ point_embedding = (
+ point_embedding
+ + self.model.prompt_encoder.not_a_point_embed.weight * (point_labels == -1)
+ )
+
+ for i in range(self.model.prompt_encoder.num_point_embeddings):
+ point_embedding = (
+ point_embedding
+ + self.model.prompt_encoder.point_embeddings[i].weight
+ * (point_labels == i)
+ )
+
+ return point_embedding
+
+ def _embed_masks(
+ self, input_mask: torch.Tensor, has_mask_input: torch.Tensor
+ ) -> torch.Tensor:
+ mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(
+ input_mask
+ )
+ mask_embedding = mask_embedding + (
+ 1 - has_mask_input
+ ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
+ return mask_embedding
+
+ def mask_postprocessing(
+ self, masks: torch.Tensor, orig_im_size: torch.Tensor
+ ) -> torch.Tensor:
+ masks = F.interpolate(
+ masks,
+ size=(self.img_size, self.img_size),
+ mode="bilinear",
+ align_corners=False,
+ )
+
+ prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(
+ torch.int64
+ )
+ masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore
+
+ orig_im_size = orig_im_size.to(torch.int64)
+ h, w = orig_im_size[0], orig_im_size[1]
+ masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
+ return masks
+
+ def select_masks(
+ self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ # Determine if we should return the multiclick mask or not from the number of points.
+ # The reweighting is used to avoid control flow.
+ score_reweight = torch.tensor(
+ [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
+ ).to(iou_preds.device)
+ score = iou_preds + (num_points - 2.5) * score_reweight
+ best_idx = torch.argmax(score, dim=1)
+ masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
+ iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
+
+ return masks, iou_preds
+
+ @torch.no_grad()
+ def forward(
+ self,
+ image_embeddings: torch.Tensor,
+ point_coords: torch.Tensor,
+ point_labels: torch.Tensor,
+ mask_input: torch.Tensor,
+ has_mask_input: torch.Tensor,
+ orig_im_size: torch.Tensor,
+ ):
+ sparse_embedding = self._embed_points(point_coords, point_labels)
+ dense_embedding = self._embed_masks(mask_input, has_mask_input)
+
+ masks, scores = self.model.mask_decoder.predict_masks(
+ image_embeddings=image_embeddings,
+ image_pe=self.model.prompt_encoder.get_dense_pe(),
+ sparse_prompt_embeddings=sparse_embedding,
+ dense_prompt_embeddings=dense_embedding,
+ )
+
+ if self.use_stability_score:
+ scores = calculate_stability_score(
+ masks, self.model.mask_threshold, self.stability_score_offset
+ )
+
+ if self.return_single_mask:
+ masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
+
+ upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
+
+ if self.return_extra_metrics:
+ stability_scores = calculate_stability_score(
+ upscaled_masks, self.model.mask_threshold, self.stability_score_offset
+ )
+ areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
+ return upscaled_masks, scores, stability_scores, areas, masks
+
+ return upscaled_masks, scores, masks
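+
+# Rough export sketch (illustrative only; assumes a loaded `sam` model, and the
+# dummy shapes below assume SAM ViT-H's 1x256x64x64 image embedding and
+# 1x1x256x256 low-res mask input -- they are not taken from this repository):
+#   onnx_model = SamOnnxModel(sam, return_single_mask=True)
+#   dummy_inputs = (
+#       torch.randn(1, 256, 64, 64),                 # image_embeddings
+#       torch.randint(0, 1024, (1, 5, 2)).float(),   # point_coords
+#       torch.randint(0, 4, (1, 5)).float(),         # point_labels
+#       torch.randn(1, 1, 256, 256),                 # mask_input
+#       torch.tensor([1.0]),                         # has_mask_input
+#       torch.tensor([480.0, 640.0]),                # orig_im_size
+#   )
+#   torch.onnx.export(onnx_model, dummy_inputs, "sam_decoder.onnx",
+#                     input_names=["image_embeddings", "point_coords", "point_labels",
+#                                  "mask_input", "has_mask_input", "orig_im_size"],
+#                     output_names=["masks", "iou_predictions", "low_res_masks"])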
diff --git a/model/segment_anything/utils/transforms.py b/model/segment_anything/utils/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..4232d84252ea4983b194b2ebe8796741d252ef87
--- /dev/null
+++ b/model/segment_anything/utils/transforms.py
@@ -0,0 +1,113 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from copy import deepcopy
+from typing import Tuple
+
+import numpy as np
+import torch
+from torch.nn import functional as F
+from torchvision.transforms.functional import resize # type: ignore
+from torchvision.transforms.functional import to_pil_image
+
+
+class ResizeLongestSide:
+ """
+ Resizes images so that the longest side equals 'target_length', and provides
+ methods for resizing coordinates and boxes accordingly. Supports both numpy
+ arrays and batched torch tensors.
+ """
+
+ def __init__(self, target_length: int) -> None:
+ self.target_length = target_length
+
+ def apply_image(self, image: np.ndarray) -> np.ndarray:
+ """
+ Expects a numpy array with shape HxWxC in uint8 format.
+ """
+ target_size = self.get_preprocess_shape(
+ image.shape[0], image.shape[1], self.target_length
+ )
+ return np.array(resize(to_pil_image(image), target_size))
+
+ def apply_coords(
+ self, coords: np.ndarray, original_size: Tuple[int, ...]
+ ) -> np.ndarray:
+ """
+ Expects a numpy array of length 2 in the final dimension. Requires the
+ original image size in (H, W) format.
+ """
+ old_h, old_w = original_size
+ new_h, new_w = self.get_preprocess_shape(
+ original_size[0], original_size[1], self.target_length
+ )
+ coords = deepcopy(coords).astype(float)
+ coords[..., 0] = coords[..., 0] * (new_w / old_w)
+ coords[..., 1] = coords[..., 1] * (new_h / old_h)
+ return coords
+
+ def apply_boxes(
+ self, boxes: np.ndarray, original_size: Tuple[int, ...]
+ ) -> np.ndarray:
+ """
+ Expects a numpy array shape Bx4. Requires the original image size
+ in (H, W) format.
+ """
+ boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
+ return boxes.reshape(-1, 4)
+
+ def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
+ """
+ Expects batched images with shape BxCxHxW and float format. This
+ transformation may not exactly match apply_image. apply_image is
+ the transformation expected by the model.
+ """
+ # Expects an image in BCHW format. May not exactly match apply_image.
+ target_size = self.get_preprocess_shape(
+ image.shape[2], image.shape[3], self.target_length
+ )
+ return F.interpolate(
+ image, target_size, mode="bilinear", align_corners=False, antialias=True
+ )
+
+ def apply_coords_torch(
+ self, coords: torch.Tensor, original_size: Tuple[int, ...]
+ ) -> torch.Tensor:
+ """
+ Expects a torch tensor with length 2 in the last dimension. Requires the
+ original image size in (H, W) format.
+ """
+ old_h, old_w = original_size
+ new_h, new_w = self.get_preprocess_shape(
+ original_size[0], original_size[1], self.target_length
+ )
+ coords = deepcopy(coords).to(torch.float)
+ coords[..., 0] = coords[..., 0] * (new_w / old_w)
+ coords[..., 1] = coords[..., 1] * (new_h / old_h)
+ return coords
+
+ def apply_boxes_torch(
+ self, boxes: torch.Tensor, original_size: Tuple[int, ...]
+ ) -> torch.Tensor:
+ """
+ Expects a torch tensor with shape Bx4. Requires the original image
+ size in (H, W) format.
+ """
+ boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
+ return boxes.reshape(-1, 4)
+
+ @staticmethod
+ def get_preprocess_shape(
+ oldh: int, oldw: int, long_side_length: int
+ ) -> Tuple[int, int]:
+ """
+ Compute the output size given input size and target long side length.
+ """
+ scale = long_side_length * 1.0 / max(oldh, oldw)
+ newh, neww = oldh * scale, oldw * scale
+ neww = int(neww + 0.5)
+ newh = int(newh + 0.5)
+ return (newh, neww)
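+
+# Worked example (illustrative): with target_length=1024, a 480x640 (HxW) image gives
+#   get_preprocess_shape(480, 640, 1024) -> (768, 1024)   # scale = 1024 / 640 = 1.6
+# and apply_coords maps a point (x=320, y=240) in the original image to (512, 384),
+# since both axes are scaled by the same factor 1.6.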
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eb0d67afee51c39e1394b595ab1b6109ddee8441
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,23 @@
+--extra-index-url https://download.pytorch.org/whl/cu117
+torch==1.13.1
+torchvision==0.14.1
+packaging
+sentencepiece
+peft==0.4.0
+einops==0.4.1
+fastapi==0.100.1
+gradio==3.39.0
+markdown2==2.4.10
+numpy==1.24.2
+openai==0.27.8
+opencv_python==4.8.0.74
+Pillow==9.4.0
+pycocotools==2.0.6
+ray==2.6.1
+Requests==2.31.0
+shortuuid==1.0.11
+tqdm==4.64.1
+transformers==4.31.0
+uvicorn==0.23.2
+scipy==1.11.2
+bitsandbytes==0.41.1
\ No newline at end of file
diff --git a/runs/AffordanceVLM-7B/events.out.tfevents.1772609080.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.62578.0 b/runs/AffordanceVLM-7B/events.out.tfevents.1772609080.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.62578.0
new file mode 100644
index 0000000000000000000000000000000000000000..3ad7017a2b05925ca46ab85ce9159dda52046991
--- /dev/null
+++ b/runs/AffordanceVLM-7B/events.out.tfevents.1772609080.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.62578.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00099659f2e96003b940dcf33d71dd1505961facb82e3c82d0861b34d66715e9
+size 88
diff --git a/runs/AffordanceVLM-7B/events.out.tfevents.1772609262.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.63980.0 b/runs/AffordanceVLM-7B/events.out.tfevents.1772609262.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.63980.0
new file mode 100644
index 0000000000000000000000000000000000000000..17f4ddb02baea2a6c8a3c07ad885476d603ff1a2
--- /dev/null
+++ b/runs/AffordanceVLM-7B/events.out.tfevents.1772609262.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.63980.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b7d626cd6cdec93d58803c36a26c7e194bd946cadb5b0c8e13b16d323291670
+size 88
diff --git a/runs/AffordanceVLM-7B/events.out.tfevents.1773216948.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.4053037.0 b/runs/AffordanceVLM-7B/events.out.tfevents.1773216948.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.4053037.0
new file mode 100644
index 0000000000000000000000000000000000000000..9fb53a96fdb78883c7eb7cd8879aed51be68061d
--- /dev/null
+++ b/runs/AffordanceVLM-7B/events.out.tfevents.1773216948.6e04508537b1ed92679964d5d3029bb2-taskrole1-0.4053037.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20580afbe344daf312a5b58c0cfd3cc7619c601ffe9d6c437b36f024942e2390
+size 88
diff --git a/scripts/.ipynb_checkpoints/evaluate-checkpoint.sh b/scripts/.ipynb_checkpoints/evaluate-checkpoint.sh
new file mode 100644
index 0000000000000000000000000000000000000000..111a7b335eb938c799ac6ec00ac1d283b26c956a
--- /dev/null
+++ b/scripts/.ipynb_checkpoints/evaluate-checkpoint.sh
@@ -0,0 +1,92 @@
+# export PATH=/data/cuda/cuda-11.7/cuda/bin:$PATH
+# export LD_LIBRARY_PATH=/data/cuda/cuda-11.7/cuda/lib64:$LD_LIBRARY_PATH
+
+affordance_model="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B"
+
+# deepspeed --master_port=24990 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="handal_all"
+
+# deepspeed --master_port=24991 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="handal_mini"
+
+# deepspeed --master_port=24992 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='/gemini/space/wrz/AffordanceNet/data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="graspnet_test_seen"
+
+# deepspeed --master_port=24993 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="graspnet_test_novel"
+
+# deepspeed --master_port=24994 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="3doi"
+
+# deepspeed --master_port=24995 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_reason_aff \
+# --val_dataset="handal_hard_reasoning"
+
+deepspeed --master_port=24996 train_aff.py \
+ --version=$affordance_model \
+ --dataset_dir='/gemini/space/wrz/AffordanceNet/data' \
+ --dataset="reason_seg" \
+ --sample_rates="1" \
+ --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+ --exp_name="AffordanceVLM-7B" \
+ --eval_only \
+ --eval_reason_aff \
+ --val_dataset="handal_easy_reasoning"
+
+# deepspeed --master_port=24997 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_reason_aff \
+# --val_dataset="3doi_easy_reasoning"
diff --git a/scripts/evaluate.sh b/scripts/evaluate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..111a7b335eb938c799ac6ec00ac1d283b26c956a
--- /dev/null
+++ b/scripts/evaluate.sh
@@ -0,0 +1,92 @@
+# export PATH=/data/cuda/cuda-11.7/cuda/bin:$PATH
+# export LD_LIBRARY_PATH=/data/cuda/cuda-11.7/cuda/lib64:$LD_LIBRARY_PATH
+
+affordance_model="/gemini/code/AffordanceNet/ckpts/AffordanceVLM-7B"
+
+# deepspeed --master_port=24990 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="handal_all"
+
+# deepspeed --master_port=24991 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="handal_mini"
+
+# deepspeed --master_port=24992 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='/gemini/space/wrz/AffordanceNet/data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="graspnet_test_seen"
+
+# deepspeed --master_port=24993 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="graspnet_test_novel"
+
+# deepspeed --master_port=24994 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_affordance \
+# --val_dataset="3doi"
+
+# deepspeed --master_port=24995 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_reason_aff \
+# --val_dataset="handal_hard_reasoning"
+
+deepspeed --master_port=24996 train_aff.py \
+ --version=$affordance_model \
+ --dataset_dir='/gemini/space/wrz/AffordanceNet/data' \
+ --dataset="reason_seg" \
+ --sample_rates="1" \
+ --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+ --exp_name="AffordanceVLM-7B" \
+ --eval_only \
+ --eval_reason_aff \
+ --val_dataset="handal_easy_reasoning"
+
+# deepspeed --master_port=24997 train_aff.py \
+# --version=$affordance_model \
+# --dataset_dir='./data' \
+# --dataset="reason_seg" \
+# --sample_rates="1" \
+# --vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+# --exp_name="AffordanceVLM-7B" \
+# --eval_only \
+# --eval_reason_aff \
+# --val_dataset="3doi_easy_reasoning"
diff --git a/scripts/merge_lora_weights.sh b/scripts/merge_lora_weights.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0f38f2f1096abfa2430a1822d6366e2641f7d534
--- /dev/null
+++ b/scripts/merge_lora_weights.sh
@@ -0,0 +1,10 @@
+#cd ./runs/lisa/ckpt_model
+#
+#python zero_to_fp32.py . ../pytorch_model.bin
+#
+#cd ../../..
+
+CUDA_VISIBLE_DEVICES="" python3 merge_lora_weights_and_save_hf_model.py \
+--version="./LLaVA/LLaVA-Lightning-7B-v1-1" \
+--weight="./runs/AffordanceVLM-7B/pytorch_model.bin" \
+--save_path="./exps/AffordanceVLM-7B"
\ No newline at end of file
diff --git a/scripts/train.sh b/scripts/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..489b53b8c454d617ab0e9d180a903c57cb66065f
--- /dev/null
+++ b/scripts/train.sh
@@ -0,0 +1,17 @@
+export PATH=/data/cuda/cuda-11.7/cuda/bin:$PATH
+export LD_LIBRARY_PATH=/data/cuda/cuda-11.7/cuda/lib64:$LD_LIBRARY_PATH
+
+deepspeed --include localhost:0,1,2,3,4,5,6,7 --master_port=23996 train_aff.py \
+--version="./LLaVA/LLaVA-Lightning-7B-v1-1/" \
+--vision_pretrained="ckpts/sam_vit_h_4b8939.pth" \
+--dataset_dir='./data' \
+--dataset="sem_seg||refer_seg||vqa||reason_seg||aff_seg||reason_aff" \
+--sample_rates="3,1,1,1,9,3" \
+--aff_seg_data="handal||openx||egoobjects||graspnet||rlbench" \
+--aff_sample_rates='2,2,4,2,1' \
+--reason_aff_data="handal_hard_reasoning||egoobjects_easy_reasoning||egoobjects_hard_reasoning" \
+--reason_aff_sample_rates='1,1,1' \
+--exp_name="AffordanceVLM-7B" \
+--batch_size=40 \
+--grad_accumulation_steps=1 \
+--epochs=10
\ No newline at end of file
diff --git a/server.py b/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..52799cab00b70bc7ec2ff9a79f23ca150213d478
--- /dev/null
+++ b/server.py
@@ -0,0 +1,233 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Flask-based service to receive an image and a prompt, perform vision-language model inference, and return a segmentation mask.
+
+from __future__ import absolute_import, print_function, division
+from flask import Flask, request, jsonify
+import os
+import json
+import base64
+import argparse
+import numpy as np
+import cv2
+import torch
+import torch.nn.functional as F
+from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
+from PIL import Image
+from io import BytesIO
+
+# Custom model and utility imports
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+from utils.utils import (
+ DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX
+)
+
+app = Flask(__name__)
+
+# ---------------------------
+# Argument parser for model config
+# ---------------------------
+def parse_args(args):
+ parser = argparse.ArgumentParser(description="AffordanceVLM Flask Service")
+ parser.add_argument("--version", default="/data/AffordanceNet/exps/AffordanceVLM-7B")
+ parser.add_argument("--vis_save_path", default="./vis_output/ur5_samples", type=str)
+ parser.add_argument("--precision", default="bf16", choices=["fp32", "bf16", "fp16"])
+ parser.add_argument("--image_size", default=1024, type=int)
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument("--vision-tower", default="openai/clip-vit-large-patch14")
+ parser.add_argument("--local-rank", default=0, type=int)
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument("--conv_type", default="llava_v1", choices=["llava_v1", "llava_llama_2"])
+ return parser.parse_args(args)
+
+# ---------------------------
+# Model initialization
+# ---------------------------
+args = parse_args(None)
+os.makedirs(args.vis_save_path, exist_ok=True)
+
+# Load tokenizer and add custom tokens
+tokenizer = AutoTokenizer.from_pretrained(args.version, model_max_length=args.model_max_length, padding_side="right", use_fast=False)
+tokenizer.pad_token = tokenizer.unk_token
+args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+# Set precision
+torch_dtype = {
+ "bf16": torch.bfloat16,
+ "fp16": torch.half,
+ "fp32": torch.float32
+}[args.precision]
+
+# Optional quantization configs
+kwargs = {"torch_dtype": torch_dtype}
+if args.load_in_4bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "load_in_4bit": True,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type="nf4",
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ })
+elif args.load_in_8bit:
+ kwargs.update({
+ "torch_dtype": torch.half,
+ "quantization_config": BitsAndBytesConfig(
+ load_in_8bit=True,
+ llm_int8_skip_modules=["visual_model"],
+ ),
+ })
+
+# Load model
+model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version,
+ vision_tower=args.vision_tower,
+ seg_token_idx=args.seg_token_idx,
+ aff_token_idx=args.aff_token_idx,
+ low_cpu_mem_usage=True,
+ **kwargs
+)
+
+# Set special tokens
+model.config.eos_token_id = tokenizer.eos_token_id
+model.config.bos_token_id = tokenizer.bos_token_id
+model.config.pad_token_id = tokenizer.pad_token_id
+
+# Initialize vision modules
+model.get_model().initialize_vision_modules(model.get_model().config)
+vision_tower = model.get_model().get_vision_tower().to(dtype=torch_dtype)
+
+# Model precision setup
+if args.precision == "bf16":
+ model = model.bfloat16().cuda()
+elif args.precision == "fp16" and not args.load_in_4bit and not args.load_in_8bit:
+ model.model.vision_tower = None
+ import deepspeed
+ model = deepspeed.init_inference(model=model, dtype=torch.half, replace_with_kernel_inject=True).module
+ model.model.vision_tower = vision_tower.half().cuda()
+else:
+ model = model.float().cuda()
+
+vision_tower.to(device=args.local_rank)
+clip_image_processor = CLIPImageProcessor.from_pretrained(model.config.vision_tower)
+transform = ResizeLongestSide(args.image_size)
+
+model.eval()
+
+# ---------------------------
+# Image preprocessing function
+# ---------------------------
+def preprocess(x, pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
+ pixel_std=torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1),
+ img_size=1024) -> torch.Tensor:
+ x = (x - pixel_mean) / pixel_std
+ h, w = x.shape[-2:]
+ x = F.pad(x, (0, img_size - w, 0, img_size - h))
+ return x
+
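+# Illustrative flow through the SAM branch (shapes assume a 480x640 RGB input;
+# this mirrors what segment() below does):
+#   image = transform.apply_image(image_np)           # -> 768x1024x3 uint8
+#   t = torch.from_numpy(image).permute(2, 0, 1)      # -> 3x768x1024
+#   t = preprocess(t)                                 # normalized, zero-padded to 3x1024x1024
+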
+# ---------------------------
+# Segmentation core logic
+# ---------------------------
+def segment(image_path, prompt):
+ conv = conversation_lib.conv_templates[args.conv_type].copy()
+ conv.messages = []
+
+ prompt = DEFAULT_IMAGE_TOKEN + "\nYou are an embodied robot. " + prompt
+ if args.use_mm_start_end:
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN,
+ DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN)
+
+ conv.append_message(conv.roles[0], prompt)
+ conv.append_message(conv.roles[1], "")
+ prompt = conv.get_prompt()
+
+ if not os.path.exists(image_path):
+ print(f"File not found: {image_path}")
+ return None
+
+ image_np = cv2.imread(image_path)
+ image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
+ original_size_list = [image_np.shape[:2]]
+
+ # CLIP preprocessing
+ image_clip = clip_image_processor.preprocess(image_np, return_tensors="pt")["pixel_values"][0].unsqueeze(0).cuda()
+ image_clip = image_clip.to(dtype=torch_dtype)
+
+ # Resize and normalize
+ image = transform.apply_image(image_np)
+ resize_list = [image.shape[:2]]
+ image = preprocess(torch.from_numpy(image).permute(2, 0, 1)).unsqueeze(0).cuda().to(dtype=torch_dtype)
+
+ # Tokenize prompt
+ input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt").unsqueeze(0).cuda()
+
+ # Model inference
+ output_ids, pred_masks = model.evaluate(image_clip, image, input_ids, resize_list, original_size_list,
+ max_new_tokens=512, tokenizer=tokenizer)
+ output_ids = output_ids[0][output_ids[0] != IMAGE_TOKEN_INDEX]
+ text_output = tokenizer.decode(output_ids, skip_special_tokens=False).replace("\n", "").replace("  ", " ")
+ print("text_output:", text_output)
+
+ # Save predicted masks
+ save_mask_path = ""
+ for i, pred_mask in enumerate(pred_masks):
+ if pred_mask.shape[0] == 0:
+ continue
+ pred_mask = pred_mask.detach().cpu().numpy()[0] > 0
+ save_mask_path = f"{args.vis_save_path}/{os.path.basename(image_path).split('.')[0]}_mask_{i}.jpg"
+ cv2.imwrite(save_mask_path, pred_mask * 100)
+ print(f"Saved: {save_mask_path}")
+
+ save_img_path = f"{args.vis_save_path}/{os.path.basename(image_path).split('.')[0]}_masked_img_{i}.jpg"
+ save_img = image_np.copy()
+ save_img[pred_mask] = (image_np * 0.5 + pred_mask[:, :, None] * np.array([255, 0, 0]) * 0.5)[pred_mask]
+ save_img = cv2.cvtColor(save_img, cv2.COLOR_RGB2BGR)
+ cv2.imwrite(save_img_path, save_img)
+ print(f"Saved: {save_img_path}")
+
+ return save_mask_path
+
+# ---------------------------
+# Convert image to base64
+# ---------------------------
+def img2b64(img):
+ _, buffer = cv2.imencode('.bmp', img)
+ return base64.b64encode(buffer).decode()
+
+# ---------------------------
+# HTTP endpoint: /img_mask
+# ---------------------------
+@app.route("/img_mask", methods=['POST', 'GET'])
+def recv_json():
+ data = json.loads(request.data)
+ prompt = data.get('prompt', 'no_recv')
+ print("Received prompt:", prompt)
+
+ # Decode base64 image
+ img_data = base64.b64decode(data['img'])
+ img_np = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
+ cv2.imwrite(os.path.join(args.vis_save_path, 'img.jpg'), img_np)
+
+ # Run segmentation
+ save_path = segment(os.path.join(args.vis_save_path, 'img.jpg'), prompt)
+ img = cv2.imread(save_path)
+ pic_str = img2b64(img)
+
+ return jsonify({'img': pic_str})
+
+# ---------------------------
+# App entry point
+# ---------------------------
+if __name__ == "__main__":
+ app.run(host='0.0.0.0', port=3200)
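+
+# Example client call (illustrative; assumes the `requests` package is installed,
+# the server is running locally, and the file name is hypothetical):
+#   import base64, json, requests
+#   with open("workspace.jpg", "rb") as f:
+#       img_b64 = base64.b64encode(f.read()).decode()
+#   payload = json.dumps({"prompt": "Where should I grasp the hammer?", "img": img_b64})
+#   resp = requests.post("http://localhost:3200/img_mask", data=payload)
+#   mask_b64 = resp.json()["img"]   # base64-encoded BMP of the predicted mask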
diff --git a/train_aff.py b/train_aff.py
new file mode 100644
index 0000000000000000000000000000000000000000..87f22173252054d43f44bf78cd8bdecf479e5b1b
--- /dev/null
+++ b/train_aff.py
@@ -0,0 +1,620 @@
+import argparse
+import os
+import shutil
+import sys
+import time
+from functools import partial
+
+import deepspeed
+import numpy as np
+import torch
+import tqdm
+import transformers
+from peft import LoraConfig, get_peft_model
+from torch.utils.tensorboard import SummaryWriter
+
+from model.AffordanceVLM import AffordanceVLMForCausalLM
+from model.llava import conversation as conversation_lib
+from utils.dataset import HybridDataset, ValDataset, collate_fn
+from utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ AverageMeter, ProgressMeter, Summary, dict_to_cuda,
+ intersectionAndUnionGPU)
+
+from utils.aff_seg_dataset import AffValDataset
+from utils.reason_aff_dataset import ReasonAffValDataset
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser(description="LISA Model Training")
+ parser.add_argument("--local_rank", default=0, type=int, help="node rank")
+ parser.add_argument(
+ "--version", default="liuhaotian/llava-llama-2-13b-chat-lightning-preview"
+ )
+ parser.add_argument("--vis_save_path", default="./vis_output", type=str)
+ parser.add_argument(
+ "--precision",
+ default="bf16",
+ type=str,
+ choices=["fp32", "bf16", "fp16"],
+ help="precision for inference",
+ )
+ parser.add_argument("--image_size", default=1024, type=int, help="image size")
+ parser.add_argument("--model_max_length", default=512, type=int)
+ parser.add_argument("--lora_r", default=8, type=int)
+ parser.add_argument(
+ "--vision-tower", default="openai/clip-vit-large-patch14", type=str
+ )
+ parser.add_argument("--load_in_8bit", action="store_true", default=False)
+ parser.add_argument("--load_in_4bit", action="store_true", default=False)
+
+ parser.add_argument(
+ "--dataset", default="sem_seg||refer_seg||vqa||reason_seg", type=str
+ )
+ parser.add_argument("--sample_rates", default="9,3,3,1", type=str)
+ parser.add_argument(
+ "--sem_seg_data",
+ default="ade20k||cocostuff||pascal_part||paco_lvis||mapillary",
+ type=str,
+ )
+ parser.add_argument(
+ "--refer_seg_data", default="refclef||refcoco||refcoco+||refcocog", type=str
+ )
+ parser.add_argument("--vqa_data", default="llava_instruct_150k", type=str)
+ parser.add_argument("--reason_seg_data", default="ReasonSeg|train", type=str)
+ parser.add_argument("--aff_seg_data", default="handal", type=str)
+ parser.add_argument("--aff_sample_rates", default="1", type=str)
+ parser.add_argument("--reason_aff_data", default="handal_hard_reasoning", type=str)
+ parser.add_argument("--reason_aff_sample_rates", default="1", type=str)
+ parser.add_argument("--val_dataset", default="ReasonSeg|val", type=str)
+ parser.add_argument("--dataset_dir", default="./dataset", type=str)
+ parser.add_argument("--log_base_dir", default="./runs", type=str)
+ parser.add_argument("--exp_name", default="lisa", type=str)
+ parser.add_argument("--epochs", default=10, type=int)
+ parser.add_argument("--steps_per_epoch", default=500, type=int)
+ parser.add_argument(
+ "--batch_size", default=2, type=int, help="batch size per device per step"
+ )
+ parser.add_argument(
+ "--grad_accumulation_steps",
+ default=10,
+ type=int,
+ )
+ parser.add_argument("--val_batch_size", default=1, type=int)
+ parser.add_argument("--workers", default=4, type=int)
+ parser.add_argument("--lr", default=0.0003, type=float)
+ parser.add_argument("--ce_loss_weight", default=1.0, type=float)
+ parser.add_argument("--dice_loss_weight", default=0.5, type=float)
+ parser.add_argument("--bce_loss_weight", default=2.0, type=float)
+ parser.add_argument("--lora_alpha", default=16, type=int)
+ parser.add_argument("--lora_dropout", default=0.05, type=float)
+ parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str)
+ parser.add_argument("--explanatory", default=0.1, type=float)
+ parser.add_argument("--beta1", default=0.9, type=float)
+ parser.add_argument("--beta2", default=0.95, type=float)
+ parser.add_argument("--num_classes_per_sample", default=3, type=int)
+ parser.add_argument("--exclude_val", action="store_true", default=False)
+ parser.add_argument("--no_eval", action="store_true", default=False)
+ parser.add_argument("--eval_only", action="store_true", default=False)
+ parser.add_argument("--eval_affordance", action="store_true", default=False)
+ parser.add_argument("--eval_reason_aff", action="store_true", default=False)
+ parser.add_argument("--vision_pretrained", default="PATH_TO_SAM_ViT-H", type=str)
+ parser.add_argument("--out_dim", default=256, type=int)
+ parser.add_argument("--resume", default="", type=str)
+ parser.add_argument("--print_freq", default=1, type=int)
+ parser.add_argument("--start_epoch", default=0, type=int)
+ parser.add_argument("--gradient_checkpointing", action="store_true", default=True)
+ parser.add_argument("--train_mask_decoder", action="store_true", default=True)
+ parser.add_argument("--use_mm_start_end", action="store_true", default=True)
+ parser.add_argument("--auto_resume", action="store_true", default=True)
+ parser.add_argument(
+ "--conv_type",
+ default="llava_v1",
+ type=str,
+ choices=["llava_v1", "llava_llama_2"],
+ )
+ return parser.parse_args(args)
+
+
+def main(args):
+ args = parse_args(args)
+ args.log_dir = os.path.join(args.log_base_dir, args.exp_name)
+ if args.local_rank == 0:
+ os.makedirs(args.log_dir, exist_ok=True)
+ writer = SummaryWriter(args.log_dir)
+ else:
+ writer = None
+
+ # Create model
+ tokenizer = transformers.AutoTokenizer.from_pretrained(
+ args.version,
+ cache_dir=None,
+ model_max_length=args.model_max_length,
+ padding_side="right",
+ use_fast=False,
+ )
+ tokenizer.pad_token = tokenizer.unk_token
+ num_added_tokens = tokenizer.add_tokens("[SEG]")
+ args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[0]
+ num_added_tokens = tokenizer.add_tokens("[AFF]")
+ args.aff_token_idx = tokenizer("[AFF]", add_special_tokens=False).input_ids[0]
+
+ if args.use_mm_start_end:
+ tokenizer.add_tokens(
+ [DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True
+ )
+
+ model_args = {
+ "train_mask_decoder": args.train_mask_decoder,
+ "out_dim": args.out_dim,
+ "ce_loss_weight": args.ce_loss_weight,
+ "dice_loss_weight": args.dice_loss_weight,
+ "bce_loss_weight": args.bce_loss_weight,
+ "seg_token_idx": args.seg_token_idx,
+ "aff_token_idx": args.aff_token_idx,
+ "vision_pretrained": args.vision_pretrained,
+ "vision_tower": args.vision_tower,
+ "use_mm_start_end": args.use_mm_start_end,
+ }
+ torch_dtype = torch.float32
+ if args.precision == "bf16":
+ torch_dtype = torch.bfloat16
+ elif args.precision == "fp16":
+ torch_dtype = torch.half
+ model = AffordanceVLMForCausalLM.from_pretrained(
+ args.version, torch_dtype=torch_dtype, low_cpu_mem_usage=True, **model_args
+ )
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+ model.enable_input_require_grads()
+ model.gradient_checkpointing_enable()
+
+ model.get_model().initialize_vision_modules(model.get_model().config)
+ vision_tower = model.get_model().get_vision_tower()
+ vision_tower.to(dtype=torch_dtype, device=args.local_rank)
+ if not args.eval_only:
+ model.get_model().initialize_lisa_modules(model.get_model().config)
+
+ for p in vision_tower.parameters():
+ p.requires_grad = False
+ for p in model.get_model().mm_projector.parameters():
+ p.requires_grad = False
+
+ conversation_lib.default_conversation = conversation_lib.conv_templates[
+ args.conv_type
+ ]
+
+ lora_r = args.lora_r
+ if lora_r > 0:
+
+ def find_linear_layers(model, lora_target_modules):
+ cls = torch.nn.Linear
+ lora_module_names = set()
+ for name, module in model.named_modules():
+ if (
+ isinstance(module, cls)
+ and all(
+ [
+ x not in name
+ for x in [
+ "visual_model",
+ "vision_tower",
+ "mm_projector",
+ "text_hidden_fcs",
+ ]
+ ]
+ )
+ and any([x in name for x in lora_target_modules])
+ ):
+ lora_module_names.add(name)
+ return sorted(list(lora_module_names))
+
+ lora_alpha = args.lora_alpha
+ lora_dropout = args.lora_dropout
+ lora_target_modules = find_linear_layers(
+ model, args.lora_target_modules.split(",")
+ )
+ lora_config = LoraConfig(
+ r=lora_r,
+ lora_alpha=lora_alpha,
+ target_modules=lora_target_modules,
+ lora_dropout=lora_dropout,
+ bias="none",
+ task_type="CAUSAL_LM",
+ )
+ model = get_peft_model(model, lora_config)
+ model.print_trainable_parameters()
+
+ model.resize_token_embeddings(len(tokenizer))
+
+ # make text_hidden_fcs, mask_decoder, lm_head, embed_tokens trainable
+ for n, p in model.named_parameters():
+ if any(
+ [
+ x in n
+ for x in ["lm_head", "embed_tokens", "mask_decoder", "text_hidden_fcs"]
+ ]
+ ):
+ print("n: ", n, "p.shape: ", p.shape)
+ p.requires_grad = True
+
+ world_size = torch.cuda.device_count()
+ args.distributed = world_size > 1
+ train_dataset = HybridDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ samples_per_epoch=args.batch_size
+ * args.grad_accumulation_steps
+ * args.steps_per_epoch
+ * world_size,
+ precision=args.precision,
+ image_size=args.image_size,
+ num_classes_per_sample=args.num_classes_per_sample,
+ exclude_val=args.exclude_val,
+ dataset=args.dataset,
+ sample_rate=[float(x) for x in args.sample_rates.split(",")],
+ sem_seg_data=args.sem_seg_data,
+ refer_seg_data=args.refer_seg_data,
+ vqa_data=args.vqa_data,
+ reason_seg_data=args.reason_seg_data,
+ aff_seg_data=args.aff_seg_data,
+ aff_sample_rate=[float(x) for x in args.aff_sample_rates.split(",")],
+ reason_aff_data=args.reason_aff_data,
+ reason_aff_sample_rate=[float(x) for x in args.reason_aff_sample_rates.split(",")],
+ explanatory=args.explanatory,
+ )
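+
+ # Rough throughput check (illustrative, using the values in scripts/train.sh:
+ # batch_size=40, grad_accumulation_steps=1, 8 GPUs, steps_per_epoch=500):
+ # each optimizer step sees 40 * 1 * 8 = 320 samples, and samples_per_epoch
+ # above evaluates to 320 * 500 = 160,000.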
+
+ if not args.no_eval:
+ if args.eval_affordance:
+ val_dataset = AffValDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ args.val_dataset,
+ args.image_size,
+ )
+ elif args.eval_reason_aff:
+ val_dataset = ReasonAffValDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ args.val_dataset,
+ args.image_size,
+ )
+ else:
+ val_dataset = ValDataset(
+ args.dataset_dir,
+ tokenizer,
+ args.vision_tower,
+ args.val_dataset,
+ args.image_size,
+ )
+ print(
+ f"Training with {len(train_dataset)} examples and validating with {len(val_dataset)} examples."
+ )
+ else:
+ val_dataset = None
+ print(f"Training with {len(train_dataset)} examples.")
+
+ ds_config = {
+ "train_micro_batch_size_per_gpu": args.batch_size,
+ "gradient_accumulation_steps": args.grad_accumulation_steps,
+ "optimizer": {
+ "type": "AdamW",
+ "params": {
+ "lr": args.lr,
+ "weight_decay": 0.0,
+ "betas": (args.beta1, args.beta2),
+ },
+ },
+ "scheduler": {
+ "type": "WarmupDecayLR",
+ "params": {
+ "total_num_steps": args.epochs * args.steps_per_epoch,
+ "warmup_min_lr": 0,
+ "warmup_max_lr": args.lr,
+ "warmup_num_steps": 100,
+ "warmup_type": "linear",
+ },
+ },
+ "fp16": {
+ "enabled": args.precision == "fp16",
+ },
+ "bf16": {
+ "enabled": args.precision == "bf16",
+ },
+ "gradient_clipping": 1.0,
+ "zero_optimization": {
+ "stage": 2,
+ "contiguous_gradients": True,
+ "overlap_comm": True,
+ "reduce_scatter": True,
+ "reduce_bucket_size": 5e8,
+ "allgather_bucket_size": 5e8,
+ },
+ }
+ model_engine, optimizer, train_loader, scheduler = deepspeed.initialize(
+ model=model,
+ model_parameters=model.parameters(),
+ training_data=train_dataset,
+ collate_fn=partial(
+ collate_fn,
+ tokenizer=tokenizer,
+ conv_type=args.conv_type,
+ use_mm_start_end=args.use_mm_start_end,
+ local_rank=args.local_rank,
+ ),
+ config=ds_config,
+ )
+
+ # resume deepspeed checkpoint
+ if args.auto_resume and len(args.resume) == 0:
+ resume = os.path.join(args.log_dir, "ckpt_model")
+ if os.path.exists(resume):
+ args.resume = resume
+
+ if args.resume:
+ load_path, client_state = model_engine.load_checkpoint(args.resume)
+ with open(os.path.join(args.resume, "latest"), "r") as f:
+ ckpt_dir = f.readlines()[0].strip()
+ args.start_epoch = (
+ int(ckpt_dir.replace("global_step", "")) // args.steps_per_epoch
+ )
+ print(
+ "resume training from {}, start from epoch {}".format(
+ args.resume, args.start_epoch
+ )
+ )
+
+ # validation dataset
+ if val_dataset is not None:
+ assert args.val_batch_size == 1
+ val_sampler = torch.utils.data.distributed.DistributedSampler(
+ val_dataset, shuffle=False, drop_last=False
+ )
+ val_loader = torch.utils.data.DataLoader(
+ val_dataset,
+ batch_size=args.val_batch_size,
+ shuffle=False,
+ num_workers=args.workers,
+ pin_memory=False,
+ sampler=val_sampler,
+ collate_fn=partial(
+ collate_fn,
+ tokenizer=tokenizer,
+ conv_type=args.conv_type,
+ use_mm_start_end=args.use_mm_start_end,
+ local_rank=args.local_rank,
+ ),
+ )
+
+ train_iter = iter(train_loader)
+ best_score, cur_ciou = 0.0, 0.0
+
+ if args.eval_only:
+ giou, ciou = validate(val_loader, model_engine, 0, writer, args)
+ if args.local_rank == 0:
+ with open(os.path.join(args.version, "eval_result.txt"), "a") as f:
+ f.write(f"dataset: {args.val_dataset}, giou: {giou}, ciou: {ciou} \n")
+ exit()
+
+ for epoch in range(args.start_epoch, args.epochs):
+ # train for one epoch
+ train_iter = train(
+ train_loader,
+ model_engine,
+ epoch,
+ scheduler,
+ writer,
+ train_iter,
+ args,
+ )
+
+ if not args.no_eval:
+ giou, ciou = validate(val_loader, model_engine, epoch, writer, args)
+ is_best = giou > best_score
+ best_score = max(giou, best_score)
+ cur_ciou = ciou if is_best else cur_ciou
+
+ if args.no_eval or is_best:
+ save_dir = os.path.join(args.log_dir, "ckpt_model")
+ if args.local_rank == 0:
+ torch.save(
+ {"epoch": epoch},
+ os.path.join(
+ args.log_dir,
+ "meta_log_giou{:.3f}_ciou{:.3f}.pth".format(
+ best_score, cur_ciou
+ ),
+ ),
+ )
+ if os.path.exists(save_dir):
+ shutil.rmtree(save_dir)
+ torch.distributed.barrier()
+ model_engine.save_checkpoint(save_dir)
+
+
+def train(
+ train_loader,
+ model,
+ epoch,
+ scheduler,
+ writer,
+ train_iter,
+ args,
+):
+ """Main training loop."""
+ batch_time = AverageMeter("Time", ":6.3f")
+ data_time = AverageMeter("Data", ":6.3f")
+ losses = AverageMeter("Loss", ":.4f")
+ ce_losses = AverageMeter("CeLoss", ":.4f")
+ mask_bce_losses = AverageMeter("MaskBCELoss", ":.4f")
+ mask_dice_losses = AverageMeter("MaskDICELoss", ":.4f")
+ mask_losses = AverageMeter("MaskLoss", ":.4f")
+
+ progress = ProgressMeter(
+ args.steps_per_epoch,
+ [
+ batch_time,
+ losses,
+ ce_losses,
+ mask_losses,
+ mask_bce_losses,
+ mask_dice_losses,
+ ],
+ prefix="Epoch: [{}]".format(epoch),
+ )
+
+ # switch to train mode
+ model.train()
+ end = time.time()
+ for global_step in range(args.steps_per_epoch):
+ for i in range(args.grad_accumulation_steps):
+ try:
+ input_dict = next(train_iter)
+ except StopIteration:
+ train_iter = iter(train_loader)
+ input_dict = next(train_iter)
+
+ data_time.update(time.time() - end)
+ input_dict = dict_to_cuda(input_dict)
+
+ if args.precision == "fp16":
+ input_dict["images"] = input_dict["images"].half()
+ input_dict["images_clip"] = input_dict["images_clip"].half()
+ elif args.precision == "bf16":
+ input_dict["images"] = input_dict["images"].bfloat16()
+ input_dict["images_clip"] = input_dict["images_clip"].bfloat16()
+ else:
+ input_dict["images"] = input_dict["images"].float()
+ input_dict["images_clip"] = input_dict["images_clip"].float()
+
+ output_dict = model(**input_dict)
+
+ loss = output_dict["loss"]
+ ce_loss = output_dict["ce_loss"]
+ mask_bce_loss = output_dict["mask_bce_loss"]
+ mask_dice_loss = output_dict["mask_dice_loss"]
+ mask_loss = output_dict["mask_loss"]
+
+ losses.update(loss.item(), input_dict["images"].size(0))
+ ce_losses.update(ce_loss.item(), input_dict["images"].size(0))
+ mask_bce_losses.update(mask_bce_loss.item(), input_dict["images"].size(0))
+ mask_dice_losses.update(mask_dice_loss.item(), input_dict["images"].size(0))
+ mask_losses.update(mask_loss.item(), input_dict["images"].size(0))
+ model.backward(loss)
+ model.step()
+
+ # measure elapsed time
+ batch_time.update(time.time() - end)
+ end = time.time()
+
+ if global_step % args.print_freq == 0:
+ if args.distributed:
+ batch_time.all_reduce()
+ data_time.all_reduce()
+
+ losses.all_reduce()
+ ce_losses.all_reduce()
+ mask_bce_losses.all_reduce()
+ mask_dice_losses.all_reduce()
+ mask_losses.all_reduce()
+
+ if args.local_rank == 0:
+ progress.display(global_step + 1)
+ writer.add_scalar("train/loss", losses.avg, global_step)
+ writer.add_scalar("train/ce_loss", ce_losses.avg, global_step)
+ writer.add_scalar(
+ "train/mask_bce_loss", mask_bce_losses.avg, global_step
+ )
+ writer.add_scalar(
+ "train/mask_dice_loss", mask_dice_losses.avg, global_step
+ )
+ writer.add_scalar("train/mask_loss", mask_losses.avg, global_step)
+ writer.add_scalar(
+ "metrics/total_secs_per_batch", batch_time.avg, global_step
+ )
+ writer.add_scalar(
+ "metrics/data_secs_per_batch", data_time.avg, global_step
+ )
+
+ batch_time.reset()
+ data_time.reset()
+ losses.reset()
+ ce_losses.reset()
+ mask_bce_losses.reset()
+ mask_dice_losses.reset()
+ mask_losses.reset()
+
+ if global_step != 0:
+ curr_lr = scheduler.get_last_lr()
+ if args.local_rank == 0:
+ writer.add_scalar("train/lr", curr_lr[0], global_step)
+
+ return train_iter
+
+
+def validate(val_loader, model_engine, epoch, writer, args):
+ intersection_meter = AverageMeter("Intersec", ":6.3f", Summary.SUM)
+ union_meter = AverageMeter("Union", ":6.3f", Summary.SUM)
+ acc_iou_meter = AverageMeter("gIoU", ":6.3f", Summary.SUM)
+
+ model_engine.eval()
+
+ for input_dict in tqdm.tqdm(val_loader):
+ torch.cuda.empty_cache()
+
+ input_dict = dict_to_cuda(input_dict)
+ if args.precision == "fp16":
+ input_dict["images"] = input_dict["images"].half()
+ input_dict["images_clip"] = input_dict["images_clip"].half()
+ elif args.precision == "bf16":
+ input_dict["images"] = input_dict["images"].bfloat16()
+ input_dict["images_clip"] = input_dict["images_clip"].bfloat16()
+ else:
+ input_dict["images"] = input_dict["images"].float()
+ input_dict["images_clip"] = input_dict["images_clip"].float()
+
+ with torch.no_grad():
+ output_dict = model_engine(**input_dict)
+
+ pred_masks = output_dict["pred_masks"]
+ masks_list = output_dict["gt_masks"][0].int()
+ output_list = (pred_masks[0] > 0).int()
+ assert len(pred_masks) == 1
+
+ intersection, union, acc_iou = 0.0, 0.0, 0.0
+ for mask_i, output_i in zip(masks_list, output_list):
+ intersection_i, union_i, _ = intersectionAndUnionGPU(
+ output_i.contiguous().clone(), mask_i.contiguous(), 2, ignore_index=255
+ )
+ intersection += intersection_i
+ union += union_i
+ acc_iou += intersection_i / (union_i + 1e-5)
+ acc_iou[union_i == 0] += 1.0 # no-object target
+ intersection, union = intersection.cpu().numpy(), union.cpu().numpy()
+ acc_iou = acc_iou.cpu().numpy() / masks_list.shape[0]
+ intersection_meter.update(intersection)
+ union_meter.update(union)
+ acc_iou_meter.update(acc_iou, n=masks_list.shape[0])
+
+ intersection_meter.all_reduce()
+ union_meter.all_reduce()
+ acc_iou_meter.all_reduce()
+
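+ # Note: ciou below is the dataset-level foreground IoU (intersections and unions
+ # pooled over the whole split before dividing), while giou is the mean of the
+ # per-sample foreground IoUs accumulated in acc_iou_meter.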
+ iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
+ ciou = iou_class[1]
+ giou = acc_iou_meter.avg[1]
+
+ if args.local_rank == 0:
+ writer.add_scalar("val/giou", giou, epoch)
+ writer.add_scalar("val/ciou", ciou, epoch)
+ print("giou: {:.4f}, ciou: {:.4f}".format(giou, ciou))
+
+ return giou, ciou
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/utils/__pycache__/aff_seg_dataset.cpython-39.pyc b/utils/__pycache__/aff_seg_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..252c285c75d40d2d7d729328bba4c8021353fc67
Binary files /dev/null and b/utils/__pycache__/aff_seg_dataset.cpython-39.pyc differ
diff --git a/utils/__pycache__/conversation.cpython-39.pyc b/utils/__pycache__/conversation.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fcb6897d070441b3a08aa51fdc85189b2f82503
Binary files /dev/null and b/utils/__pycache__/conversation.cpython-39.pyc differ
diff --git a/utils/__pycache__/data_processing.cpython-39.pyc b/utils/__pycache__/data_processing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..faa3804bdb872e18267226c3a3efbd1af7158a45
Binary files /dev/null and b/utils/__pycache__/data_processing.cpython-39.pyc differ
diff --git a/utils/__pycache__/dataset.cpython-39.pyc b/utils/__pycache__/dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5902f1b968f9972875117933dab17d0bee24a5d4
Binary files /dev/null and b/utils/__pycache__/dataset.cpython-39.pyc differ
diff --git a/utils/__pycache__/grefer.cpython-39.pyc b/utils/__pycache__/grefer.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9387c35a6a3c01c205ac9807f78ad87f0ac87c2f
Binary files /dev/null and b/utils/__pycache__/grefer.cpython-39.pyc differ
diff --git a/utils/__pycache__/reason_aff_dataset.cpython-39.pyc b/utils/__pycache__/reason_aff_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02753d8f72401e3cb29da2c975909d20f87f7d12
Binary files /dev/null and b/utils/__pycache__/reason_aff_dataset.cpython-39.pyc differ
diff --git a/utils/__pycache__/reason_seg_dataset.cpython-39.pyc b/utils/__pycache__/reason_seg_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a338cbaed0fb24909f681b2dae7592dde8775d10
Binary files /dev/null and b/utils/__pycache__/reason_seg_dataset.cpython-39.pyc differ
diff --git a/utils/__pycache__/refer.cpython-39.pyc b/utils/__pycache__/refer.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ab88d4646cbe3c9ce576cf3bfc76c4e3596b757
Binary files /dev/null and b/utils/__pycache__/refer.cpython-39.pyc differ
diff --git a/utils/__pycache__/refer_seg_dataset.cpython-39.pyc b/utils/__pycache__/refer_seg_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da3b0afbbbcc8e8ca947e83a78417e8939a49fe4
Binary files /dev/null and b/utils/__pycache__/refer_seg_dataset.cpython-39.pyc differ
diff --git a/utils/__pycache__/sem_seg_dataset.cpython-39.pyc b/utils/__pycache__/sem_seg_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44ec265f0929401081d687cb891c9cae76910896
Binary files /dev/null and b/utils/__pycache__/sem_seg_dataset.cpython-39.pyc differ
diff --git a/utils/__pycache__/utils.cpython-39.pyc b/utils/__pycache__/utils.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ef0176bafa638960c99ab15d137ff79596452f6
Binary files /dev/null and b/utils/__pycache__/utils.cpython-39.pyc differ
diff --git a/utils/__pycache__/vqa_dataset.cpython-39.pyc b/utils/__pycache__/vqa_dataset.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2835362d95593d6e24eb8458f876725a36f167b3
Binary files /dev/null and b/utils/__pycache__/vqa_dataset.cpython-39.pyc differ
diff --git a/utils/ade20k_classes.json b/utils/ade20k_classes.json
new file mode 100644
index 0000000000000000000000000000000000000000..1f96e616bc3fd2f8c0ec4caea975d77c680f44bb
--- /dev/null
+++ b/utils/ade20k_classes.json
@@ -0,0 +1,30 @@
+[
+ "wall", "building", "sky", "floor", "tree", "ceiling", "road",
+ "bed", "windowpane", "grass", "cabinet", "sidewalk",
+ "person", "earth", "door", "table", "mountain", "plant",
+ "curtain", "chair", "car", "water", "painting", "sofa",
+ "shelf", "house", "sea", "mirror", "rug", "field", "armchair",
+ "seat", "fence", "desk", "rock", "wardrobe", "lamp",
+ "bathtub", "railing", "cushion", "base", "box", "column",
+ "signboard", "chest of drawers", "counter", "sand", "sink",
+ "skyscraper", "fireplace", "refrigerator", "grandstand",
+ "path", "stairs", "runway", "case", "pool table", "pillow",
+ "screen door", "stairway", "river", "bridge", "bookcase",
+ "blind", "coffee table", "toilet", "flower", "book", "hill",
+ "bench", "countertop", "stove", "palm", "kitchen island",
+ "computer", "swivel chair", "boat", "bar", "arcade machine",
+ "hovel", "bus", "towel", "light", "truck", "tower",
+ "chandelier", "awning", "streetlight", "booth",
+ "television receiver", "airplane", "dirt track", "apparel",
+ "pole", "land", "bannister", "escalator", "ottoman", "bottle",
+ "buffet", "poster", "stage", "van", "ship", "fountain",
+ "conveyer belt", "canopy", "washer", "plaything",
+ "swimming pool", "stool", "barrel", "basket", "waterfall",
+ "tent", "bag", "minibike", "cradle", "oven", "ball", "food",
+ "step", "tank", "trade name", "microwave", "pot", "animal",
+ "bicycle", "lake", "dishwasher", "screen", "blanket",
+ "sculpture", "hood", "sconce", "vase", "traffic light",
+ "tray", "ashcan", "fan", "pier", "crt screen", "plate",
+ "monitor", "bulletin board", "shower", "radiator", "glass",
+ "clock", "flag"
+]
\ No newline at end of file
diff --git a/utils/aff_seg_dataset.py b/utils/aff_seg_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..7152b0ec5bdab2905f1f2ba3707e9c3ec00d7278
--- /dev/null
+++ b/utils/aff_seg_dataset.py
@@ -0,0 +1,437 @@
+import glob
+import json
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import CLIPImageProcessor
+
+from model.llava import conversation as conversation_lib
+from model.segment_anything.utils.transforms import ResizeLongestSide
+
+from .data_processing import get_mask_from_json
+from .utils import (ANSWER_LIST, DEFAULT_IMAGE_TOKEN,
+ EXPLANATORY_QUESTION_LIST, LONG_QUESTION_LIST,
+ SHORT_QUESTION_LIST)
+from PIL import Image
+
+import pickle
+
+
+AFFORDANCE_QUESTION_LIST = [
+ DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. Can you segment the affordance map of {class_name} in this image?",
+ DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. Please segment the affordance map of {class_name} in this image.",
+ DEFAULT_IMAGE_TOKEN
+ + "\n"
+ + "You are an embodied robot. What is the affordance map of {class_name} in this image?",
+ DEFAULT_IMAGE_TOKEN
+ + "\n"
+ + "You are an embodied robot. What is the affordance map of {class_name} in this image?",
+]
+
+AFFORDANCE_ANSWER_LIST = [
+ "It is [AFF].",
+ "Sure, [AFF].",
+ "Sure, it is [AFF].",
+ "Sure, the affordance map is [AFF].",
+ "[AFF].",
+]
+
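+# Hedged illustration of how a training pair is assembled from the lists above
+# (assuming DEFAULT_IMAGE_TOKEN is the usual "<image>" placeholder; the final prompt is
+# additionally wrapped by the default conversation template in __getitem__):
+#   question = AFFORDANCE_QUESTION_LIST[1].format(class_name="mug")
+#   # -> "<image>\nYou are an embodied robot. Please segment the affordance map of mug in this image."
+#   answer = AFFORDANCE_ANSWER_LIST[3]
+#   # -> "Sure, the affordance map is [AFF]."
+# The [AFF] placeholder plays the same role as LISA's [SEG]: its hidden state is what
+# conditions the mask decoder.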
+
+class AffordanceSegDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch=500 * 8 * 2 * 10,
+ precision: str = "fp32",
+ image_size: int = 224,
+ num_classes_per_sample: int = 3,
+ exclude_val=False,
+ aff_seg_data="handal||openx||egoobjects",
+ aff_sample_ratio=[1, 1, 1],
+ explanatory=0.1,
+ ):
+ self.exclude_val = exclude_val
+ self.aff_seg_data = aff_seg_data
+ aff_sample_ratio = np.array(aff_sample_ratio)
+ self.aff_sample_ratio = aff_sample_ratio / aff_sample_ratio.sum()
+ self.samples_per_epoch = samples_per_epoch
+ self.explanatory = explanatory
+ self.num_classes_per_sample = num_classes_per_sample
+
+ self.base_image_dir = base_image_dir
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.precision = precision
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ self.short_question_list = SHORT_QUESTION_LIST
+ self.affordance_question_list = AFFORDANCE_QUESTION_LIST
+ self.long_question_list = LONG_QUESTION_LIST
+ # self.answer_list = ANSWER_LIST
+ self.answer_list = AFFORDANCE_ANSWER_LIST
+
+ aff_seg_datas = aff_seg_data.split("||")
+ self.data2list = {}
+ self.object_ids = {}
+ for ds in aff_seg_datas:
+ if ds == "handal":
+                aff_cls_list = os.listdir(os.path.join(base_image_dir, "HANDAL", "without_depth"))
+                # drop zip archives and strip the leading "handal_dataset_" prefix (15 chars)
+                aff_cls_list = [aff_cls[15:] for aff_cls in aff_cls_list if '.zip' not in aff_cls]
+                aff_cls_list = [aff_cls.replace('_', ' ') for aff_cls in aff_cls_list if len(aff_cls) > 0]
+ images = {}
+ labels = {}
+ num_handal = 0
+ for aff_cls in aff_cls_list:
+ images[aff_cls] = glob.glob(
+ os.path.join(
+ base_image_dir, "HANDAL", "without_depth",
+ 'handal_dataset' + '_' + aff_cls.replace(' ', '_'),
+ 'train', '*', 'rgb', '*.jpg'
+ )
+ )
+ labels[aff_cls] = [img.replace('rgb', 'mask_parts')[:-4] + '_000000_handle.png' for img in
+ images[aff_cls]]
+ # masks[aff_cls] = [mask for mask in masks[aff_cls] if os.path.exists(mask)]
+ assert len(images[aff_cls]) == len(labels[aff_cls])
+ num_handal += len(images[aff_cls])
+ self.data2list[ds] = (images, labels)
+ print("categories of handal: ", aff_cls_list)
+ print("number of handal samples: ", num_handal)
+ elif ds == "openx" or ds == "egoobjects" or ds == "rlbench":
+ pkl_path = os.path.join(base_image_dir, f"{ds}_train.pkl")
+ images = {}
+ labels = {}
+ with open(pkl_path, 'rb') as f:
+ aff_datas = pickle.load(f)
+ for aff_data in aff_datas:
+ if aff_data['task_object_class'] not in images:
+ images[aff_data['task_object_class']] = []
+ labels[aff_data['task_object_class']] = []
+ images[aff_data['task_object_class']].append(aff_data['frame_path'])
+ labels[aff_data['task_object_class']].append(aff_data['mask_path'])
+                # sanity check: every class has matching image/label counts
+                for k in images.keys():
+                    assert len(images[k]) == len(labels[k])
+ self.data2list[ds] = (images, labels)
+ print(f"categories of {ds}: ", images.keys())
+ print(f"number of {ds} samples: ", len(aff_datas))
+ elif ds == 'graspnet':
+ pkl_path = os.path.join(base_image_dir, f"{ds}_train.pkl")
+ images = {}
+ labels = {}
+ object_ids = {}
+ with open(pkl_path, 'rb') as f:
+ graspnet_datas = pickle.load(f)
+ for graspnet_data in graspnet_datas:
+ if graspnet_data['task_object_class'] not in images:
+ images[graspnet_data['task_object_class']] = []
+ labels[graspnet_data['task_object_class']] = []
+ object_ids[graspnet_data['task_object_class']] = []
+ images[graspnet_data['task_object_class']].append(graspnet_data['frame_path'])
+ labels[graspnet_data['task_object_class']].append(graspnet_data['mask_path'])
+ if 'graspnet_object_id' in graspnet_data.keys():
+ object_ids[graspnet_data['task_object_class']].append(graspnet_data['graspnet_object_id'])
+ else:
+ object_ids[graspnet_data['task_object_class']].append(None)
+                # sanity check: every class has matching image/label/object-id counts
+                for k in images.keys():
+                    assert len(images[k]) == len(labels[k])
+                    assert len(images[k]) == len(object_ids[k])
+ self.data2list[ds] = (images, labels)
+ self.object_ids[ds] = object_ids
+ print(f"categories of {ds}: ", images.keys())
+ print("number of graspnet samples: ", len(graspnet_datas))
+ else:
+ raise ValueError(f"Unsupported affordance segmentation dataset: {ds}")
+
+ def __len__(self):
+ return self.samples_per_epoch
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
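+    # Hedged shape walk-through (assuming image_size=1024, as in the training config):
+    # a 480x640 frame is resized by ResizeLongestSide so its longest side becomes 1024
+    # (giving 768x1024), normalized here with pixel_mean/pixel_std, and finally
+    # zero-padded on the right/bottom to the square 1024x1024 input that SAM expects.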
+ def __getitem__(self, idx):
+ ds = np.random.choice(list(self.data2list.keys()), p=self.aff_sample_ratio)
+
+ images, labels = self.data2list[ds]
+ class_name = random.choice(list(images.keys()))
+ idx = random.randint(0, len(images[class_name]) - 1)
+ image_path = images[class_name][idx]
+ label_path = labels[class_name][idx]
+ if "rlbench" in ds:
+ if "target" in class_name or "jar" in class_name or "button" in class_name:
+ is_flip = random.random() > 0.5
+ flip_code = random.choice([-1, 0, 1])
+ elif "drawer" in class_name:
+ is_flip = random.random() > 0.5
+ flip_code = 1
+ else:
+ is_flip = False
+ flip_code = 0
+ else:
+ is_flip = False
+ flip_code = 0
+
+ # load image and prepare input for clip and sam
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ if is_flip:
+ image = cv2.flip(image, flip_code)
+ ori_size = image.shape[:2]
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+
+ image = self.transform.apply_image(image) # preprocess image for sam
+ resize = image.shape[:2]
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ # load class names
+ sampled_classes = [class_name]
+
+ # load label
+ label = Image.open(label_path)
+ label = np.array(label)
+ if is_flip:
+ label = cv2.flip(label, flip_code)
+ label = torch.from_numpy(label).long()
+ masks = []
+ if ds == 'graspnet':
+ object_id = self.object_ids[ds][class_name][idx]
+ # if data is from graspnet and object_id exists, use the mask of the object_id
+ if object_id is None:
+ for _ in range(len(sampled_classes)):
+ masks.append(label > 0)
+ else:
+ for _ in range(len(sampled_classes)):
+ masks.append(label == object_id)
+ else:
+ for _ in range(len(sampled_classes)):
+ masks.append(label > 0)
+ masks = torch.stack(masks, dim=0)
+
+ questions = []
+ answers = []
+ for sampled_cls in sampled_classes:
+ text = sampled_cls
+
+ assert len(text.split("||")) == 1
+ question_template = random.choice(self.affordance_question_list)
+ questions.append(question_template.format(class_name=text.lower()))
+
+ answers.append(random.choice(self.answer_list))
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+
+ i = 0
+ while i < len(questions):
+ conv.messages = []
+ conv.append_message(conv.roles[0], questions[i])
+ conv.append_message(conv.roles[1], answers[i])
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ questions,
+ sampled_classes,
+ )
+
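+# Minimal usage sketch (the paths, tokenizer and ratios below are placeholders, not
+# values taken from this repo's configs):
+#
+#   train_ds = AffordanceSegDataset(
+#       base_image_dir="./data",
+#       tokenizer=tokenizer,                        # e.g. a LLaMA tokenizer
+#       vision_tower="openai/clip-vit-large-patch14",
+#       image_size=1024,
+#       aff_seg_data="handal||openx",
+#       aff_sample_ratio=[1, 1],
+#   )
+#   (img_path, image, image_clip, convs, masks,
+#    label, resize, questions, classes) = train_ds[0]
+#   # image: (3, 1024, 1024) SAM input, image_clip: CLIP pixel values,
+#   # masks: (1, H, W) bool affordance target, convs: list of prompt strings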
+
+class AffValDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ val_dataset,
+ image_size=1024,
+ ):
+ self.base_image_dir = base_image_dir.replace("/lisa_data", "")
+ # splits = val_dataset.split("|")
+ # ds, split = splits
+ ds = val_dataset
+
+ self.class_names = []
+ self.class_ids = []
+ self.images = []
+ self.labels = []
+ pkl_path = os.path.join(base_image_dir, f"{ds}_val.pkl")
+ if ds == 'handal_all':
+ aff_cls_list = os.listdir(os.path.join(self.base_image_dir, "HANDAL", "without_depth"))
+ aff_cls_list = [aff_cls[15:] for aff_cls in aff_cls_list if '.zip' not in aff_cls]
+ aff_cls_list = [aff_cls.replace('_', ' ') for aff_cls in aff_cls_list if len(aff_cls) > 0]
+
+ num_handal = 0
+ images = {}
+ labels = {}
+ class_names = {}
+ for aff_cls in aff_cls_list:
+ images[aff_cls] = glob.glob(
+ os.path.join(
+ self.base_image_dir, "HANDAL", "without_depth",
+ 'handal_dataset' + '_' + aff_cls.replace(' ', '_'),
+ 'test', '*', 'rgb', '*.jpg'
+ )
+ )
+ labels[aff_cls] = [img.replace('rgb', 'mask_parts')[:-4] + '_000000_handle.png' for img in
+ images[aff_cls]]
+ class_names[aff_cls] = [aff_cls] * len(images[aff_cls])
+ # masks[aff_cls] = [mask for mask in masks[aff_cls] if os.path.exists(mask)]
+ assert len(images[aff_cls]) == len(labels[aff_cls])
+ assert len(images[aff_cls]) == len(class_names[aff_cls])
+ num_handal += len(images[aff_cls])
+
+ for aff_cls in images.keys():
+ self.images.extend(images[aff_cls])
+ self.labels.extend(labels[aff_cls])
+ self.class_names.extend(class_names[aff_cls])
+ self.class_ids.extend([None] * len(images[aff_cls]))
+ print(f'handal_all test number: {num_handal}')
+
+ else:
+ with open(pkl_path, 'rb') as f:
+ val_datas = pickle.load(f)
+            for class_name in val_datas['images'].keys():
+                # one known-broken frame (EK_frame_0000040462.jpg) is dropped together
+                # with its paired label / class-name / class-id entries; at most one such
+                # frame is expected, so deleting while enumerating is tolerable here
+                image_list = val_datas['images'][class_name]
+                for idx, img_path in enumerate(image_list):
+                    if os.path.basename(img_path) == "EK_frame_0000040462.jpg":
+                        del val_datas['images'][class_name][idx]
+                        del val_datas['labels'][class_name][idx]
+                        del val_datas['class_names'][class_name][idx]
+                        if 'class_ids' in val_datas:
+                            del val_datas['class_ids'][class_name][idx]
+
+ self.images.extend(val_datas['images'][class_name])
+ self.labels.extend(val_datas['labels'][class_name])
+ self.class_names.extend(val_datas['class_names'][class_name])
+ if 'class_ids' in val_datas.keys():
+ self.class_ids.extend(val_datas['class_ids'][class_name])
+ else:
+ self.class_ids.extend([None] * len(val_datas['images'][class_name]))
+
+ self.ds = ds
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ def __len__(self):
+ return len(self.images)
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
+
+ # load image
+ image_path = self.images[idx]
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+
+ # preprocess image for sam
+ image = self.transform.apply_image(image)
+ resize = image.shape[:2]
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ # load class names
+ sampled_sents = [self.class_names[idx]]
+
+ # load label
+ label_path = self.labels[idx]
+ label = Image.open(label_path)
+ label = np.array(label)
+ label = torch.from_numpy(label).long()
+ masks = []
+ class_id = self.class_ids[idx]
+        # if a class_id is recorded for this sample, keep only that object's mask pixels
+ if class_id is None:
+ for _ in range(len(sampled_sents)):
+ masks.append(label > 0)
+ else:
+ for _ in range(len(sampled_sents)):
+ masks.append(label == class_id)
+ masks = torch.stack(masks, dim=0)
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+ i = 0
+ while i < len(sampled_sents):
+ conv.messages = []
+ text = sampled_sents[i].strip()
+
+ conv.append_message(
+ conv.roles[0],
+ DEFAULT_IMAGE_TOKEN
+ + "\nYou are an embodied robot. What is the affordance map of {} in this image?".format(
+ text
+ ),
+ )
+ conv.append_message(conv.roles[1], "[AFF].")
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ inference = True
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ None,
+ None,
+ inference,
+ )
\ No newline at end of file
diff --git a/utils/cocostuff_classes.txt b/utils/cocostuff_classes.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1d5a692b83ac8eead2bfffa805e1115cef737bae
--- /dev/null
+++ b/utils/cocostuff_classes.txt
@@ -0,0 +1,183 @@
+0: unlabeled
+1: person
+2: bicycle
+3: car
+4: motorcycle
+5: airplane
+6: bus
+7: train
+8: truck
+9: boat
+10: traffic light
+11: fire hydrant
+12: street sign
+13: stop sign
+14: parking meter
+15: bench
+16: bird
+17: cat
+18: dog
+19: horse
+20: sheep
+21: cow
+22: elephant
+23: bear
+24: zebra
+25: giraffe
+26: hat
+27: backpack
+28: umbrella
+29: shoe
+30: eye glasses
+31: handbag
+32: tie
+33: suitcase
+34: frisbee
+35: skis
+36: snowboard
+37: sports ball
+38: kite
+39: baseball bat
+40: baseball glove
+41: skateboard
+42: surfboard
+43: tennis racket
+44: bottle
+45: plate
+46: wine glass
+47: cup
+48: fork
+49: knife
+50: spoon
+51: bowl
+52: banana
+53: apple
+54: sandwich
+55: orange
+56: broccoli
+57: carrot
+58: hot dog
+59: pizza
+60: donut
+61: cake
+62: chair
+63: couch
+64: potted plant
+65: bed
+66: mirror
+67: dining table
+68: window
+69: desk
+70: toilet
+71: door
+72: tv
+73: laptop
+74: mouse
+75: remote
+76: keyboard
+77: cell phone
+78: microwave
+79: oven
+80: toaster
+81: sink
+82: refrigerator
+83: blender
+84: book
+85: clock
+86: vase
+87: scissors
+88: teddy bear
+89: hair drier
+90: toothbrush
+91: hair brush
+92: banner
+93: blanket
+94: branch
+95: bridge
+96: building-other
+97: bush
+98: cabinet
+99: cage
+100: cardboard
+101: carpet
+102: ceiling-other
+103: ceiling-tile
+104: cloth
+105: clothes
+106: clouds
+107: counter
+108: cupboard
+109: curtain
+110: desk-stuff
+111: dirt
+112: door-stuff
+113: fence
+114: floor-marble
+115: floor-other
+116: floor-stone
+117: floor-tile
+118: floor-wood
+119: flower
+120: fog
+121: food-other
+122: fruit
+123: furniture-other
+124: grass
+125: gravel
+126: ground-other
+127: hill
+128: house
+129: leaves
+130: light
+131: mat
+132: metal
+133: mirror-stuff
+134: moss
+135: mountain
+136: mud
+137: napkin
+138: net
+139: paper
+140: pavement
+141: pillow
+142: plant-other
+143: plastic
+144: platform
+145: playingfield
+146: railing
+147: railroad
+148: river
+149: road
+150: rock
+151: roof
+152: rug
+153: salad
+154: sand
+155: sea
+156: shelf
+157: sky
+158: skyscraper
+159: snow
+160: solid-other
+161: stairs
+162: stone
+163: straw
+164: structural-other
+165: table
+166: tent
+167: textile-other
+168: towel
+169: tree
+170: vegetable
+171: wall-brick
+172: wall-concrete
+173: wall-other
+174: wall-panel
+175: wall-stone
+176: wall-tile
+177: wall-wood
+178: water-other
+179: waterdrops
+180: window-blind
+181: window-other
+182: wood
diff --git a/utils/conversation.py b/utils/conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ea31ff2e1ba6f93c5942d096162576284fff61
--- /dev/null
+++ b/utils/conversation.py
@@ -0,0 +1,308 @@
+"""
+Conversation prompt templates.
+"""
+
+import dataclasses
+from enum import Enum, auto
+from typing import Any, List
+
+
+class SeparatorStyle(Enum):
+ """Different separator style."""
+
+ ADD_COLON_SINGLE = auto()
+ ADD_COLON_TWO = auto()
+ NO_COLON_SINGLE = auto()
+ BAIZE = auto()
+ DOLLY = auto()
+ RWKV = auto()
+
+
+@dataclasses.dataclass
+class Conversation:
+ """A class that keeps all conversation history."""
+
+ # System prompts
+ system: str
+ # Two roles
+ roles: List[str]
+ # All messages
+ messages: List[List[str]]
+ # Offset of few shot examples
+ offset: int
+ # Separator
+ sep_style: SeparatorStyle
+ sep: str
+ sep2: str = None
+ # Stop criteria (the default one is EOS token)
+ stop_str: str = None
+ # Stops generation if meeting any token in this list
+ stop_token_ids: List[int] = None
+
+ # Used for the state in the gradio servers.
+ # TODO(lmzheng): refactor this
+ conv_id: Any = None
+ skip_next: bool = False
+ model_name: str = None
+
+ def get_prompt(self):
+ if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
+ ret = self.system + self.sep
+ for role, message in self.messages:
+ if message:
+ ret += role + ": " + message + self.sep
+ else:
+ ret += role + ":"
+ return ret
+ elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
+ seps = [self.sep, self.sep2]
+ ret = self.system + seps[0]
+ for i, (role, message) in enumerate(self.messages):
+ if message:
+ ret += role + ": " + message + seps[i % 2]
+ else:
+ ret += role + ":"
+ return ret
+ elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
+ ret = self.system
+ for role, message in self.messages:
+ if message:
+ ret += role + message + self.sep
+ else:
+ ret += role
+ return ret
+ elif self.sep_style == SeparatorStyle.BAIZE:
+ ret = self.system + "\n"
+ for role, message in self.messages:
+ if message:
+ ret += role + message + "\n"
+ else:
+ ret += role
+ return ret
+ elif self.sep_style == SeparatorStyle.DOLLY:
+ seps = [self.sep, self.sep2]
+ ret = self.system
+ for i, (role, message) in enumerate(self.messages):
+ if message:
+ ret += role + ":\n" + message + seps[i % 2]
+ if i % 2 == 1:
+ ret += "\n\n"
+ else:
+ ret += role + ":\n"
+ return ret
+ elif self.sep_style == SeparatorStyle.RWKV:
+ ret = self.system
+ for i, (role, message) in enumerate(self.messages):
+ if message:
+ ret += (
+ role
+ + ": "
+ + message.replace("\r\n", "\n").replace("\n\n", "\n")
+ )
+ ret += "\n\n"
+ else:
+ ret += role + ":"
+ return ret
+ else:
+ raise ValueError(f"Invalid style: {self.sep_style}")
+
+ def append_message(self, role, message):
+ self.messages.append([role, message])
+
+ def to_gradio_chatbot(self):
+ ret = []
+ for i, (role, msg) in enumerate(self.messages[self.offset :]):
+ if i % 2 == 0:
+ ret.append([msg, None])
+ else:
+ ret[-1][-1] = msg
+ return ret
+
+ def copy(self):
+ return Conversation(
+ system=self.system,
+ roles=self.roles,
+ messages=[[x, y] for x, y in self.messages],
+ offset=self.offset,
+ sep_style=self.sep_style,
+ sep=self.sep,
+ sep2=self.sep2,
+ stop_str=self.stop_str,
+ stop_token_ids=self.stop_token_ids,
+ conv_id=self.conv_id,
+ model_name=self.model_name,
+ )
+
+ def dict(self):
+ return {
+ "system": self.system,
+ "roles": self.roles,
+ "messages": self.messages,
+ "offset": self.offset,
+ "conv_id": self.conv_id,
+ "model_name": self.model_name,
+ }
+
+
+# A template with one conversation example
+conv_one_shot = Conversation(
+ system="A chat between a curious human and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+ roles=("Human", "Assistant"),
+ messages=(
+ (
+ "Human",
+ "What are the key differences between renewable and non-renewable energy sources?",
+ ),
+ (
+ "Assistant",
+ "Renewable energy sources are those that can be replenished naturally in a relatively "
+ "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
+ "Non-renewable energy sources, on the other hand, are finite and will eventually be "
+ "depleted, such as coal, oil, and natural gas. Here are some key differences between "
+ "renewable and non-renewable energy sources:\n"
+ "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
+ "energy sources are finite and will eventually run out.\n"
+ "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
+ "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
+ "and other negative effects.\n"
+ "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
+ "have lower operational costs than non-renewable sources.\n"
+ "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
+ "locations than non-renewable sources.\n"
+ "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
+ "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
+ "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
+ "non-renewable sources are not, and their depletion can lead to economic and social instability.",
+ ),
+ ),
+ offset=2,
+ sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+ sep="\n### ",
+ stop_str="###",
+)
+
+
+# Vicuna v1.1 template
+conv_vicuna_v1_1 = Conversation(
+ system="A chat between a curious user and an artificial intelligence assistant. "
+ "The assistant gives helpful, detailed, and polite answers to the user's questions.",
+ roles=("USER", "ASSISTANT"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
+ sep=" ",
+ sep2="",
+)
+
+# Koala default template
+conv_koala_v1 = Conversation(
+ system="BEGINNING OF CONVERSATION:",
+ roles=("USER", "GPT"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.ADD_COLON_TWO,
+ sep=" ",
+ sep2="",
+)
+
+# Dolly V2 default template
+conv_dolly = Conversation(
+ system="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
+ roles=("### Instruction", "### Response"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.DOLLY,
+ sep="\n\n",
+ sep2="### End",
+)
+
+# OpenAssistant Pythia default template
+conv_oasst = Conversation(
+ system="",
+ roles=("<|prompter|>", "<|assistant|>"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
+ sep="<|endoftext|>",
+)
+
+# StableLM Alpha default template
+conv_stablelm = Conversation(
+ system="""<|SYSTEM|># StableLM Tuned (Alpha version)
+- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
+- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
+- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
+- StableLM will refuse to participate in anything that could harm a human.
+""",
+ roles=("<|USER|>", "<|ASSISTANT|>"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.NO_COLON_SINGLE,
+ sep="",
+ stop_token_ids=[50278, 50279, 50277, 1, 0],
+)
+
+# Baize default template
+conv_baize = Conversation(
+ system="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.",
+ roles=("[|Human|]", "[|AI|]"),
+ messages=(
+ ("[|Human|]", "Hello!"),
+ ("[|AI|]", "Hi!"),
+ ),
+ offset=2,
+ sep_style=SeparatorStyle.BAIZE,
+ sep="[|Human|]",
+ stop_str="[|Human|]",
+)
+
+# RWKV-4-Raven default template
+conv_rwkv = Conversation(
+ system="",
+ roles=("Bob", "Alice"),
+ messages=(),
+ offset=0,
+ sep_style=SeparatorStyle.RWKV,
+ sep="",
+ stop_str="\n\n",
+)
+
+conv_templates = {
+ "baize": conv_baize,
+ "conv_one_shot": conv_one_shot,
+ "dolly": conv_dolly,
+ "koala_v1": conv_koala_v1,
+ "oasst": conv_oasst,
+ "stablelm": conv_stablelm,
+ "vicuna_v1.1": conv_vicuna_v1_1,
+ "rwkv": conv_rwkv,
+}
+
+
+def get_default_conv_template(model_name):
+ model_name = model_name.lower()
+ if "vicuna" in model_name or "output" in model_name:
+ return conv_vicuna_v1_1
+ elif "koala" in model_name:
+ return conv_koala_v1
+ elif "dolly-v2" in model_name:
+ return conv_dolly
+ elif "oasst" in model_name and "pythia" in model_name:
+ return conv_oasst
+ elif "baize" in model_name:
+ return conv_baize
+ elif "stablelm" in model_name:
+ return conv_stablelm
+ elif "rwkv-4" in model_name:
+ return conv_rwkv
+ return conv_one_shot
+
+
+if __name__ == "__main__":
+ conv = conv_templates["vicuna_v1.1"].copy()
+ conv.append_message(conv.roles[0], "Hello!")
+ conv.append_message(conv.roles[1], "Hi!")
+ conv.append_message(conv.roles[0], "How are you?")
+ conv.append_message(conv.roles[1], None)
+ print(conv.get_prompt())
diff --git a/utils/data_processing.py b/utils/data_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..d47a80f0111019c97ccb2ce198f37495ee037471
--- /dev/null
+++ b/utils/data_processing.py
@@ -0,0 +1,90 @@
+import glob
+import json
+import os
+
+import cv2
+import numpy as np
+
+
+def get_mask_from_json(json_path, img):
+ try:
+ with open(json_path, "r") as r:
+ anno = json.loads(r.read())
+    except (UnicodeDecodeError, json.JSONDecodeError):
+        # some annotation files are not UTF-8 encoded; retry with a legacy codepage
+        with open(json_path, "r", encoding="cp1252") as r:
+ anno = json.loads(r.read())
+
+ inform = anno["shapes"]
+ comments = anno["text"]
+ is_sentence = anno["is_sentence"]
+
+ height, width = img.shape[:2]
+
+ ### sort polies by area
+ area_list = []
+ valid_poly_list = []
+ for i in inform:
+ label_id = i["label"]
+ points = i["points"]
+ if "flag" == label_id.lower(): ## meaningless deprecated annotations
+ continue
+
+ tmp_mask = np.zeros((height, width), dtype=np.uint8)
+ cv2.polylines(tmp_mask, np.array([points], dtype=np.int32), True, 1, 1)
+ cv2.fillPoly(tmp_mask, np.array([points], dtype=np.int32), 1)
+ tmp_area = tmp_mask.sum()
+
+ area_list.append(tmp_area)
+ valid_poly_list.append(i)
+
+ ### ground-truth mask
+ sort_index = np.argsort(area_list)[::-1].astype(np.int32)
+ sort_index = list(sort_index)
+ sort_inform = []
+ for s_idx in sort_index:
+ sort_inform.append(valid_poly_list[s_idx])
+
+ mask = np.zeros((height, width), dtype=np.uint8)
+ for i in sort_inform:
+ label_id = i["label"]
+ points = i["points"]
+
+ if "ignore" in label_id.lower():
+ label_value = 255 # ignored during evaluation
+ else:
+ label_value = 1 # target
+
+ cv2.polylines(mask, np.array([points], dtype=np.int32), True, label_value, 1)
+ cv2.fillPoly(mask, np.array([points], dtype=np.int32), label_value)
+
+ return mask, comments, is_sentence
+
+
+if __name__ == "__main__":
+ data_dir = "./train"
+ vis_dir = "./vis"
+
+ if not os.path.exists(vis_dir):
+ os.makedirs(vis_dir)
+
+ json_path_list = sorted(glob.glob(data_dir + "/*.json"))
+ for json_path in json_path_list:
+ img_path = json_path.replace(".json", ".jpg")
+ img = cv2.imread(img_path)[:, :, ::-1]
+
+        # In the generated mask, value 1 denotes the valid target region, and value 255 marks regions ignored during evaluation.
+ mask, comments, is_sentence = get_mask_from_json(json_path, img)
+
+ ## visualization. Green for target, and red for ignore.
+ valid_mask = (mask == 1).astype(np.float32)[:, :, None]
+ ignore_mask = (mask == 255).astype(np.float32)[:, :, None]
+ vis_img = img * (1 - valid_mask) * (1 - ignore_mask) + (
+ (np.array([0, 255, 0]) * 0.6 + img * 0.4) * valid_mask
+ + (np.array([255, 0, 0]) * 0.6 + img * 0.4) * ignore_mask
+ )
+ vis_img = np.concatenate([img, vis_img], 1)
+ vis_path = os.path.join(
+ vis_dir, json_path.split("/")[-1].replace(".json", ".jpg")
+ )
+ cv2.imwrite(vis_path, vis_img[:, :, ::-1])
+ print("Visualization has been saved to: ", vis_path)
diff --git a/utils/dataset.py b/utils/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..224da36fa56f4493d5885a9da88e48172eceba6d
--- /dev/null
+++ b/utils/dataset.py
@@ -0,0 +1,502 @@
+import glob
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from pycocotools import mask
+from transformers import CLIPImageProcessor
+
+from model.llava import conversation as conversation_lib
+from model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX,
+ IMAGE_TOKEN_INDEX)
+from model.llava.mm_utils import tokenizer_image_token
+from model.segment_anything.utils.transforms import ResizeLongestSide
+
+from .conversation import get_default_conv_template
+from .data_processing import get_mask_from_json
+from .reason_seg_dataset import ReasonSegDataset
+from .refer import REFER
+from .refer_seg_dataset import ReferSegDataset
+from .sem_seg_dataset import SemSegDataset
+from .aff_seg_dataset import AffordanceSegDataset
+from .reason_aff_dataset import ReasonAffDataset
+from .utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
+ DEFAULT_IMAGE_TOKEN)
+from .vqa_dataset import VQADataset
+
+
+def collate_fn(
+ batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1
+):
+ image_path_list = []
+ images_list = []
+ images_clip_list = []
+ conversation_list = []
+ masks_list = []
+ label_list = []
+ resize_list = []
+ questions_list = []
+ sampled_classes_list = []
+ offset_list = [0]
+ cnt = 0
+ inferences = []
+ for (
+ image_path,
+ images,
+ images_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ questions,
+ sampled_classes,
+ inference,
+ ) in batch:
+ image_path_list.append(image_path)
+ images_list.append(images)
+ images_clip_list.append(images_clip)
+ conversation_list.extend(conversations)
+ label_list.append(label)
+ masks_list.append(masks.float())
+ resize_list.append(resize)
+ questions_list.append(questions)
+ sampled_classes_list.append(sampled_classes)
+ cnt += len(conversations)
+ offset_list.append(cnt)
+ inferences.append(inference)
+
+ if use_mm_start_end:
+ # replace token
+ for i in range(len(conversation_list)):
+ replace_token = DEFAULT_IMAGE_TOKEN
+ replace_token = (
+ DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
+ )
+ conversation_list[i] = conversation_list[i].replace(
+ DEFAULT_IMAGE_TOKEN, replace_token
+ )
+ input_ids = [
+ tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
+ for prompt in conversation_list
+ ]
+ input_ids = torch.nn.utils.rnn.pad_sequence(
+ input_ids, batch_first=True, padding_value=tokenizer.pad_token_id
+ )
+ attention_masks = input_ids.ne(tokenizer.pad_token_id)
+
+ conv = conversation_lib.default_conversation.copy()
+ targets = input_ids.clone()
+
+ if conv_type == "llava_v1":
+ sep = conv.sep + conv.roles[1] + ": "
+ else:
+ sep = "[/INST] "
+ for conversation, target in zip(conversation_list, targets):
+ total_len = int(target.ne(tokenizer.pad_token_id).sum())
+
+ rounds = conversation.split(conv.sep2)
+ cur_len = 1
+ target[:cur_len] = IGNORE_INDEX
+ for i, rou in enumerate(rounds):
+ if rou == "":
+ break
+
+ parts = rou.split(sep)
+ # if len(parts) != 2:
+ # break
+ assert len(parts) == 2, (len(parts), rou)
+ parts[0] += sep
+
+ if DEFAULT_IMAGE_TOKEN in conversation:
+ round_len = len(tokenizer_image_token(rou, tokenizer))
+ instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
+ else:
+ round_len = len(tokenizer(rou).input_ids)
+ instruction_len = len(tokenizer(parts[0]).input_ids) - 2
+
+ target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
+
+ cur_len += round_len
+ target[cur_len:] = IGNORE_INDEX
+
+ if False:
+ z = target.clone()
+ z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z)
+ if local_rank == 0:
+ print(
+ "conversation: ",
+ conversation,
+ "tokenizer.decode(z): ",
+ tokenizer.decode(z),
+ )
+
+ if cur_len < tokenizer.model_max_length:
+ assert cur_len == total_len
+
+    if not inferences[0]:
+ truncate_len = tokenizer.model_max_length - 255
+
+ if input_ids.shape[1] > truncate_len:
+ input_ids = input_ids[:, :truncate_len]
+ targets = targets[:, :truncate_len]
+ attention_masks = attention_masks[:, :truncate_len]
+
+ return {
+ "image_paths": image_path_list,
+ "images": torch.stack(images_list, dim=0),
+ "images_clip": torch.stack(images_clip_list, dim=0),
+ "input_ids": input_ids,
+ "labels": targets,
+ "attention_masks": attention_masks,
+ "masks_list": masks_list,
+ "label_list": label_list,
+ "resize_list": resize_list,
+ "offset": torch.LongTensor(offset_list),
+ "questions_list": questions_list,
+ "sampled_classes_list": sampled_classes_list,
+ "inference": inferences[0],
+ "conversation_list": conversation_list,
+ }
+
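+# Rough sketch of the target masking performed above: each conversation is split into
+# rounds on conv.sep2, and for every round the system prompt, the user turn and the
+# "ASSISTANT: " separator are overwritten with IGNORE_INDEX in `targets`, so the
+# language-model loss only covers the assistant's reply tokens (e.g. "It is [AFF].").
+# Positions past the final round (padding) are likewise ignored.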
+
+class HybridDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch=500 * 8 * 2 * 10,
+ precision: str = "fp32",
+ image_size: int = 224,
+ num_classes_per_sample: int = 3,
+ exclude_val=False,
+ dataset="sem_seg||refer_seg||vqa||reason_seg",
+ sample_rate=[9, 3, 3, 1],
+ sem_seg_data="ade20k||cocostuff||partimagenet||pascal_part||paco_lvis||mapillary",
+ refer_seg_data="refclef||refcoco||refcoco+||refcocog",
+ vqa_data="llava_instruct_150k",
+ reason_seg_data="ReasonSeg|train",
+ aff_seg_data="handal",
+ aff_sample_rate=[1],
+ reason_aff_data="handal_hard_reasoning",
+ reason_aff_sample_rate=[1],
+ explanatory=0.1,
+ ):
+ self.exclude_val = exclude_val
+ self.dataset = dataset
+ self.samples_per_epoch = samples_per_epoch
+ self.explanatory = explanatory
+ self.num_classes_per_sample = num_classes_per_sample
+ sample_rate = np.array(sample_rate)
+ self.sample_rate = sample_rate / sample_rate.sum()
+
+ self.base_image_dir = base_image_dir
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.precision = precision
+
+ self.datasets = dataset.split("||")
+
+ self.all_datasets = []
+ for dataset in self.datasets:
+ if dataset == "sem_seg":
+ self.all_datasets.append(
+ SemSegDataset(
+ base_image_dir.replace('./data', './data/lisa_data'),
+ tokenizer,
+ vision_tower,
+ samples_per_epoch,
+ precision,
+ image_size,
+ num_classes_per_sample,
+ exclude_val,
+ sem_seg_data,
+ )
+ )
+ elif dataset == "refer_seg":
+ self.all_datasets.append(
+ ReferSegDataset(
+ base_image_dir.replace('./data', './data/lisa_data'),
+ tokenizer,
+ vision_tower,
+ samples_per_epoch,
+ precision,
+ image_size,
+ num_classes_per_sample,
+ exclude_val,
+ refer_seg_data,
+ )
+ )
+ elif dataset == "vqa":
+ self.all_datasets.append(
+ VQADataset(
+ base_image_dir.replace('./data', './data/lisa_data'),
+ tokenizer,
+ vision_tower,
+ samples_per_epoch,
+ precision,
+ image_size,
+ num_classes_per_sample,
+ exclude_val,
+ vqa_data,
+ )
+ )
+ elif dataset == "reason_seg":
+ self.all_datasets.append(
+ ReasonSegDataset(
+ base_image_dir.replace('./data', './data/lisa_data'),
+ tokenizer,
+ vision_tower,
+ samples_per_epoch,
+ precision,
+ image_size,
+ num_classes_per_sample,
+ exclude_val,
+ reason_seg_data,
+ explanatory,
+ )
+ )
+ elif dataset == "aff_seg":
+ self.all_datasets.append(
+ AffordanceSegDataset(
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch,
+ precision,
+ image_size,
+ num_classes_per_sample,
+ exclude_val,
+ aff_seg_data,
+ aff_sample_rate,
+ )
+ )
+ elif dataset == "reason_aff":
+ self.all_datasets.append(
+ ReasonAffDataset(
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch,
+ precision,
+ image_size,
+ num_classes_per_sample,
+ exclude_val,
+ reason_aff_data,
+ reason_aff_sample_rate,
+ )
+ )
+
+ def __len__(self):
+ return self.samples_per_epoch
+
+ def __getitem__(self, idx):
+ ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate)
+ data = self.all_datasets[ind]
+ inference = False
+ return *data[0], inference
+
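+# Note on the sampling above: `sample_rate` is normalised in __init__, so for example
+# dataset="sem_seg||aff_seg" with sample_rate=[9, 1] draws roughly 90% of items from
+# SemSegDataset and 10% from AffordanceSegDataset. The requested index is ignored:
+# every access re-samples a sub-dataset and returns its item 0, and each sub-dataset
+# randomises internally, so an "epoch" is defined purely by samples_per_epoch.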
+
+class ValDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ val_dataset,
+ image_size=1024,
+ ):
+ self.base_image_dir = base_image_dir.replace('./data', './data/lisa_data')
+ splits = val_dataset.split("|")
+ if len(splits) == 2:
+ ds, split = splits
+ images = glob.glob(
+ os.path.join(self.base_image_dir, "reason_seg", ds, split, "*.jpg")
+ )
+ self.images = images
+ self.data_type = "reason_seg"
+ elif len(splits) == 3:
+ ds, splitBy, split = splits
+ refer_api = REFER(self.base_image_dir, ds, splitBy)
+ ref_ids_val = refer_api.getRefIds(split=split)
+ images_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val)
+ refs_val = refer_api.loadRefs(ref_ids=ref_ids_val)
+ refer_seg_ds = {}
+ refer_seg_ds["images"] = []
+ loaded_images = refer_api.loadImgs(image_ids=images_ids_val)
+ for item in loaded_images:
+ item = item.copy()
+ if ds == "refclef":
+ item["file_name"] = os.path.join(
+ base_image_dir, "images/saiapr_tc-12", item["file_name"]
+ )
+ elif ds in ["refcoco", "refcoco+", "refcocog", "grefcoco"]:
+ item["file_name"] = os.path.join(
+ base_image_dir,
+ "images/mscoco/images/train2014",
+ item["file_name"],
+ )
+ refer_seg_ds["images"].append(item)
+ refer_seg_ds["annotations"] = refer_api.Anns # anns_val
+
+ img2refs = {}
+ for ref in refs_val:
+ image_id = ref["image_id"]
+ img2refs[image_id] = img2refs.get(image_id, []) + [
+ ref,
+ ]
+ refer_seg_ds["img2refs"] = img2refs
+ self.refer_seg_ds = refer_seg_ds
+ self.data_type = "refer_seg"
+
+ self.ds = ds
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ def __len__(self):
+ if self.data_type == "refer_seg":
+ return len(self.refer_seg_ds["images"])
+ else:
+ return len(self.images)
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
+ if self.data_type == "refer_seg":
+ refer_seg_ds = self.refer_seg_ds
+ images = refer_seg_ds["images"]
+ annotations = refer_seg_ds["annotations"]
+ img2refs = refer_seg_ds["img2refs"]
+
+ image_info = images[idx]
+ image_path = image_info["file_name"]
+ image_id = image_info["id"]
+
+ refs = img2refs[image_id]
+ if len(refs) == 0:
+ raise ValueError("image {} has no refs".format(image_id))
+
+ sents = []
+ ann_ids = []
+ for ref in refs:
+ for sent in ref["sentences"]:
+ sents.append(sent["sent"].strip().lower())
+ ann_ids.append(ref["ann_id"])
+
+ sampled_sents = sents
+ sampled_ann_ids = ann_ids
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ is_sentence = False
+ else:
+ image_path = self.images[idx]
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ json_path = image_path.replace(".jpg", ".json")
+ mask_json, sampled_sents, is_sentence = get_mask_from_json(json_path, image)
+ sampled_sents = [sampled_sents[0]]
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+ i = 0
+ while i < len(sampled_sents):
+ conv.messages = []
+ text = sampled_sents[i].strip()
+ if is_sentence:
+ conv.append_message(
+ conv.roles[0],
+ DEFAULT_IMAGE_TOKEN
+ + "\n {} Please output segmentation mask.".format(text),
+ )
+ conv.append_message(conv.roles[1], "[SEG].")
+ else:
+ conv.append_message(
+ conv.roles[0],
+ DEFAULT_IMAGE_TOKEN
+ + "\n What is {} in this image? Please output segmentation mask.".format(
+ text
+ ),
+ )
+ conv.append_message(conv.roles[1], "[SEG].")
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+
+ # preprocess image for sam
+ image = self.transform.apply_image(image)
+ resize = image.shape[:2]
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ if self.data_type == "refer_seg":
+ masks = []
+ for i, ann_id in enumerate(sampled_ann_ids):
+ ann = annotations[ann_id]
+ if len(ann["segmentation"]) == 0 and sampled_sents[i] != "":
+ m = np.zeros((image_info["height"], image_info["width"], 1))
+ else:
+ if type(ann["segmentation"][0]) == list: # polygon
+ rle = mask.frPyObjects(
+ ann["segmentation"],
+ image_info["height"],
+ image_info["width"],
+ )
+ else:
+ rle = ann["segmentation"]
+ for i in range(len(rle)):
+ if not isinstance(rle[i]["counts"], bytes):
+ rle[i]["counts"] = rle[i]["counts"].encode()
+ m = mask.decode(rle)
+ m = np.sum(
+ m, axis=2
+ ) # sometimes there are multiple binary map (corresponding to multiple segs)
+ m = m.astype(np.uint8) # convert to np.uint8
+ masks.append(m)
+ else:
+ masks = [mask_json]
+
+ masks = np.stack(masks, axis=0)
+ masks = torch.from_numpy(masks)
+ labels = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label
+ inference = True
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ labels,
+ resize,
+ None,
+ None,
+ inference,
+ )
diff --git a/utils/graspnet_classes.txt b/utils/graspnet_classes.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f593c740b8f0c0e70b9c6ccc281cec4bba4e7400
--- /dev/null
+++ b/utils/graspnet_classes.txt
@@ -0,0 +1,88 @@
+"id:0,Name:cracker box",
+"id:1,Name:sugar box",
+"id:2,Name:tomato soup can",
+"id:3,Name:mustard bottle",
+"id:4,Name:potted meat can",
+"id:5,Name:banana",
+"id:6,Name:bowl",
+"id:7,Name:mug",
+"id:8,Name:power drill",
+"id:9,Name:scissors",
+"id:10,Name:chips can",
+"id:11,Name:strawberry",
+"id:12,Name:apple",
+"id:13,Name:lemon",
+"id:14,Name:peach",
+"id:15,Name:pear",
+"id:16,Name:orange",
+"id:17,Name:plum",
+"id:18,Name:knife",
+"id:19,Name:phillips screwdriver",
+"id:20,Name:flat screwdriver",
+"id:21,Name:racquetball",
+"id:22,Name:cups",
+"id:23,Name:cups",
+"id:24,Name:toy airplane",
+"id:25,Name:toy airplane",
+"id:26,Name:toy airplane",
+"id:27,Name:toy airplane",
+"id:28,Name:toy airplane",
+"id:29,Name:toy airplane",
+"id:30,Name:toy airplane",
+"id:31,Name:toy airplane",
+"id:32,Name:padlock",
+"id:33,Name:dragon",
+"id:34,Name:sum37 secret repair",
+"id:35,Name:jvr cleansing foam",
+"id:36,Name:dabao wash soup",
+"id:37,Name:nzskincare mouth rinse",
+"id:38,Name:dabao sod",
+"id:39,Name:soap box",
+"id:40,Name:kispa cleanser",
+"id:41,Name:darlie toothpaste",
+"id:42,Name:nivea men oil control",
+"id:43,Name:baoke marker",
+"id:44,Name:hosjam",
+"id:45,Name:pitcher cap",
+"id:46,Name:dish",
+"id:47,Name:white mouse",
+"id:48,Name:camel",
+"id:49,Name:deer",
+"id:50,Name:zebra",
+"id:51,Name:large elephant",
+"id:52,Name:rhinocero",
+"id:53,Name:small elephant",
+"id:54,Name:monkey",
+"id:55,Name:girafle",
+"id:56,Name:gorilla",
+"id:57,Name:weiquan",
+"id:58,Name:darlie box",
+"id:59,Name:soap",
+"id:60,Name:black mouse",
+"id:61,Name:dabao facewash",
+"id:62,Name:pantene",
+"id:63,Name:head shoulders supreme",
+"id:64,Name:thera med",
+"id:65,Name:dove",
+"id:66,Name:head shoulders care",
+"id:67,Name:lion",
+"id:68,Name:coconut juice box",
+"id:69,Name:hippo",
+"id:70,Name:tape",
+"id:71,Name:rubiks cube",
+"id:72,Name:peeler cover",
+"id:73,Name:peeler",
+"id:74,Name:ice cube mould",
+"id:75,Name:bar clamp",
+"id:76,Name:climbing hold",
+"id:77,Name:endstop holder",
+"id:78,Name:gearbox",
+"id:79,Name:mount1",
+"id:80,Name:mount2",
+"id:81,Name:nozzle",
+"id:82,Name:part1",
+"id:83,Name:part3",
+"id:84,Name:pawn",
+"id:85,Name:pipe connector",
+"id:86,Name:turbine housing",
+"id:87,Name:vase"
\ No newline at end of file
diff --git a/utils/grefcoco.py b/utils/grefcoco.py
new file mode 100644
index 0000000000000000000000000000000000000000..98274d7180cee4abaefb1b61c406b78d286f9dfe
--- /dev/null
+++ b/utils/grefcoco.py
@@ -0,0 +1,198 @@
+import contextlib
+import copy
+import io
+import logging
+import os
+import random
+
+import numpy as np
+import pycocotools.mask as mask_util
+from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
+from detectron2.utils.file_io import PathManager
+from fvcore.common.timer import Timer
+from PIL import Image
+
+"""
+This file contains functions to parse RefCOCO-format annotations into dicts in "Detectron2 format".
+"""
+
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_refcoco_json"]
+
+
+def load_grefcoco_json(
+ refer_root,
+ dataset_name,
+ splitby,
+ split,
+ image_root,
+ extra_annotation_keys=None,
+ extra_refer_keys=None,
+):
+ if dataset_name == "refcocop":
+ dataset_name = "refcoco+"
+    if dataset_name == "refcoco" or dataset_name == "refcoco+":
+        assert splitby == "unc"
+ if dataset_name == "refcocog":
+ assert splitby == "umd" or splitby == "google"
+
+ dataset_id = "_".join([dataset_name, splitby, split])
+
+ from .grefer import G_REFER
+
+ logger.info("Loading dataset {} ({}-{}) ...".format(dataset_name, splitby, split))
+ logger.info("Refcoco root: {}".format(refer_root))
+ timer = Timer()
+ refer_root = PathManager.get_local_path(refer_root)
+ with contextlib.redirect_stdout(io.StringIO()):
+ refer_api = G_REFER(data_root=refer_root, dataset=dataset_name, splitBy=splitby)
+ if timer.seconds() > 1:
+ logger.info(
+ "Loading {} takes {:.2f} seconds.".format(dataset_id, timer.seconds())
+ )
+
+ ref_ids = refer_api.getRefIds(split=split)
+ img_ids = refer_api.getImgIds(ref_ids)
+ refs = refer_api.loadRefs(ref_ids)
+ imgs = [refer_api.loadImgs(ref["image_id"])[0] for ref in refs]
+ anns = [refer_api.loadAnns(ref["ann_id"]) for ref in refs]
+ imgs_refs_anns = list(zip(imgs, refs, anns))
+
+ logger.info(
+ "Loaded {} images, {} referring object sets in G_RefCOCO format from {}".format(
+ len(img_ids), len(ref_ids), dataset_id
+ )
+ )
+
+ dataset_dicts = []
+
+ ann_keys = ["iscrowd", "bbox", "category_id"] + (extra_annotation_keys or [])
+ ref_keys = ["raw", "sent_id"] + (extra_refer_keys or [])
+
+ ann_lib = {}
+
+    NT_count = 0
+    MT_count = 0
+    num_instances_without_valid_segmentation = 0
+
+ for img_dict, ref_dict, anno_dicts in imgs_refs_anns:
+ record = {}
+ record["source"] = "grefcoco"
+ record["file_name"] = os.path.join(image_root, img_dict["file_name"])
+ record["height"] = img_dict["height"]
+ record["width"] = img_dict["width"]
+ image_id = record["image_id"] = img_dict["id"]
+
+ # Check that information of image, ann and ref match each other
+ # This fails only when the data parsing logic or the annotation file is buggy.
+ assert ref_dict["image_id"] == image_id
+ assert ref_dict["split"] == split
+ if not isinstance(ref_dict["ann_id"], list):
+ ref_dict["ann_id"] = [ref_dict["ann_id"]]
+
+ # No target samples
+ if None in anno_dicts:
+ assert anno_dicts == [None]
+ assert ref_dict["ann_id"] == [-1]
+ record["empty"] = True
+            obj = {key: None for key in ann_keys}
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
+ obj["empty"] = True
+ obj = [obj]
+
+ # Multi target samples
+ else:
+ record["empty"] = False
+ obj = []
+ for anno_dict in anno_dicts:
+ ann_id = anno_dict["id"]
+ if anno_dict["iscrowd"]:
+ continue
+ assert anno_dict["image_id"] == image_id
+ assert ann_id in ref_dict["ann_id"]
+
+ if ann_id in ann_lib:
+ ann = ann_lib[ann_id]
+ else:
+ ann = {key: anno_dict[key] for key in ann_keys if key in anno_dict}
+ ann["bbox_mode"] = BoxMode.XYWH_ABS
+ ann["empty"] = False
+
+ segm = anno_dict.get("segmentation", None)
+ assert segm # either list[list[float]] or dict(RLE)
+ if isinstance(segm, dict):
+ if isinstance(segm["counts"], list):
+ # convert to compressed RLE
+ segm = mask_util.frPyObjects(segm, *segm["size"])
+ else:
+ # filter out invalid polygons (< 3 points)
+ segm = [
+ poly
+ for poly in segm
+ if len(poly) % 2 == 0 and len(poly) >= 6
+ ]
+ if len(segm) == 0:
+ num_instances_without_valid_segmentation += 1
+ continue # ignore this instance
+ ann["segmentation"] = segm
+ ann_lib[ann_id] = ann
+
+ obj.append(ann)
+
+ record["annotations"] = obj
+
+ # Process referring expressions
+ sents = ref_dict["sentences"]
+ for sent in sents:
+ ref_record = record.copy()
+ ref = {key: sent[key] for key in ref_keys if key in sent}
+ ref["ref_id"] = ref_dict["ref_id"]
+ ref_record["sentence"] = ref
+ dataset_dicts.append(ref_record)
+ # if ref_record['empty']:
+ # NT_count += 1
+ # else:
+ # MT_count += 1
+
+ # logger.info("NT samples: %d, MT samples: %d", NT_count, MT_count)
+
+ # Debug mode
+ # return dataset_dicts[:100]
+
+ return dataset_dicts
+
+
+if __name__ == "__main__":
+ """
+ Test the COCO json dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.coco \
+ path/to/json path/to/image_root dataset_name
+
+ "dataset_name" can be "coco_2014_minival_100", or other
+ pre-registered ones
+ """
+ import sys
+
+ import detectron2.data.datasets # noqa # add pre-defined metadata
+ from detectron2.utils.logger import setup_logger
+ from detectron2.utils.visualizer import Visualizer
+
+ REFCOCO_PATH = "/mnt/lustre/hhding/code/ReLA/datasets"
+ COCO_TRAIN_2014_IMAGE_ROOT = "/mnt/lustre/hhding/code/ReLA/datasets/images"
+ REFCOCO_DATASET = "grefcoco"
+ REFCOCO_SPLITBY = "unc"
+ REFCOCO_SPLIT = "train"
+
+ logger = setup_logger(name=__name__)
+
+ dicts = load_grefcoco_json(
+ REFCOCO_PATH,
+ REFCOCO_DATASET,
+ REFCOCO_SPLITBY,
+ REFCOCO_SPLIT,
+ COCO_TRAIN_2014_IMAGE_ROOT,
+ )
+ logger.info("Done loading {} samples.".format(len(dicts)))
diff --git a/utils/grefer.py b/utils/grefer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c881c5860a2bbfc89eb91b8fcf91cc32c27fbbf
--- /dev/null
+++ b/utils/grefer.py
@@ -0,0 +1,352 @@
+"""
+grefer v0.1
+This interface provides access to gRefCOCO.
+
+The following API functions are defined:
+G_REFER - REFER api class
+getRefIds - get ref ids that satisfy given filter conditions.
+getAnnIds - get ann ids that satisfy given filter conditions.
+getImgIds - get image ids that satisfy given filter conditions.
+getCatIds - get category ids that satisfy given filter conditions.
+loadRefs - load refs with the specified ref ids.
+loadAnns - load anns with the specified ann ids.
+loadImgs - load images with the specified image ids.
+loadCats - load category names with the specified category ids.
+getRefBox - get ref's bounding box [x, y, w, h] given the ref_id
+showRef - show image, segmentation or box of the referred object with the ref
+getMaskByRef - get mask and area of the referred object given ref or ref ids
+getMask - get mask and area of the referred object given ref
+showMask - show mask of the referred object given ref
+"""
+
+import itertools
+import json
+import os.path as osp
+import pickle
+import time
+
+import matplotlib.pyplot as plt
+import numpy as np
+import skimage.io as io
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from pycocotools import mask
+
+
+class G_REFER:
+ def __init__(self, data_root, dataset="grefcoco", splitBy="unc"):
+ # provide data_root folder which contains grefcoco
+ print("loading dataset %s into memory..." % dataset)
+ self.ROOT_DIR = osp.abspath(osp.dirname(__file__))
+ self.DATA_DIR = osp.join(data_root, dataset)
+ if dataset in ["grefcoco"]:
+ self.IMAGE_DIR = osp.join(data_root, "images/train2014")
+ else:
+ raise KeyError("No refer dataset is called [%s]" % dataset)
+
+ tic = time.time()
+
+ # load refs from data/dataset/refs(dataset).json
+ self.data = {}
+ self.data["dataset"] = dataset
+
+ ref_file = osp.join(self.DATA_DIR, f"grefs({splitBy}).p")
+ if osp.exists(ref_file):
+ self.data["refs"] = pickle.load(open(ref_file, "rb"), fix_imports=True)
+ else:
+ ref_file = osp.join(self.DATA_DIR, f"grefs({splitBy}).json")
+ if osp.exists(ref_file):
+ self.data["refs"] = json.load(open(ref_file, "rb"))
+ else:
+ raise FileNotFoundError("JSON file not found")
+
+ # load annotations from data/dataset/instances.json
+ instances_file = osp.join(self.DATA_DIR, "instances.json")
+ instances = json.load(open(instances_file, "r"))
+ self.data["images"] = instances["images"]
+ self.data["annotations"] = instances["annotations"]
+ self.data["categories"] = instances["categories"]
+
+ # create index
+ self.createIndex()
+ print("DONE (t=%.2fs)" % (time.time() - tic))
+
+ @staticmethod
+ def _toList(x):
+ return x if isinstance(x, list) else [x]
+
+ @staticmethod
+ def match_any(a, b):
+ a = a if isinstance(a, list) else [a]
+ b = b if isinstance(b, list) else [b]
+ return set(a) & set(b)
+
+ def createIndex(self):
+ # create sets of mapping
+ # 1) Refs: {ref_id: ref}
+ # 2) Anns: {ann_id: ann}
+ # 3) Imgs: {image_id: image}
+ # 4) Cats: {category_id: category_name}
+ # 5) Sents: {sent_id: sent}
+ # 6) imgToRefs: {image_id: refs}
+ # 7) imgToAnns: {image_id: anns}
+ # 8) refToAnn: {ref_id: ann}
+ # 9) annToRef: {ann_id: ref}
+ # 10) catToRefs: {category_id: refs}
+ # 11) sentToRef: {sent_id: ref}
+ # 12) sentToTokens: {sent_id: tokens}
+ print("creating index...")
+ # fetch info from instances
+ Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}
+ Anns[-1] = None
+ for ann in self.data["annotations"]:
+ Anns[ann["id"]] = ann
+ imgToAnns[ann["image_id"]] = imgToAnns.get(ann["image_id"], []) + [ann]
+ for img in self.data["images"]:
+ Imgs[img["id"]] = img
+ for cat in self.data["categories"]:
+ Cats[cat["id"]] = cat["name"]
+
+ # fetch info from refs
+ Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}
+ Sents, sentToRef, sentToTokens = {}, {}, {}
+ availableSplits = []
+ for ref in self.data["refs"]:
+ # ids
+ ref_id = ref["ref_id"]
+ ann_id = ref["ann_id"]
+ category_id = ref["category_id"]
+ image_id = ref["image_id"]
+
+ if ref["split"] not in availableSplits:
+ availableSplits.append(ref["split"])
+
+ # add mapping related to ref
+ if ref_id in Refs:
+ print("Duplicate ref id")
+ Refs[ref_id] = ref
+ imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]
+
+ category_id = self._toList(category_id)
+ added_cats = []
+ for cat in category_id:
+ if cat not in added_cats:
+ added_cats.append(cat)
+ catToRefs[cat] = catToRefs.get(cat, []) + [ref]
+
+ ann_id = self._toList(ann_id)
+ refToAnn[ref_id] = [Anns[ann] for ann in ann_id]
+ for ann_id_n in ann_id:
+ annToRef[ann_id_n] = annToRef.get(ann_id_n, []) + [ref]
+
+ # add mapping of sent
+ for sent in ref["sentences"]:
+ Sents[sent["sent_id"]] = sent
+ sentToRef[sent["sent_id"]] = ref
+ sentToTokens[sent["sent_id"]] = sent["tokens"]
+
+ # create class members
+ self.Refs = Refs
+ self.Anns = Anns
+ self.Imgs = Imgs
+ self.Cats = Cats
+ self.Sents = Sents
+ self.imgToRefs = imgToRefs
+ self.imgToAnns = imgToAnns
+ self.refToAnn = refToAnn
+ self.annToRef = annToRef
+ self.catToRefs = catToRefs
+ self.sentToRef = sentToRef
+ self.sentToTokens = sentToTokens
+ self.availableSplits = availableSplits
+ print("index created.")
+
+ def getRefIds(self, image_ids=[], cat_ids=[], split=[]):
+ image_ids = self._toList(image_ids)
+ cat_ids = self._toList(cat_ids)
+ split = self._toList(split)
+
+ for s in split:
+ if s not in self.availableSplits:
+ raise ValueError(f"Invalid split name: {s}")
+
+ refs = self.data["refs"]
+
+ if len(image_ids) > 0:
+ lists = [self.imgToRefs[image_id] for image_id in image_ids]
+ refs = list(itertools.chain.from_iterable(lists))
+ if len(cat_ids) > 0:
+ refs = [ref for ref in refs if self.match_any(ref["category_id"], cat_ids)]
+ if len(split) > 0:
+ refs = [ref for ref in refs if ref["split"] in split]
+
+ ref_ids = [ref["ref_id"] for ref in refs]
+ return ref_ids
+
+ def getAnnIds(self, image_ids=[], ref_ids=[]):
+ image_ids = self._toList(image_ids)
+ ref_ids = self._toList(ref_ids)
+
+ if any([len(image_ids), len(ref_ids)]):
+ if len(image_ids) > 0:
+ lists = [
+ self.imgToAnns[image_id]
+ for image_id in image_ids
+ if image_id in self.imgToAnns
+ ]
+ anns = list(itertools.chain.from_iterable(lists))
+ else:
+ anns = self.data["annotations"]
+ ann_ids = [ann["id"] for ann in anns]
+ if len(ref_ids) > 0:
+ lists = [self.Refs[ref_id]["ann_id"] for ref_id in ref_ids]
+ anns_by_ref_id = list(itertools.chain.from_iterable(lists))
+ ann_ids = list(set(ann_ids).intersection(set(anns_by_ref_id)))
+ else:
+ ann_ids = [ann["id"] for ann in self.data["annotations"]]
+
+ return ann_ids
+
+ def getImgIds(self, ref_ids=[]):
+ ref_ids = self._toList(ref_ids)
+
+ if len(ref_ids) > 0:
+ image_ids = list(set([self.Refs[ref_id]["image_id"] for ref_id in ref_ids]))
+ else:
+ image_ids = self.Imgs.keys()
+ return image_ids
+
+ def getCatIds(self):
+ return self.Cats.keys()
+
+ def loadRefs(self, ref_ids=[]):
+ return [self.Refs[ref_id] for ref_id in self._toList(ref_ids)]
+
+ def loadAnns(self, ann_ids=[]):
+ if isinstance(ann_ids, str):
+ ann_ids = int(ann_ids)
+ return [self.Anns[ann_id] for ann_id in self._toList(ann_ids)]
+
+ def loadImgs(self, image_ids=[]):
+ return [self.Imgs[image_id] for image_id in self._toList(image_ids)]
+
+ def loadCats(self, cat_ids=[]):
+ return [self.Cats[cat_id] for cat_id in self._toList(cat_ids)]
+
+ def getRefBox(self, ref_id):
+ anns = self.refToAnn[ref_id]
+ return [ann["bbox"] for ann in anns] # [x, y, w, h]
+
+ def showRef(self, ref, seg_box="seg"):
+ ax = plt.gca()
+ # show image
+ image = self.Imgs[ref["image_id"]]
+ I = io.imread(osp.join(self.IMAGE_DIR, image["file_name"]))
+ ax.imshow(I)
+ # show refer expression
+ for sid, sent in enumerate(ref["sentences"]):
+ print("%s. %s" % (sid + 1, sent["sent"]))
+ # show segmentations
+ if seg_box == "seg":
+ ann_id = ref["ann_id"]
+ ann = self.Anns[ann_id]
+ polygons = []
+ color = []
+ c = "none"
+ if type(ann["segmentation"][0]) == list:
+ # polygon used for refcoco*
+ for seg in ann["segmentation"]:
+                    poly = np.array(seg).reshape((len(seg) // 2, 2))
+                    polygons.append(Polygon(poly, closed=True, alpha=0.4))
+ color.append(c)
+ p = PatchCollection(
+ polygons,
+ facecolors=color,
+ edgecolors=(1, 1, 0, 0),
+ linewidths=3,
+ alpha=1,
+ )
+ ax.add_collection(p) # thick yellow polygon
+ p = PatchCollection(
+ polygons,
+ facecolors=color,
+ edgecolors=(1, 0, 0, 0),
+ linewidths=1,
+ alpha=1,
+ )
+ ax.add_collection(p) # thin red polygon
+ else:
+ # mask used for refclef
+ rle = ann["segmentation"]
+ m = mask.decode(rle)
+ img = np.ones((m.shape[0], m.shape[1], 3))
+ color_mask = np.array([2.0, 166.0, 101.0]) / 255
+ for i in range(3):
+ img[:, :, i] = color_mask[i]
+ ax.imshow(np.dstack((img, m * 0.5)))
+ # show bounding-box
+ elif seg_box == "box":
+ ann_id = ref["ann_id"]
+ ann = self.Anns[ann_id]
+ bbox = self.getRefBox(ref["ref_id"])
+ box_plot = Rectangle(
+ (bbox[0], bbox[1]),
+ bbox[2],
+ bbox[3],
+ fill=False,
+ edgecolor="green",
+ linewidth=3,
+ )
+ ax.add_patch(box_plot)
+
+ def getMask(self, ann):
+ if not ann:
+ return None
+ if ann["iscrowd"]:
+ raise ValueError("Crowd object")
+ image = self.Imgs[ann["image_id"]]
+ if type(ann["segmentation"][0]) == list: # polygon
+ rle = mask.frPyObjects(ann["segmentation"], image["height"], image["width"])
+ else:
+ rle = ann["segmentation"]
+
+ m = mask.decode(rle)
+ m = np.sum(
+ m, axis=2
+ ) # sometimes there are multiple binary map (corresponding to multiple segs)
+ m = m.astype(np.uint8) # convert to np.uint8
+ # compute area
+ area = sum(mask.area(rle)) # should be close to ann['area']
+ return {"mask": m, "area": area}
+
+ def getMaskByRef(self, ref=None, ref_id=None, merge=False):
+ if not ref and not ref_id:
+ raise ValueError
+ if ref:
+ ann_ids = ref["ann_id"]
+ ref_id = ref["ref_id"]
+ else:
+ ann_ids = self.getAnnIds(ref_ids=ref_id)
+
+ if ann_ids == [-1]:
+ img = self.Imgs[self.Refs[ref_id]["image_id"]]
+ return {
+ "mask": np.zeros([img["height"], img["width"]], dtype=np.uint8),
+ "empty": True,
+ }
+
+ anns = self.loadAnns(ann_ids)
+ mask_list = [self.getMask(ann) for ann in anns if not ann["iscrowd"]]
+
+ if merge:
+            merged_masks = sum([m["mask"] for m in mask_list])
+ merged_masks[np.where(merged_masks > 1)] = 1
+ return {"mask": merged_masks, "empty": False}
+ else:
+ return mask_list
+
+ def showMask(self, ref):
+ M = self.getMask(ref)
+ msk = M["mask"]
+ ax = plt.gca()
+ ax.imshow(msk)
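+
+
+# Minimal usage sketch for G_REFER. The data_root below is a placeholder and
+# must point at a directory containing grefcoco/ and images/train2014/.
+if __name__ == "__main__":
+    grefer = G_REFER(data_root="./refer_seg", dataset="grefcoco", splitBy="unc")
+    train_ref_ids = grefer.getRefIds(split="train")
+    print("train refs:", len(train_ref_ids))
+
+    # Load one ref and fetch its merged binary mask; no-target refs come back
+    # as an all-zero mask with empty=True.
+    ref = grefer.loadRefs(train_ref_ids[0])[0]
+    merged = grefer.getMaskByRef(ref=ref, merge=True)
+    print("mask shape:", merged["mask"].shape, "empty:", merged["empty"])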
diff --git a/utils/reason_aff_dataset.py b/utils/reason_aff_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4c0e40092cddabdd810564dfe58939ad0e8bc6a
--- /dev/null
+++ b/utils/reason_aff_dataset.py
@@ -0,0 +1,334 @@
+import glob
+import json
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import CLIPImageProcessor
+
+from model.llava import conversation as conversation_lib
+from model.segment_anything.utils.transforms import ResizeLongestSide
+
+from .data_processing import get_mask_from_json
+from .utils import (ANSWER_LIST, DEFAULT_IMAGE_TOKEN,
+ EXPLANATORY_QUESTION_LIST, LONG_QUESTION_LIST,
+ SHORT_QUESTION_LIST)
+from PIL import Image
+
+import pickle
+
+
+AFFORDANCE_QUESTION_LIST = [
+ DEFAULT_IMAGE_TOKEN + "\n" + "Can you segment the affordance map of {class_name} in this image?",
+ DEFAULT_IMAGE_TOKEN + "\n" + "Please segment the affordance map of {class_name} in this image.",
+ DEFAULT_IMAGE_TOKEN
+ + "\n"
+ + "What is the affordance map of {class_name} in this image? Please respond with segmentation mask.",
+ DEFAULT_IMAGE_TOKEN
+ + "\n"
+ + "What is the affordance map of {class_name} in this image? Please output segmentation mask.",
+]
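+
+# Illustrative example: each template carries a {class_name} placeholder, e.g.
+#   AFFORDANCE_QUESTION_LIST[0].format(class_name="mug")
+#   -> "<image>\nCan you segment the affordance map of mug in this image?"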
+
+
+class ReasonAffDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch=500 * 8 * 2 * 10,
+ precision: str = "fp32",
+ image_size: int = 224,
+ num_classes_per_sample: int = 3,
+ exclude_val=False,
+ reason_aff_data="handal_hard_reasoning",
+ reason_aff_sample_ratio=[1],
+ explanatory=0.1,
+ ):
+ self.exclude_val = exclude_val
+ self.reason_aff_data = reason_aff_data
+ reason_aff_sample_ratio = np.array(reason_aff_sample_ratio)
+ self.reason_aff_sample_ratio = reason_aff_sample_ratio / reason_aff_sample_ratio.sum()
+ self.samples_per_epoch = samples_per_epoch
+ self.explanatory = explanatory
+ self.num_classes_per_sample = num_classes_per_sample
+
+ self.base_image_dir = base_image_dir
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.precision = precision
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ self.short_question_list = SHORT_QUESTION_LIST
+ self.affordance_question_list = AFFORDANCE_QUESTION_LIST
+ self.long_question_list = LONG_QUESTION_LIST
+ self.answer_list = ANSWER_LIST
+
+ reason_aff_datas = reason_aff_data.split("||")
+ self.data2list = {}
+ self.object_ids = {}
+ for ds in reason_aff_datas:
+ if ds == "handal_hard_reasoning" or ds == "egoobjects_easy_reasoning" or ds == "egoobjects_hard_reasoning":
+ pkl_path = os.path.join(base_image_dir, f'{ds}_train.pkl')
+ images = {}
+ labels = {}
+ questions = {}
+ answers = {}
+ with open(pkl_path, 'rb') as f:
+ aff_datas = pickle.load(f)
+ for aff_data in aff_datas:
+ if aff_data['task_object_class'] not in images:
+ images[aff_data['task_object_class']] = []
+ labels[aff_data['task_object_class']] = []
+ questions[aff_data['task_object_class']] = []
+ answers[aff_data['task_object_class']] = []
+ images[aff_data['task_object_class']].append(aff_data['frame_path'])
+ labels[aff_data['task_object_class']].append(aff_data['mask_path'])
+ questions[aff_data['task_object_class']].append(aff_data['question'])
+ answers[aff_data['task_object_class']].append(aff_data['answer'])
+                # sanity check: every image has a corresponding mask label per class
+ for k in images.keys():
+ assert len(images[k]) == len(labels[k])
+ self.data2list[ds] = (images, labels, questions, answers)
+ print(f"categories of {ds}: ", images.keys())
+ print(f"number of {ds} samples: ", len(aff_datas))
+ else:
+ raise ValueError(f"Unsupported affordance segmentation dataset: {ds}")
+
+ def __len__(self):
+ return self.samples_per_epoch
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
+ ds = np.random.choice(list(self.data2list.keys()), p=self.reason_aff_sample_ratio)
+
+ images, labels, my_questions, my_answers = self.data2list[ds]
+ class_name = random.choice(list(images.keys()))
+ idx = random.randint(0, len(images[class_name]) - 1)
+ image_path = images[class_name][idx]
+ label_path = labels[class_name][idx]
+ my_question = my_questions[class_name][idx]
+ my_answer = my_answers[class_name][idx]
+
+ # load image and prepare input for clip and sam
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ ori_size = image.shape[:2]
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+
+ image = self.transform.apply_image(image) # preprocess image for sam
+ resize = image.shape[:2]
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ # load class names
+ sampled_classes = [class_name]
+
+ # load label
+ label = Image.open(label_path)
+ label = np.array(label)
+ label = torch.from_numpy(label).long()
+ masks = []
+ if ds == 'graspnet':
+ object_id = self.object_ids[ds][class_name][idx]
+ # if data is from graspnet and object_id exists, use the mask of the object_id
+ if object_id is None:
+ for _ in range(len(sampled_classes)):
+ masks.append(label > 0)
+ else:
+ for _ in range(len(sampled_classes)):
+ masks.append(label == object_id)
+ else:
+ for _ in range(len(sampled_classes)):
+ masks.append(label > 0)
+ masks = torch.stack(masks, dim=0)
+
+ questions = []
+ answers = []
+ for sampled_cls in sampled_classes:
+ text = sampled_cls
+
+ # assert len(text.split("||")) == 1
+ # question_template = random.choice(self.affordance_question_list)
+ # questions.append(question_template.format(class_name=text.lower()))
+ #
+ # answers.append(random.choice(self.answer_list))
+ questions.append(DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + my_question)
+ # answers.append(my_answer + " [SEG].")
+ answers.append(my_answer + " [AFF].")
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+
+ i = 0
+ while i < len(questions):
+ conv.messages = []
+ conv.append_message(conv.roles[0], questions[i])
+ conv.append_message(conv.roles[1], answers[i])
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ questions,
+ sampled_classes,
+ )
+
+
+class ReasonAffValDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ val_dataset,
+ image_size=1024,
+ ):
+ self.base_image_dir = base_image_dir.replace("/lisa_data", "")
+ # splits = val_dataset.split("|")
+ # ds, split = splits
+ ds = val_dataset
+
+ self.images = []
+ self.labels = []
+ self.questions = []
+ self.answers = []
+ self.class_ids = []
+ self.class_names = []
+ pkl_path = os.path.join(self.base_image_dir, f'{ds}_val.pkl')
+ with open(pkl_path, 'rb') as f:
+ reason_datas = pickle.load(f)
+ for reason_data in reason_datas:
+ # one image is broken in 3doi_easy_reasoning_val.pkl, so skip it
+ if 'EK_frame_0000040462.jpg' in reason_data['frame_path']:
+ continue
+ self.images.append(reason_data['frame_path'])
+ self.labels.append(reason_data['mask_path'])
+ self.questions.append(reason_data['question'])
+ self.answers.append(reason_data['answer'])
+ self.class_ids.append(None)
+ self.class_names.append(reason_data['task_object_class'])
+
+ self.ds = ds
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ def __len__(self):
+ return len(self.images)
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
+
+ # load image
+ image_path = self.images[idx]
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+
+ # preprocess image for sam
+ image = self.transform.apply_image(image)
+ resize = image.shape[:2]
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ # load class names
+ sampled_sents = [self.class_names[idx]]
+
+ # load label
+ label_path = self.labels[idx]
+ label = Image.open(label_path)
+ label = np.array(label)
+ label = torch.from_numpy(label).long()
+ masks = []
+ class_id = self.class_ids[idx]
+ # if data object_id exists, use the mask of the object_id
+ if class_id is None:
+ for _ in range(len(sampled_sents)):
+ masks.append(label > 0)
+ else:
+ for _ in range(len(sampled_sents)):
+ masks.append(label == class_id)
+ masks = torch.stack(masks, dim=0)
+
+ # load question and answer
+ my_question = self.questions[idx]
+ my_answer = self.answers[idx]
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+ i = 0
+ while i < len(sampled_sents):
+ conv.messages = []
+ text = sampled_sents[i].strip()
+
+ conv.append_message(
+ conv.roles[0],
+ DEFAULT_IMAGE_TOKEN + "\n" + "You are an embodied robot. " + "{}".format(my_question),
+ )
+ conv.append_message(conv.roles[1], my_answer + " [AFF].")
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ inference = True
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ None,
+ None,
+ inference,
+ )
\ No newline at end of file
diff --git a/utils/reason_seg_dataset.py b/utils/reason_seg_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..62012092250f0279a179691c88d9cfa213b34463
--- /dev/null
+++ b/utils/reason_seg_dataset.py
@@ -0,0 +1,218 @@
+import glob
+import json
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from transformers import CLIPImageProcessor
+
+from model.llava import conversation as conversation_lib
+from model.segment_anything.utils.transforms import ResizeLongestSide
+
+from .data_processing import get_mask_from_json
+from .utils import (ANSWER_LIST, DEFAULT_IMAGE_TOKEN,
+ EXPLANATORY_QUESTION_LIST, LONG_QUESTION_LIST,
+ SHORT_QUESTION_LIST)
+
+
+class ReasonSegDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch=500 * 8 * 2 * 10,
+ precision: str = "fp32",
+ image_size: int = 224,
+ num_classes_per_sample: int = 3,
+ exclude_val=False,
+ reason_seg_data="ReasonSeg|train",
+ explanatory=0.1,
+ ):
+ self.exclude_val = exclude_val
+ self.reason_seg_data = reason_seg_data
+ self.samples_per_epoch = samples_per_epoch
+ self.explanatory = explanatory
+ self.num_classes_per_sample = num_classes_per_sample
+
+ self.base_image_dir = base_image_dir
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.precision = precision
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ self.short_question_list = SHORT_QUESTION_LIST
+ self.long_question_list = LONG_QUESTION_LIST
+ self.answer_list = ANSWER_LIST
+
+ reason_seg_data, splits = reason_seg_data.split("|")
+ splits = splits.split("_")
+ images = []
+ for split in splits:
+ images_split = glob.glob(
+ os.path.join(
+ base_image_dir, "reason_seg", reason_seg_data, split, "*.jpg"
+ )
+ )
+ images.extend(images_split)
+ jsons = [path.replace(".jpg", ".json") for path in images]
+ self.reason_seg_data = (images, jsons)
+
+ print("number of reason_seg samples: ", len(images))
+
+ if explanatory != -1:
+ self.explanatory_question_list = EXPLANATORY_QUESTION_LIST
+ self.img_to_explanation = {}
+ with open(
+ os.path.join(
+ base_image_dir,
+ "reason_seg",
+ reason_seg_data,
+ "explanatory",
+ "train.json",
+ )
+ ) as f:
+ items = json.load(f)
+ for item in items:
+ img_name = item["image"]
+ self.img_to_explanation[img_name] = {
+ "query": item["query"],
+ "outputs": item["outputs"],
+ }
+
+ print("len(self.img_to_explanation): ", len(self.img_to_explanation))
+
+ def __len__(self):
+ return self.samples_per_epoch
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
+ images, jsons = self.reason_seg_data
+ idx = random.randint(0, len(images) - 1)
+ image_path = images[idx]
+ json_path = jsons[idx]
+
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ ori_size = image.shape[:2]
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+
+ mask, sents, is_sentence = get_mask_from_json(json_path, image)
+ if len(sents) >= self.num_classes_per_sample:
+ sampled_inds = np.random.choice(
+ list(range(len(sents))), size=self.num_classes_per_sample, replace=False
+ )
+ else:
+ sampled_inds = list(range(len(sents)))
+ sampled_sents = np.vectorize(sents.__getitem__)(sampled_inds).tolist()
+ sampled_masks = [
+ (mask == 1).astype(np.float32) for _ in range(len(sampled_inds))
+ ]
+
+ image = self.transform.apply_image(image) # preprocess image for sam
+ resize = image.shape[:2]
+
+ image_name = image_path.split("/")[-1]
+ if self.explanatory != -1 and image_name in self.img_to_explanation:
+ if random.random() < self.explanatory:
+ choice = 2
+ else:
+ choice = random.randint(0, 1)
+
+ questions = []
+ answers = []
+ for text in sampled_sents:
+ if is_sentence:
+ question_template = random.choice(self.long_question_list)
+ questions.append(question_template.format(sent=text))
+ else:
+ question_template = random.choice(self.short_question_list)
+ questions.append(question_template.format(class_name=text.lower()))
+
+ # add explanation if applicable
+ img_name = image_path.split("/")[-1]
+ if self.explanatory != -1 and img_name in self.img_to_explanation:
+ if choice == 0: # [SEG] token
+ answers.append(random.choice(self.answer_list))
+ elif choice == 1: # [SEG] token + text answer
+ image_name = image_path.split("/")[-1]
+ answer = self.img_to_explanation[image_name]["outputs"]
+ answer = random.choice(self.answer_list) + " {}".format(answer)
+ questions[-1] = (
+ DEFAULT_IMAGE_TOKEN
+ + "\n"
+ + text
+ + " {}".format(random.choice(self.explanatory_question_list))
+ )
+ answers.append(answer)
+ elif choice == 2: # vanilla text answer
+ image_name = image_path.split("/")[-1]
+ answer = self.img_to_explanation[image_name]["outputs"]
+ questions[-1] = DEFAULT_IMAGE_TOKEN + "\n" + text
+ answers.append(answer)
+ else:
+ raise ValueError("Not implemented yet.")
+ else:
+ answers.append(random.choice(self.answer_list))
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+ roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
+
+ i = 0
+ while i < len(questions):
+ conv.messages = []
+ conv.append_message(conv.roles[0], questions[i])
+ conv.append_message(conv.roles[1], answers[i])
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ image_name = image_path.split("/")[-1]
+ if (
+ self.explanatory != -1
+ and image_name in self.img_to_explanation
+ and choice == 2
+ ):
+ masks = torch.rand(0, *ori_size)
+ label = torch.ones(ori_size) * self.ignore_label
+ else:
+ masks = np.stack(sampled_masks, axis=0)
+ masks = torch.from_numpy(masks)
+ label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ questions,
+ sampled_sents,
+ )
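+
+
+# Minimal usage sketch (the tokenizer path and data layout below are
+# placeholders; ReasonSeg images and json files are expected under
+# {base_image_dir}/reason_seg/ReasonSeg/):
+#
+#   from transformers import AutoTokenizer
+#   tokenizer = AutoTokenizer.from_pretrained("path/to/llava-checkpoint")
+#   ds = ReasonSegDataset("./dataset", tokenizer, "openai/clip-vit-large-patch14",
+#                         samples_per_epoch=100, image_size=1024)
+#   (image_path, image, image_clip, conversations,
+#    masks, label, resize, questions, sampled_sents) = ds[0]
+#   # image: padded 3x1024x1024 SAM input, image_clip: CLIP pixel values,
+#   # masks: one binary mask per sampled sentence (empty for text-only
+#   # explanatory samples).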
diff --git a/utils/refer.py b/utils/refer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b4cea716e40e73d0b5aa118143eb076392f5eb1
--- /dev/null
+++ b/utils/refer.py
@@ -0,0 +1,391 @@
+__author__ = "licheng"
+
+"""
+This interface provides access to four datasets:
+1) refclef
+2) refcoco
+3) refcoco+
+4) refcocog
+split by unc and google
+
+The following API functions are defined:
+REFER - REFER api class
+getRefIds - get ref ids that satisfy given filter conditions.
+getAnnIds - get ann ids that satisfy given filter conditions.
+getImgIds - get image ids that satisfy given filter conditions.
+getCatIds - get category ids that satisfy given filter conditions.
+loadRefs - load refs with the specified ref ids.
+loadAnns - load anns with the specified ann ids.
+loadImgs - load images with the specified image ids.
+loadCats - load category names with the specified category ids.
+getRefBox - get ref's bounding box [x, y, w, h] given the ref_id
+showRef - show image, segmentation or box of the referred object with the ref
+getMask - get mask and area of the referred object given ref
+showMask - show mask of the referred object given ref
+"""
+
+import itertools
+import json
+import os.path as osp
+import pickle
+import sys
+import time
+from pprint import pprint
+
+import matplotlib.pyplot as plt
+import numpy as np
+import skimage.io as io
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon, Rectangle
+from pycocotools import mask
+
+
+class REFER:
+ def __init__(self, data_root, dataset="refcoco", splitBy="unc"):
+ # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog
+ # also provide dataset name and splitBy information
+ # e.g., dataset = 'refcoco', splitBy = 'unc'
+ print("loading dataset %s into memory..." % dataset)
+ self.ROOT_DIR = osp.abspath(osp.dirname(__file__))
+ self.DATA_DIR = osp.join(data_root, dataset)
+ if dataset in ["refcoco", "refcoco+", "refcocog"]:
+ self.IMAGE_DIR = osp.join(data_root, "images/mscoco/images/train2014")
+ elif dataset == "refclef":
+ self.IMAGE_DIR = osp.join(data_root, "images/saiapr_tc-12")
+ else:
+ print("No refer dataset is called [%s]" % dataset)
+ sys.exit()
+
+ self.dataset = dataset
+
+ # load refs from data/dataset/refs(dataset).json
+ tic = time.time()
+
+ ref_file = osp.join(self.DATA_DIR, "refs(" + splitBy + ").p")
+ print("ref_file: ", ref_file)
+ self.data = {}
+ self.data["dataset"] = dataset
+ self.data["refs"] = pickle.load(open(ref_file, "rb"))
+
+ # load annotations from data/dataset/instances.json
+ instances_file = osp.join(self.DATA_DIR, "instances.json")
+ instances = json.load(open(instances_file, "rb"))
+ self.data["images"] = instances["images"]
+ self.data["annotations"] = instances["annotations"]
+ self.data["categories"] = instances["categories"]
+
+ # create index
+ self.createIndex()
+ print("DONE (t=%.2fs)" % (time.time() - tic))
+
+ def createIndex(self):
+ # create sets of mapping
+ # 1) Refs: {ref_id: ref}
+ # 2) Anns: {ann_id: ann}
+ # 3) Imgs: {image_id: image}
+ # 4) Cats: {category_id: category_name}
+ # 5) Sents: {sent_id: sent}
+ # 6) imgToRefs: {image_id: refs}
+ # 7) imgToAnns: {image_id: anns}
+ # 8) refToAnn: {ref_id: ann}
+ # 9) annToRef: {ann_id: ref}
+ # 10) catToRefs: {category_id: refs}
+ # 11) sentToRef: {sent_id: ref}
+ # 12) sentToTokens: {sent_id: tokens}
+ print("creating index...")
+ # fetch info from instances
+ Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}
+ for ann in self.data["annotations"]:
+ Anns[ann["id"]] = ann
+ imgToAnns[ann["image_id"]] = imgToAnns.get(ann["image_id"], []) + [ann]
+ for img in self.data["images"]:
+ Imgs[img["id"]] = img
+ for cat in self.data["categories"]:
+ Cats[cat["id"]] = cat["name"]
+
+ # fetch info from refs
+ Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}
+ Sents, sentToRef, sentToTokens = {}, {}, {}
+ for ref in self.data["refs"]:
+ # ids
+ ref_id = ref["ref_id"]
+ ann_id = ref["ann_id"]
+ category_id = ref["category_id"]
+ image_id = ref["image_id"]
+
+ # add mapping related to ref
+ Refs[ref_id] = ref
+ imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]
+ catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]
+ refToAnn[ref_id] = Anns[ann_id]
+ annToRef[ann_id] = ref
+
+ # add mapping of sent
+ for sent in ref["sentences"]:
+ Sents[sent["sent_id"]] = sent
+ sentToRef[sent["sent_id"]] = ref
+ sentToTokens[sent["sent_id"]] = sent["tokens"]
+
+ # create class members
+ self.Refs = Refs
+ self.Anns = Anns
+ self.Imgs = Imgs
+ self.Cats = Cats
+ self.Sents = Sents
+ self.imgToRefs = imgToRefs
+ self.imgToAnns = imgToAnns
+ self.refToAnn = refToAnn
+ self.annToRef = annToRef
+ self.catToRefs = catToRefs
+ self.sentToRef = sentToRef
+ self.sentToTokens = sentToTokens
+ print("index created.")
+
+ def getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=""):
+ image_ids = image_ids if type(image_ids) == list else [image_ids]
+ cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]
+ ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
+
+ if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:
+ refs = self.data["refs"]
+ else:
+ if not len(image_ids) == 0:
+ refs = [self.imgToRefs[image_id] for image_id in image_ids]
+ else:
+ refs = self.data["refs"]
+ if not len(cat_ids) == 0:
+ refs = [ref for ref in refs if ref["category_id"] in cat_ids]
+ if not len(ref_ids) == 0:
+ refs = [ref for ref in refs if ref["ref_id"] in ref_ids]
+ if not len(split) == 0:
+ if split in ["testA", "testB", "testC"]:
+ refs = [
+ ref for ref in refs if split[-1] in ref["split"]
+ ] # we also consider testAB, testBC, ...
+ elif split in ["testAB", "testBC", "testAC"]:
+ refs = [
+ ref for ref in refs if ref["split"] == split
+ ] # rarely used I guess...
+ elif split == "test":
+ refs = [ref for ref in refs if "test" in ref["split"]]
+ elif split == "train" or split == "val":
+ refs = [ref for ref in refs if ref["split"] == split]
+ else:
+ print("No such split [%s]" % split)
+ sys.exit()
+ ref_ids = [ref["ref_id"] for ref in refs]
+ return ref_ids
+
+ def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):
+ image_ids = image_ids if type(image_ids) == list else [image_ids]
+ cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]
+ ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
+
+ if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:
+ ann_ids = [ann["id"] for ann in self.data["annotations"]]
+ else:
+ if not len(image_ids) == 0:
+ lists = [
+ self.imgToAnns[image_id]
+ for image_id in image_ids
+ if image_id in self.imgToAnns
+ ] # list of [anns]
+ anns = list(itertools.chain.from_iterable(lists))
+ else:
+ anns = self.data["annotations"]
+ if not len(cat_ids) == 0:
+ anns = [ann for ann in anns if ann["category_id"] in cat_ids]
+ ann_ids = [ann["id"] for ann in anns]
+ if not len(ref_ids) == 0:
+                ann_ids = list(
+                    set(ann_ids).intersection(
+                        set([self.Refs[ref_id]["ann_id"] for ref_id in ref_ids])
+                    )
+                )
+ return ann_ids
+
+ def getImgIds(self, ref_ids=[]):
+ ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]
+
+ if not len(ref_ids) == 0:
+ image_ids = list(set([self.Refs[ref_id]["image_id"] for ref_id in ref_ids]))
+ else:
+ image_ids = self.Imgs.keys()
+ return image_ids
+
+ def getCatIds(self):
+ return self.Cats.keys()
+
+ def loadRefs(self, ref_ids=[]):
+ if type(ref_ids) == list:
+ return [self.Refs[ref_id] for ref_id in ref_ids]
+ elif type(ref_ids) == int:
+ return [self.Refs[ref_ids]]
+
+ def loadAnns(self, ann_ids=[]):
+ if type(ann_ids) == list:
+ return [self.Anns[ann_id] for ann_id in ann_ids]
+        elif type(ann_ids) == int or type(ann_ids) == str:
+ return [self.Anns[ann_ids]]
+
+ def loadImgs(self, image_ids=[]):
+ if type(image_ids) == list:
+ return [self.Imgs[image_id] for image_id in image_ids]
+ elif type(image_ids) == int:
+ return [self.Imgs[image_ids]]
+
+ def loadCats(self, cat_ids=[]):
+ if type(cat_ids) == list:
+ return [self.Cats[cat_id] for cat_id in cat_ids]
+ elif type(cat_ids) == int:
+ return [self.Cats[cat_ids]]
+
+ def getRefBox(self, ref_id):
+ ref = self.Refs[ref_id]
+ ann = self.refToAnn[ref_id]
+ return ann["bbox"] # [x, y, w, h]
+
+ def showRef(self, ref, seg_box="seg"):
+ ax = plt.gca()
+ # show image
+ image = self.Imgs[ref["image_id"]]
+ I = io.imread(osp.join(self.IMAGE_DIR, image["file_name"]))
+ ax.imshow(I)
+ # show refer expression
+ for sid, sent in enumerate(ref["sentences"]):
+ print("%s. %s" % (sid + 1, sent["sent"]))
+ # show segmentations
+ if seg_box == "seg":
+ ann_id = ref["ann_id"]
+ ann = self.Anns[ann_id]
+ polygons = []
+ color = []
+ c = "none"
+ if type(ann["segmentation"][0]) == list:
+ # polygon used for refcoco*
+ for seg in ann["segmentation"]:
+                    poly = np.array(seg).reshape((len(seg) // 2, 2))
+                    polygons.append(Polygon(poly, closed=True, alpha=0.4))
+ color.append(c)
+ p = PatchCollection(
+ polygons,
+ facecolors=color,
+ edgecolors=(1, 1, 0, 0),
+ linewidths=3,
+ alpha=1,
+ )
+ ax.add_collection(p) # thick yellow polygon
+ p = PatchCollection(
+ polygons,
+ facecolors=color,
+ edgecolors=(1, 0, 0, 0),
+ linewidths=1,
+ alpha=1,
+ )
+ ax.add_collection(p) # thin red polygon
+ else:
+ # mask used for refclef
+ rle = ann["segmentation"]
+ m = mask.decode(rle)
+ img = np.ones((m.shape[0], m.shape[1], 3))
+ color_mask = np.array([2.0, 166.0, 101.0]) / 255
+ for i in range(3):
+ img[:, :, i] = color_mask[i]
+ ax.imshow(np.dstack((img, m * 0.5)))
+ # show bounding-box
+ elif seg_box == "box":
+ ann_id = ref["ann_id"]
+ ann = self.Anns[ann_id]
+ bbox = self.getRefBox(ref["ref_id"])
+ box_plot = Rectangle(
+ (bbox[0], bbox[1]),
+ bbox[2],
+ bbox[3],
+ fill=False,
+ edgecolor="green",
+ linewidth=3,
+ )
+ ax.add_patch(box_plot)
+
+ def getMask(self, ref):
+ # return mask, area and mask-center
+ ann = self.refToAnn[ref["ref_id"]]
+ image = self.Imgs[ref["image_id"]]
+ if type(ann["segmentation"][0]) == list: # polygon
+ rle = mask.frPyObjects(ann["segmentation"], image["height"], image["width"])
+ else:
+ rle = ann["segmentation"]
+ m = mask.decode(rle)
+ m = np.sum(
+ m, axis=2
+ ) # sometimes there are multiple binary map (corresponding to multiple segs)
+ m = m.astype(np.uint8) # convert to np.uint8
+ # compute area
+ area = sum(mask.area(rle)) # should be close to ann['area']
+ return {"mask": m, "area": area}
+ # # position
+ # position_x = np.mean(np.where(m==1)[1]) # [1] means columns (matlab style) -> x (c style)
+ # position_y = np.mean(np.where(m==1)[0]) # [0] means rows (matlab style) -> y (c style)
+ # # mass position (if there were multiple regions, we use the largest one.)
+ # label_m = label(m, connectivity=m.ndim)
+ # regions = regionprops(label_m)
+ # if len(regions) > 0:
+ # largest_id = np.argmax(np.array([props.filled_area for props in regions]))
+ # largest_props = regions[largest_id]
+ # mass_y, mass_x = largest_props.centroid
+ # else:
+ # mass_x, mass_y = position_x, position_y
+ # # if centroid is not in mask, we find the closest point to it from mask
+ # if m[mass_y, mass_x] != 1:
+ # print('Finding closes mask point ...')
+ # kernel = np.ones((10, 10),np.uint8)
+ # me = cv2.erode(m, kernel, iterations = 1)
+ # points = zip(np.where(me == 1)[0].tolist(), np.where(me == 1)[1].tolist()) # row, col style
+ # points = np.array(points)
+ # dist = np.sum((points - (mass_y, mass_x))**2, axis=1)
+ # id = np.argsort(dist)[0]
+ # mass_y, mass_x = points[id]
+ # # return
+ # return {'mask': m, 'area': area, 'position_x': position_x, 'position_y': position_y, 'mass_x': mass_x, 'mass_y': mass_y}
+ # # show image and mask
+ # I = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))
+ # plt.figure()
+ # plt.imshow(I)
+ # ax = plt.gca()
+ # img = np.ones( (m.shape[0], m.shape[1], 3) )
+ # color_mask = np.array([2.0,166.0,101.0])/255
+ # for i in range(3):
+ # img[:,:,i] = color_mask[i]
+ # ax.imshow(np.dstack( (img, m*0.5) ))
+ # plt.show()
+
+ def showMask(self, ref):
+ M = self.getMask(ref)
+ msk = M["mask"]
+ ax = plt.gca()
+ ax.imshow(msk)
+
+
+if __name__ == "__main__":
+    # The first positional argument is the refer data root (placeholder path).
+    refer = REFER("./refer_seg", dataset="refcocog", splitBy="google")
+ ref_ids = refer.getRefIds()
+ print(len(ref_ids))
+
+ print(len(refer.Imgs))
+ print(len(refer.imgToRefs))
+
+ ref_ids = refer.getRefIds(split="train")
+ print("There are %s training referred objects." % len(ref_ids))
+
+ for ref_id in ref_ids:
+ ref = refer.loadRefs(ref_id)[0]
+ if len(ref["sentences"]) < 2:
+ continue
+
+ pprint(ref)
+ print("The label is %s." % refer.Cats[ref["category_id"]])
+ plt.figure()
+ refer.showRef(ref, seg_box="box")
+ plt.show()
+
+ # plt.figure()
+ # refer.showMask(ref)
+ # plt.show()
diff --git a/utils/refer_seg_dataset.py b/utils/refer_seg_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..e28273d9d54a6882ee0f7388c4b85ebc974d6cbf
--- /dev/null
+++ b/utils/refer_seg_dataset.py
@@ -0,0 +1,277 @@
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from pycocotools import mask
+from transformers import CLIPImageProcessor
+
+from model.llava import conversation as conversation_lib
+from model.segment_anything.utils.transforms import ResizeLongestSide
+
+from .grefer import G_REFER
+from .refer import REFER
+from .utils import ANSWER_LIST, SHORT_QUESTION_LIST
+
+
+class ReferSegDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch=500 * 8 * 2 * 10,
+ precision: str = "fp32",
+ image_size: int = 224,
+ num_classes_per_sample: int = 3,
+ exclude_val=False,
+ refer_seg_data="refclef||refcoco||refcoco+||refcocog",
+ ):
+ self.exclude_val = exclude_val
+ self.samples_per_epoch = samples_per_epoch
+ self.num_classes_per_sample = num_classes_per_sample
+
+ self.base_image_dir = base_image_dir
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.precision = precision
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ self.short_question_list = SHORT_QUESTION_LIST
+ self.answer_list = ANSWER_LIST
+
+ DATA_DIR = os.path.join(base_image_dir, "refer_seg")
+ self.refer_seg_ds_list = refer_seg_data.split(
+ "||"
+ ) # ['refclef', 'refcoco', 'refcoco+', 'refcocog']
+ self.refer_seg_data = {}
+ for ds in self.refer_seg_ds_list:
+ if ds == "refcocog":
+ splitBy = "umd"
+ else:
+ splitBy = "unc"
+
+ if ds == "grefcoco":
+ refer_api = G_REFER(DATA_DIR, ds, splitBy)
+ else:
+ refer_api = REFER(DATA_DIR, ds, splitBy)
+ ref_ids_train = refer_api.getRefIds(split="train")
+ images_ids_train = refer_api.getImgIds(ref_ids=ref_ids_train)
+ refs_train = refer_api.loadRefs(ref_ids=ref_ids_train)
+
+ refer_seg_ds = {}
+ refer_seg_ds["images"] = []
+ loaded_images = refer_api.loadImgs(image_ids=images_ids_train)
+
+ for item in loaded_images:
+ item = item.copy()
+ if ds == "refclef":
+ item["file_name"] = os.path.join(
+ DATA_DIR, "images/saiapr_tc-12", item["file_name"]
+ )
+ else:
+ item["file_name"] = os.path.join(
+ DATA_DIR, "images/mscoco/images/train2014", item["file_name"]
+ )
+ refer_seg_ds["images"].append(item)
+ refer_seg_ds["annotations"] = refer_api.Anns # anns_train
+
+ print(
+ "dataset {} (refs {}) (train split) has {} images and {} annotations.".format(
+ ds,
+ splitBy,
+ len(refer_seg_ds["images"]),
+ len(refer_seg_ds["annotations"]),
+ )
+ )
+
+ img2refs = {}
+ for ref in refs_train:
+ image_id = ref["image_id"]
+ img2refs[image_id] = img2refs.get(image_id, []) + [
+ ref,
+ ]
+ refer_seg_ds["img2refs"] = img2refs
+ self.refer_seg_data[ds] = refer_seg_ds
+
+ def __len__(self):
+ return self.samples_per_epoch
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
+ ds = random.randint(0, len(self.refer_seg_ds_list) - 1)
+ ds = self.refer_seg_ds_list[ds]
+ refer_seg_ds = self.refer_seg_data[ds]
+ images = refer_seg_ds["images"]
+ annotations = refer_seg_ds["annotations"]
+ img2refs = refer_seg_ds["img2refs"]
+ idx = random.randint(0, len(images) - 1)
+ image_info = images[idx]
+ image_path = image_info["file_name"]
+ image_id = image_info["id"]
+ refs = img2refs[image_id]
+ if len(refs) == 0:
+ return self.__getitem__(0)
+
+ sents = []
+ ann_ids = []
+ for ref in refs:
+ for sent in ref["sentences"]:
+ text = sent["sent"]
+ sents.append(text)
+ ann_ids.append(ref["ann_id"])
+ if len(sents) >= self.num_classes_per_sample:
+ sampled_inds = np.random.choice(
+ list(range(len(sents))), size=self.num_classes_per_sample, replace=False
+ )
+ else:
+ sampled_inds = list(range(len(sents)))
+ sampled_sents = np.vectorize(sents.__getitem__)(sampled_inds).tolist()
+ # sampled_ann_ids = np.vectorize(ann_ids.__getitem__)(sampled_inds).tolist()
+ sampled_ann_ids = [ann_ids[ind] for ind in sampled_inds]
+ sampled_classes = sampled_sents
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][0]
+
+ image = self.transform.apply_image(image) # preprocess image for sam
+ resize = image.shape[:2]
+
+ questions = []
+ answers = []
+ for text in sampled_classes:
+ text = text.strip()
+ assert len(text.split("||")) == 1
+ question_template = random.choice(self.short_question_list)
+ questions.append(question_template.format(class_name=text.lower()))
+ answers.append(random.choice(self.answer_list))
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+
+ i = 0
+ while i < len(questions):
+ conv.messages = []
+ conv.append_message(conv.roles[0], questions[i])
+ conv.append_message(conv.roles[1], answers[i])
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ flag = False
+ masks = []
+ for ann_id in sampled_ann_ids:
+ if isinstance(ann_id, list):
+ flag = True
+ if -1 in ann_id:
+ assert len(ann_id) == 1
+ m = np.zeros((image_info["height"], image_info["width"])).astype(
+ np.uint8
+ )
+ else:
+ m_final = np.zeros(
+ (image_info["height"], image_info["width"])
+ ).astype(np.uint8)
+ for ann_id_i in ann_id:
+ ann = annotations[ann_id_i]
+
+ if len(ann["segmentation"]) == 0:
+ m = np.zeros(
+ (image_info["height"], image_info["width"])
+ ).astype(np.uint8)
+ else:
+ if type(ann["segmentation"][0]) == list: # polygon
+ rle = mask.frPyObjects(
+ ann["segmentation"],
+ image_info["height"],
+ image_info["width"],
+ )
+ else:
+ rle = ann["segmentation"]
+ for i in range(len(rle)):
+ if not isinstance(rle[i]["counts"], bytes):
+ rle[i]["counts"] = rle[i]["counts"].encode()
+ m = mask.decode(rle)
+ m = np.sum(
+ m, axis=2
+ ) # sometimes there are multiple binary map (corresponding to multiple segs)
+ m = m.astype(np.uint8) # convert to np.uint8
+ m_final = m_final | m
+ m = m_final
+ masks.append(m)
+ continue
+
+ ann = annotations[ann_id]
+
+ if len(ann["segmentation"]) == 0:
+ m = np.zeros((image_info["height"], image_info["width"])).astype(
+ np.uint8
+ )
+ masks.append(m)
+ continue
+
+ if type(ann["segmentation"][0]) == list: # polygon
+ rle = mask.frPyObjects(
+ ann["segmentation"], image_info["height"], image_info["width"]
+ )
+ else:
+ rle = ann["segmentation"]
+ for i in range(len(rle)):
+ if not isinstance(rle[i]["counts"], bytes):
+ rle[i]["counts"] = rle[i]["counts"].encode()
+ m = mask.decode(rle)
+ m = np.sum(
+ m, axis=2
+ ) # sometimes there are multiple binary map (corresponding to multiple segs)
+ m = m.astype(np.uint8) # convert to np.uint8
+ masks.append(m)
+
+ masks = np.stack(masks, axis=0)
+
+ # if ds == 'grefcoco' and flag:
+ # import shutil
+ # image_name = image_path.split("/")[-1]
+ # save_dir = os.path.join("/group/30042/xlai/LISA_refactor_final/debug", image_name.split(".")[0])
+ # os.makedirs(save_dir, exist_ok=True)
+ # shutil.copy(image_path, save_dir)
+ # for i in range(masks.shape[0]):
+ # cv2.imwrite(os.path.join(save_dir, "{}_{}_{}.jpg".format(image_name, i, sampled_classes[i])), masks[i].astype(np.int32) * 100)
+
+ masks = torch.from_numpy(masks)
+ label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ questions,
+ sampled_classes,
+ )
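+
+
+if __name__ == "__main__":
+    # Self-contained sketch of the polygon -> RLE -> binary-mask conversion
+    # used in __getitem__ above; synthetic data only, no dataset files needed.
+    height, width = 8, 8
+    polygon = [[1.0, 1.0, 6.0, 1.0, 6.0, 6.0, 1.0, 6.0]]  # one square, [x1, y1, x2, y2, ...]
+    rle = mask.frPyObjects(polygon, height, width)
+    m = np.sum(mask.decode(rle), axis=2).astype(np.uint8)
+    print("foreground pixels:", int(m.sum()))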
diff --git a/utils/sem_seg_dataset.py b/utils/sem_seg_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..db2ba063cdf4014c26b46d5cff4e9eaba26bc8ec
--- /dev/null
+++ b/utils/sem_seg_dataset.py
@@ -0,0 +1,335 @@
+import glob
+import json
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from PIL import Image
+from pycocotools.coco import COCO
+from transformers import CLIPImageProcessor
+
+from model.llava import conversation as conversation_lib
+from model.segment_anything.utils.transforms import ResizeLongestSide
+
+from .utils import ANSWER_LIST, SHORT_QUESTION_LIST
+
+
+def init_mapillary(base_image_dir):
+ mapillary_data_root = os.path.join(base_image_dir, "mapillary")
+ with open(os.path.join(mapillary_data_root, "config_v2.0.json")) as f:
+ mapillary_classes = json.load(f)["labels"]
+ mapillary_classes = [x["readable"].lower() for x in mapillary_classes]
+ mapillary_classes = np.array(mapillary_classes)
+ mapillary_labels = sorted(
+ glob.glob(
+ os.path.join(mapillary_data_root, "training", "v2.0", "labels", "*.png")
+ )
+ )
+ mapillary_images = [
+ x.replace(".png", ".jpg").replace("v2.0/labels", "images")
+ for x in mapillary_labels
+ ]
+ print("mapillary: ", len(mapillary_images))
+ return mapillary_classes, mapillary_images, mapillary_labels
+
+
+def init_ade20k(base_image_dir):
+ with open("utils/ade20k_classes.json", "r") as f:
+ ade20k_classes = json.load(f)
+ ade20k_classes = np.array(ade20k_classes)
+ image_ids = sorted(
+ os.listdir(os.path.join(base_image_dir, "ade20k/images", "training"))
+ )
+ ade20k_image_ids = []
+ for x in image_ids:
+ if x.endswith(".jpg"):
+ ade20k_image_ids.append(x[:-4])
+ ade20k_images = []
+ for image_id in ade20k_image_ids: # self.descriptions:
+ ade20k_images.append(
+ os.path.join(
+ base_image_dir,
+ "ade20k",
+ "images",
+ "training",
+ "{}.jpg".format(image_id),
+ )
+ )
+ ade20k_labels = [
+ x.replace(".jpg", ".png").replace("images", "annotations")
+ for x in ade20k_images
+ ]
+ print("ade20k: ", len(ade20k_images))
+ return ade20k_classes, ade20k_images, ade20k_labels
+
+
+def init_cocostuff(base_image_dir):
+ cocostuff_classes = []
+ with open("utils/cocostuff_classes.txt") as f:
+ for line in f.readlines()[1:]:
+ cocostuff_classes.append(line.strip().split(": ")[-1])
+ cocostuff_classes = np.array(cocostuff_classes)
+ cocostuff_images = []
+
+ cocostuff_labels = glob.glob(
+ os.path.join(base_image_dir, "cocostuff", "train2017", "*.png")
+ )
+ cocostuff_images = [
+ x.replace(".png", ".jpg").replace("cocostuff", "coco") for x in cocostuff_labels
+ ]
+
+ print("cocostuff: ", len(cocostuff_images))
+ return cocostuff_classes, cocostuff_images, cocostuff_labels
+
+
+def init_paco_lvis(base_image_dir):
+ coco_api_paco_lvis = COCO(
+ os.path.join(
+ base_image_dir, "vlpart", "paco", "annotations", "paco_lvis_v1_train.json"
+ )
+ )
+ all_classes = coco_api_paco_lvis.loadCats(coco_api_paco_lvis.getCatIds())
+ class_map_paco_lvis = {}
+ for cat in all_classes:
+ cat_split = cat["name"].strip().split(":")
+ if len(cat_split) == 1:
+ name = cat_split[0].split("_(")[0]
+ else:
+ assert len(cat_split) == 2
+ obj, part = cat_split
+ obj = obj.split("_(")[0]
+ part = part.split("_(")[0]
+ name = (obj, part)
+ class_map_paco_lvis[cat["id"]] = name
+ img_ids = coco_api_paco_lvis.getImgIds()
+ print("paco_lvis: ", len(img_ids))
+ return class_map_paco_lvis, img_ids, coco_api_paco_lvis
+
+
+def init_pascal_part(base_image_dir):
+ coco_api_pascal_part = COCO(
+ os.path.join(base_image_dir, "vlpart", "pascal_part", "train.json")
+ )
+ all_classes = coco_api_pascal_part.loadCats(coco_api_pascal_part.getCatIds())
+ class_map_pascal_part = {}
+ for cat in all_classes:
+ cat_main, cat_part = cat["name"].strip().split(":")
+ name = (cat_main, cat_part)
+ class_map_pascal_part[cat["id"]] = name
+ img_ids = coco_api_pascal_part.getImgIds()
+ print("pascal_part: ", len(img_ids))
+ return class_map_pascal_part, img_ids, coco_api_pascal_part
+
+
+class SemSegDataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch=500 * 8 * 2 * 10,
+ precision: str = "fp32",
+ image_size: int = 224,
+ num_classes_per_sample: int = 3,
+ exclude_val=False,
+ sem_seg_data="ade20k||cocostuff||partimagenet||pascal_part||paco_lvis||mapillary",
+ ):
+ self.exclude_val = exclude_val
+ self.samples_per_epoch = samples_per_epoch
+ self.num_classes_per_sample = num_classes_per_sample
+
+ self.base_image_dir = base_image_dir
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.precision = precision
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ self.short_question_list = SHORT_QUESTION_LIST
+ self.answer_list = ANSWER_LIST
+
+ self.data2list = {}
+ self.data2classes = {}
+
+ self.sem_seg_datas = sem_seg_data.split("||")
+ for ds in self.sem_seg_datas:
+ classes, images, labels = eval("init_{}".format(ds))(base_image_dir)
+ self.data2list[ds] = (images, labels)
+ self.data2classes[ds] = classes
+
+ if "cocostuff" in self.sem_seg_datas:
+ self.cocostuff_class2index = {
+ c: i for i, c in enumerate(self.data2classes["cocostuff"])
+ }
+
+ def __len__(self):
+ return self.samples_per_epoch
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
+ ds = random.randint(0, len(self.sem_seg_datas) - 1)
+ ds = self.sem_seg_datas[ds]
+
+ if ds in ["paco_lvis", "pascal_part"]:
+ class_map = self.data2classes[ds]
+ img_ids, coco_api = self.data2list[ds]
+ idx = random.randint(0, len(img_ids) - 1)
+ img_id = img_ids[idx]
+ image_info = coco_api.loadImgs([img_id])[0]
+ file_name = image_info["file_name"]
+ if ds == "pascal_part":
+ file_name = os.path.join(
+ "VOCdevkit", "VOC2010", "JPEGImages", file_name
+ )
+ image_path = os.path.join(self.base_image_dir, "vlpart", ds, file_name)
+ elif ds == "paco_lvis":
+ image_path = os.path.join(self.base_image_dir, "coco", file_name)
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(
+ image, return_tensors="pt"
+ )["pixel_values"][0]
+ image = self.transform.apply_image(image) # preprocess image for sam
+ resize = image.shape[:2]
+ annIds = coco_api.getAnnIds(imgIds=image_info["id"])
+ anns = coco_api.loadAnns(annIds)
+ if len(anns) == 0:
+ return self.__getitem__(0)
+ if len(anns) >= self.num_classes_per_sample:
+ sampled_anns = np.random.choice(
+ anns, size=self.num_classes_per_sample, replace=False
+ ).tolist()
+ else:
+ sampled_anns = anns
+ sampled_classes = []
+ for ann in sampled_anns:
+ sampled_cls = class_map[ann["category_id"]]
+ if isinstance(sampled_cls, tuple):
+ obj, part = sampled_cls
+ if random.random() < 0.5:
+ name = obj + " " + part
+ else:
+ name = "the {} of the {}".format(part, obj)
+ else:
+ name = sampled_cls
+ sampled_classes.append(name)
+
+ elif ds in ["ade20k", "cocostuff", "mapillary"]:
+ image, labels = self.data2list[ds]
+ idx = random.randint(0, len(image) - 1)
+ image_path = image[idx]
+ label_path = labels[idx]
+ label = Image.open(label_path)
+ label = np.array(label)
+ if ds == "ade20k":
+ label[label == 0] = 255
+ label -= 1
+ label[label == 254] = 255
+ elif ds == "cocostuff":
+ for c, i in self.cocostuff_class2index.items():
+ if "-" in c:
+ label[label == i] = 255
+ img = cv2.imread(image_path)
+ image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ # preprocess image for clip
+ image_clip = self.clip_image_processor.preprocess(
+ image, return_tensors="pt"
+ )["pixel_values"][0]
+ image = self.transform.apply_image(image) # preprocess image for sam
+ resize = image.shape[:2]
+ unique_label = np.unique(label).tolist()
+ if 255 in unique_label:
+ unique_label.remove(255)
+ if len(unique_label) == 0:
+ return self.__getitem__(0)
+
+ classes = [self.data2classes[ds][class_id] for class_id in unique_label]
+ if len(classes) >= self.num_classes_per_sample:
+ sampled_classes = np.random.choice(
+ classes, size=self.num_classes_per_sample, replace=False
+ ).tolist()
+ else:
+ sampled_classes = classes
+
+ questions = []
+ answers = []
+ class_ids = []
+ for sampled_cls in sampled_classes:
+ text = sampled_cls
+
+ assert len(text.split("||")) == 1
+ question_template = random.choice(self.short_question_list)
+ questions.append(question_template.format(class_name=text.lower()))
+
+ answers.append(random.choice(self.answer_list))
+
+ if ds in ["paco_lvis", "pascal_part"]:
+ continue
+
+ class_id = self.data2classes[ds].tolist().index(sampled_cls)
+ class_ids.append(class_id)
+
+ conversations = []
+ conv = conversation_lib.default_conversation.copy()
+
+ i = 0
+ while i < len(questions):
+ conv.messages = []
+ conv.append_message(conv.roles[0], questions[i])
+ conv.append_message(conv.roles[1], answers[i])
+ conversations.append(conv.get_prompt())
+ i += 1
+
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
+ if ds in ["paco_lvis", "pascal_part"]:
+ masks = []
+ for ann in sampled_anns:
+ try:
+ masks.append(coco_api.annToMask(ann))
+ except Exception as e:
+ print(e)
+ return self.__getitem__(0)
+
+ masks = np.stack(masks, axis=0)
+ masks = torch.from_numpy(masks)
+ label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label
+
+ else:
+ label = torch.from_numpy(label).long()
+ masks = []
+ for class_id in class_ids:
+ masks.append(label == class_id)
+ masks = torch.stack(masks, dim=0)
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ questions,
+ sampled_classes,
+ )
diff --git a/utils/utils.py b/utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..51930264e75247070ca3668ca029a2da19ec08e9
--- /dev/null
+++ b/utils/utils.py
@@ -0,0 +1,163 @@
+from enum import Enum
+
+import numpy as np
+import torch
+import torch.distributed as dist
+
+IGNORE_INDEX = -100
+IMAGE_TOKEN_INDEX = -200
+DEFAULT_IMAGE_TOKEN = ""
+DEFAULT_IMAGE_PATCH_TOKEN = ""
+DEFAULT_IM_START_TOKEN = ""
+DEFAULT_IM_END_TOKEN = ""
+
+SHORT_QUESTION_LIST = [
+ DEFAULT_IMAGE_TOKEN + "\n" + "Can you segment the {class_name} in this image?",
+ DEFAULT_IMAGE_TOKEN + "\n" + "Please segment the {class_name} in this image.",
+ DEFAULT_IMAGE_TOKEN
+ + "\n"
+ + "What is {class_name} in this image? Please respond with segmentation mask.",
+ DEFAULT_IMAGE_TOKEN
+ + "\n"
+ + "What is {class_name} in this image? Please output segmentation mask.",
+]
+
+LONG_QUESTION_LIST = [
+ DEFAULT_IMAGE_TOKEN + "\n" + "{sent} Please respond with segmentation mask.",
+ DEFAULT_IMAGE_TOKEN + "\n" + "{sent} Please output segmentation mask.",
+]
+
+EXPLANATORY_QUESTION_LIST = [
+ "Please output segmentation mask and explain why.",
+ "Please output segmentation mask and explain the reason.",
+ "Please output segmentation mask and give some explanation.",
+]
+
+ANSWER_LIST = [
+ "It is [SEG].",
+ "Sure, [SEG].",
+ "Sure, it is [SEG].",
+ "Sure, the segmentation result is [SEG].",
+ "[SEG].",
+]
+
+
+class Summary(Enum):
+ NONE = 0
+ AVERAGE = 1
+ SUM = 2
+ COUNT = 3
+
+
+class AverageMeter(object):
+ """Computes and stores the average and current value"""
+
+ def __init__(self, name, fmt=":f", summary_type=Summary.AVERAGE):
+ self.name = name
+ self.fmt = fmt
+ self.summary_type = summary_type
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+
+ def update(self, val, n=1):
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count
+
+ def all_reduce(self):
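+ # sum the running totals across all distributed ranks, then recompute the average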
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ if isinstance(self.sum, np.ndarray):
+ total = torch.tensor(
+ self.sum.tolist()
+ + [
+ self.count,
+ ],
+ dtype=torch.float32,
+ device=device,
+ )
+ else:
+ total = torch.tensor(
+ [self.sum, self.count], dtype=torch.float32, device=device
+ )
+
+ dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)
+ if total.shape[0] > 2:
+ self.sum, self.count = total[:-1].cpu().numpy(), total[-1].cpu().item()
+ else:
+ self.sum, self.count = total.tolist()
+ self.avg = self.sum / (self.count + 1e-5)
+
+ def __str__(self):
+ fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
+ return fmtstr.format(**self.__dict__)
+
+ def summary(self):
+ fmtstr = ""
+ if self.summary_type is Summary.NONE:
+ fmtstr = ""
+ elif self.summary_type is Summary.AVERAGE:
+ fmtstr = "{name} {avg:.3f}"
+ elif self.summary_type is Summary.SUM:
+ fmtstr = "{name} {sum:.3f}"
+ elif self.summary_type is Summary.COUNT:
+ fmtstr = "{name} {count:.3f}"
+ else:
+ raise ValueError("invalid summary type %r" % self.summary_type)
+
+ return fmtstr.format(**self.__dict__)
+
+
+def intersectionAndUnionGPU(output, target, K, ignore_index=255):
+ # 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.
+ assert output.dim() in [1, 2, 3]
+ assert output.shape == target.shape
+ output = output.view(-1)
+ target = target.view(-1)
+ output[target == ignore_index] = ignore_index
+ intersection = output[output == target]
+ area_intersection = torch.histc(intersection, bins=K, min=0, max=K - 1)
+ area_output = torch.histc(output, bins=K, min=0, max=K - 1)
+ area_target = torch.histc(target, bins=K, min=0, max=K - 1)
+ area_union = area_output + area_target - area_intersection
+ return area_intersection, area_union, area_target
+
+
+class ProgressMeter(object):
+ def __init__(self, num_batches, meters, prefix=""):
+ self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
+ self.meters = meters
+ self.prefix = prefix
+
+ def display(self, batch):
+ entries = [self.prefix + self.batch_fmtstr.format(batch)]
+ entries += [str(meter) for meter in self.meters]
+ print("\t".join(entries))
+
+ def display_summary(self):
+ entries = [" *"]
+ entries += [meter.summary() for meter in self.meters]
+ print(" ".join(entries))
+
+ def _get_batch_fmtstr(self, num_batches):
+ num_digits = len(str(num_batches))
+ fmt = "{:" + str(num_digits) + "d}"
+ return "[" + fmt + "/" + fmt.format(num_batches) + "]"
+
+
+def dict_to_cuda(input_dict):
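+ # move every tensor (or list of tensors) in the batch dict onto the GPU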
+ for k, v in input_dict.items():
+ if isinstance(input_dict[k], torch.Tensor):
+ input_dict[k] = v.cuda(non_blocking=True)
+ elif (
+ isinstance(input_dict[k], list)
+ and len(input_dict[k]) > 0
+ and isinstance(input_dict[k][0], torch.Tensor)
+ ):
+ input_dict[k] = [ele.cuda(non_blocking=True) for ele in v]
+ return input_dict
diff --git a/utils/vqa_dataset.py b/utils/vqa_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd9c6e909f25d558cbf3a5de604405ddf617f6af
--- /dev/null
+++ b/utils/vqa_dataset.py
@@ -0,0 +1,135 @@
+import json
+import os
+import random
+
+import cv2
+import torch
+import torch.nn.functional as F
+from transformers import CLIPImageProcessor
+
+from model.llava import conversation as conversation_lib
+from model.segment_anything.utils.transforms import ResizeLongestSide
+
+from .utils import DEFAULT_IMAGE_TOKEN
+
+
+def preprocess_multimodal(source, mm_use_im_start_end):
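+ # ensure the image token appears exactly once, at the start of each turn that mentions it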
+ for sentence in source:
+ if DEFAULT_IMAGE_TOKEN in sentence["value"]:
+ sentence["value"] = (
+ sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "").strip()
+ )
+ sentence["value"] = DEFAULT_IMAGE_TOKEN + "\n" + sentence["value"]
+ sentence["value"] = sentence["value"].strip()
+ if "mmtag" in conversation_lib.default_conversation.version:
+ sentence["value"] = sentence["value"].replace(
+ DEFAULT_IMAGE_TOKEN, "" + DEFAULT_IMAGE_TOKEN + ""
+ )
+ return source
+
+
+class VQADataset(torch.utils.data.Dataset):
+ pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)
+ pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)
+ img_size = 1024
+ ignore_label = 255
+
+ def __init__(
+ self,
+ base_image_dir,
+ tokenizer,
+ vision_tower,
+ samples_per_epoch=500 * 8 * 2 * 10,
+ precision: str = "fp32",
+ image_size: int = 224,
+ num_classes_per_sample: int = 3,
+ exclude_val=False,
+ vqa_data="llava_instruct_150k",
+ ):
+ self.exclude_val = exclude_val
+ self.samples_per_epoch = samples_per_epoch
+ self.num_classes_per_sample = num_classes_per_sample
+
+ self.base_image_dir = base_image_dir
+ self.image_size = image_size
+ self.tokenizer = tokenizer
+ self.precision = precision
+ self.transform = ResizeLongestSide(image_size)
+ self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
+
+ DATA_DIR = os.path.join(base_image_dir, "llava_dataset")
+ self.vqa_image_root = os.path.join(base_image_dir, "coco/train2017")
+ with open(os.path.join(DATA_DIR, "{}.json".format(vqa_data))) as f:
+ vqa_data = json.load(f)
+ self.vqa_data = vqa_data
+
+ print("vqa_data: ", len(self.vqa_data))
+
+ def __len__(self):
+ return self.samples_per_epoch
+
+ def preprocess(self, x: torch.Tensor) -> torch.Tensor:
+ """Normalize pixel values and pad to a square input."""
+ # Normalize colors
+ x = (x - self.pixel_mean) / self.pixel_std
+
+ # Pad
+ h, w = x.shape[-2:]
+ padh = self.img_size - h
+ padw = self.img_size - w
+ x = F.pad(x, (0, padw, 0, padh))
+ return x
+
+ def __getitem__(self, idx):
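+ # idx is ignored: a random item is drawn each call, so epoch length is set by samples_per_epoch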
+ idx = random.randint(0, len(self.vqa_data) - 1)
+ item = self.vqa_data[idx]
+ image_path = os.path.join(self.vqa_image_root, item["image"])
+ image = cv2.imread(image_path)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ ori_size = image.shape[:2]
+ image_clip = self.clip_image_processor.preprocess(image, return_tensors="pt")[
+ "pixel_values"
+ ][
+ 0
+ ] # preprocess image for clip
+
+ image = self.transform.apply_image(image) # preprocess image for sam
+ resize = image.shape[:2]
+
+ conv = conversation_lib.default_conversation.copy()
+ source = item["conversations"]
+ source = preprocess_multimodal(
+ source,
+ mm_use_im_start_end=conv.sep_style == conversation_lib.SeparatorStyle.TWO,
+ )
+ roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
+ conversations = []
+ if roles[source[0]["from"]] != conv.roles[0]:
+ # Skip the first one if it is not from human
+ source = source[1:]
+ conv.messages = []
+ for j, sentence in enumerate(source):
+ role = roles[sentence["from"]]
+ assert role == conv.roles[j % 2], f"unexpected role order at turn {j}"
+ conv.append_message(role, sentence["value"])
+ conversations.append(conv.get_prompt())
+
+ questions = conversations
+ sampled_classes = conversations
+
+ image = self.preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
+
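+ # VQA samples carry no segmentation targets: return an empty mask stack and an all-ignore label so the output tuple matches the segmentation datasets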
+ masks = torch.rand(0, *ori_size)
+ label = torch.ones(ori_size) * self.ignore_label
+
+ return (
+ image_path,
+ image,
+ image_clip,
+ conversations,
+ masks,
+ label,
+ resize,
+ questions,
+ sampled_classes,
+ )
diff --git a/vis_output/.ipynb_checkpoints/00_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/00_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9d356bddfbbb466fa064f42104dc86887babee4d
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/00_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/00_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/00_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2ffa77f18eda27cbe4ea853af263edb20666da6c
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/00_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/0_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/0_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..624b29d43fa7e0f6b9e288bdf535adf9588dbc4d
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/0_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/0_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/0_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b1dadae0d62a910e0a22f7d5f11c892d8478136c
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/0_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/image_primary_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/image_primary_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..afaa7d6dc8cedfa990c2cc803f0133fba45fd4d5
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/image_primary_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/image_primary_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/image_primary_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6fe38f706a42322482346bc2214a1fdab8649946
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/image_primary_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/image_wrist_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/image_wrist_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6fbba1d10748c38d832ab288a8b2687a05d8c149
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/image_wrist_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/image_wrist_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/image_wrist_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fb5a1052ceeb4378c81fb77185ec596883932f29
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/image_wrist_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/my_workspace-checkpoint.JPG b/vis_output/.ipynb_checkpoints/my_workspace-checkpoint.JPG
new file mode 100644
index 0000000000000000000000000000000000000000..a4eb3585b40dca1a41320beee12bbf641eae5724
--- /dev/null
+++ b/vis_output/.ipynb_checkpoints/my_workspace-checkpoint.JPG
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d449cdd7d2dd7730bd835dac1b95c82731c9abe6d4b2fdf6bcebbe77e1d6153c
+size 176953
diff --git a/vis_output/.ipynb_checkpoints/my_workspace_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/my_workspace_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8563b30f7cf3dbbd7a9acd513fd76ec543694546
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/my_workspace_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/my_workspace_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/my_workspace_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ad67a29b0da6ff737302b7dd08c98ccd8a574593
--- /dev/null
+++ b/vis_output/.ipynb_checkpoints/my_workspace_masked_img_0-checkpoint.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3228c1bb0f71a854371f5a0d1a36e1ff508a026ea502e6ad40c5ccc5fb8be2ca
+size 179611
diff --git a/vis_output/.ipynb_checkpoints/sample_10_view_0_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_10_view_0_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..93667fa64154f4d1d3ca3a88a74cb76beca57777
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_10_view_0_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_10_view_0_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_10_view_0_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f505b9056cb2fdc7a479b729ff0f59daa850c2ce
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_10_view_0_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_10_view_1_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_10_view_1_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..164861f640fb6f9d9fe832b1303c4b2c1ea15adf
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_10_view_1_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_10_view_1_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_10_view_1_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a034359d8a42690b4af2913c3a2946a1fc5776dc
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_10_view_1_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_14_view_0_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_14_view_0_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7a3900934c67c56e1fe2f66cc977f523d404202c
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_14_view_0_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_14_view_0_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_14_view_0_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..99982da77d4ff1f6c56a6d5bf00cd7631bf7a07c
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_14_view_0_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_14_view_1_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_14_view_1_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5de6f88cf2d3f264c5d0b89502e1bd51ff58f1b9
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_14_view_1_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_14_view_1_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_14_view_1_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..063eb8575b146e7257c424e8ec364607d37434a2
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_14_view_1_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_1_view_0_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_1_view_0_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..50d83d479313ee98c82615ee0f0e3013ee4a620f
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_1_view_0_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_1_view_0_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_1_view_0_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a9a0ad908a5d7b63623349c87e4c456ed56f8d18
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_1_view_0_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_1_view_1_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_1_view_1_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..449af992db3a574e8be5c570546c2821274409db
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_1_view_1_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_1_view_1_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_1_view_1_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fae37d6db799e15370d03b0c118acc910b1e485a
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_1_view_1_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_2_view_0_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_2_view_0_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..353ac6c9a5f3f7578ef386ef26ae1872ed2f4e17
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_2_view_0_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_2_view_0_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_2_view_0_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0033c88bade80622237bf3b6e5ccf25412a47a63
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_2_view_0_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_2_view_1_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_2_view_1_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d99ed349b4d64c11ce4c193f2e38a17b2208b21a
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_2_view_1_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_2_view_1_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_2_view_1_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..235c80e0705c292a044bf953c142c60f5b2abea9
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_2_view_1_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_5_view_0_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_5_view_0_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9fe3372a4191e23daf82b163be00fe0c40047a96
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_5_view_0_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_5_view_0_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_5_view_0_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..76f4b8a60b56982c662eaad8f1d61578d8d71013
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_5_view_0_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_5_view_1_mask_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_5_view_1_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..76c0ec7d060c9eda52e8882fd18f5337b8891da2
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_5_view_1_mask_0-checkpoint.jpg differ
diff --git a/vis_output/.ipynb_checkpoints/sample_5_view_1_masked_img_0-checkpoint.jpg b/vis_output/.ipynb_checkpoints/sample_5_view_1_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..372d4a36b74b2fa38829dcba0cdabc3b22a35097
Binary files /dev/null and b/vis_output/.ipynb_checkpoints/sample_5_view_1_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output/00_mask_0.jpg b/vis_output/00_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9d356bddfbbb466fa064f42104dc86887babee4d
Binary files /dev/null and b/vis_output/00_mask_0.jpg differ
diff --git a/vis_output/00_masked_img_0.jpg b/vis_output/00_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2ffa77f18eda27cbe4ea853af263edb20666da6c
Binary files /dev/null and b/vis_output/00_masked_img_0.jpg differ
diff --git a/vis_output/0_mask_0.jpg b/vis_output/0_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..624b29d43fa7e0f6b9e288bdf535adf9588dbc4d
Binary files /dev/null and b/vis_output/0_mask_0.jpg differ
diff --git a/vis_output/0_masked_img_0.jpg b/vis_output/0_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b1dadae0d62a910e0a22f7d5f11c892d8478136c
Binary files /dev/null and b/vis_output/0_masked_img_0.jpg differ
diff --git a/vis_output/image_primary_mask_0.jpg b/vis_output/image_primary_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..aca384c9d0363af4a89c07b9250b2eaec757682c
Binary files /dev/null and b/vis_output/image_primary_mask_0.jpg differ
diff --git a/vis_output/image_primary_masked_img_0.jpg b/vis_output/image_primary_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0cdc646f85a7c8f676e9961a48ab6e028fcfc409
Binary files /dev/null and b/vis_output/image_primary_masked_img_0.jpg differ
diff --git a/vis_output/image_wrist_mask_0.jpg b/vis_output/image_wrist_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c823ee4379bb3c3f64bd857923728da4771623dc
Binary files /dev/null and b/vis_output/image_wrist_mask_0.jpg differ
diff --git a/vis_output/image_wrist_masked_img_0.jpg b/vis_output/image_wrist_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5052e310b18635266cd60c88da123ef0712208cc
Binary files /dev/null and b/vis_output/image_wrist_masked_img_0.jpg differ
diff --git a/vis_output/my_workspace.JPG b/vis_output/my_workspace.JPG
new file mode 100644
index 0000000000000000000000000000000000000000..a4eb3585b40dca1a41320beee12bbf641eae5724
--- /dev/null
+++ b/vis_output/my_workspace.JPG
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d449cdd7d2dd7730bd835dac1b95c82731c9abe6d4b2fdf6bcebbe77e1d6153c
+size 176953
diff --git a/vis_output/my_workspace_mask_0.jpg b/vis_output/my_workspace_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8563b30f7cf3dbbd7a9acd513fd76ec543694546
Binary files /dev/null and b/vis_output/my_workspace_mask_0.jpg differ
diff --git a/vis_output/my_workspace_masked_img_0.jpg b/vis_output/my_workspace_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ad67a29b0da6ff737302b7dd08c98ccd8a574593
--- /dev/null
+++ b/vis_output/my_workspace_masked_img_0.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3228c1bb0f71a854371f5a0d1a36e1ff508a026ea502e6ad40c5ccc5fb8be2ca
+size 179611
diff --git a/vis_output/sample_10_view_0_mask_0.jpg b/vis_output/sample_10_view_0_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..93667fa64154f4d1d3ca3a88a74cb76beca57777
Binary files /dev/null and b/vis_output/sample_10_view_0_mask_0.jpg differ
diff --git a/vis_output/sample_10_view_0_masked_img_0.jpg b/vis_output/sample_10_view_0_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f505b9056cb2fdc7a479b729ff0f59daa850c2ce
Binary files /dev/null and b/vis_output/sample_10_view_0_masked_img_0.jpg differ
diff --git a/vis_output/sample_10_view_1_mask_0.jpg b/vis_output/sample_10_view_1_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..164861f640fb6f9d9fe832b1303c4b2c1ea15adf
Binary files /dev/null and b/vis_output/sample_10_view_1_mask_0.jpg differ
diff --git a/vis_output/sample_10_view_1_masked_img_0.jpg b/vis_output/sample_10_view_1_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a034359d8a42690b4af2913c3a2946a1fc5776dc
Binary files /dev/null and b/vis_output/sample_10_view_1_masked_img_0.jpg differ
diff --git a/vis_output/sample_14_view_0_mask_0.jpg b/vis_output/sample_14_view_0_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7a3900934c67c56e1fe2f66cc977f523d404202c
Binary files /dev/null and b/vis_output/sample_14_view_0_mask_0.jpg differ
diff --git a/vis_output/sample_14_view_0_masked_img_0.jpg b/vis_output/sample_14_view_0_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..99982da77d4ff1f6c56a6d5bf00cd7631bf7a07c
Binary files /dev/null and b/vis_output/sample_14_view_0_masked_img_0.jpg differ
diff --git a/vis_output/sample_14_view_1_mask_0.jpg b/vis_output/sample_14_view_1_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5de6f88cf2d3f264c5d0b89502e1bd51ff58f1b9
Binary files /dev/null and b/vis_output/sample_14_view_1_mask_0.jpg differ
diff --git a/vis_output/sample_14_view_1_masked_img_0.jpg b/vis_output/sample_14_view_1_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..063eb8575b146e7257c424e8ec364607d37434a2
Binary files /dev/null and b/vis_output/sample_14_view_1_masked_img_0.jpg differ
diff --git a/vis_output/sample_1_view_0_mask_0.jpg b/vis_output/sample_1_view_0_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..15a05e4bd5609a415468a8296aa5292e8ce40e6b
Binary files /dev/null and b/vis_output/sample_1_view_0_mask_0.jpg differ
diff --git a/vis_output/sample_1_view_0_masked_img_0.jpg b/vis_output/sample_1_view_0_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..93d164ff18490caf0c5dee8e6b0518f3099f8302
Binary files /dev/null and b/vis_output/sample_1_view_0_masked_img_0.jpg differ
diff --git a/vis_output/sample_1_view_1_mask_0.jpg b/vis_output/sample_1_view_1_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a5c7ea8cad7aeb3f7fe18333c9ed6c5e0db902ac
Binary files /dev/null and b/vis_output/sample_1_view_1_mask_0.jpg differ
diff --git a/vis_output/sample_1_view_1_masked_img_0.jpg b/vis_output/sample_1_view_1_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cc3f60fdda50a9b770da4a5f8e490aca29253d15
Binary files /dev/null and b/vis_output/sample_1_view_1_masked_img_0.jpg differ
diff --git a/vis_output/sample_2_view_0_mask_0.jpg b/vis_output/sample_2_view_0_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..586b21b4c359b0eef0171b64a0d60a1dcf2a60d2
Binary files /dev/null and b/vis_output/sample_2_view_0_mask_0.jpg differ
diff --git a/vis_output/sample_2_view_0_masked_img_0.jpg b/vis_output/sample_2_view_0_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1eb2b7fc87c1c07228e111f2f6c83648ec13d622
Binary files /dev/null and b/vis_output/sample_2_view_0_masked_img_0.jpg differ
diff --git a/vis_output/sample_2_view_1_mask_0.jpg b/vis_output/sample_2_view_1_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d99ed349b4d64c11ce4c193f2e38a17b2208b21a
Binary files /dev/null and b/vis_output/sample_2_view_1_mask_0.jpg differ
diff --git a/vis_output/sample_2_view_1_masked_img_0.jpg b/vis_output/sample_2_view_1_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..235c80e0705c292a044bf953c142c60f5b2abea9
Binary files /dev/null and b/vis_output/sample_2_view_1_masked_img_0.jpg differ
diff --git a/vis_output/sample_5_view_0_mask_0.jpg b/vis_output/sample_5_view_0_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9fe3372a4191e23daf82b163be00fe0c40047a96
Binary files /dev/null and b/vis_output/sample_5_view_0_mask_0.jpg differ
diff --git a/vis_output/sample_5_view_0_masked_img_0.jpg b/vis_output/sample_5_view_0_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..76f4b8a60b56982c662eaad8f1d61578d8d71013
Binary files /dev/null and b/vis_output/sample_5_view_0_masked_img_0.jpg differ
diff --git a/vis_output/sample_5_view_1_mask_0.jpg b/vis_output/sample_5_view_1_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..76c0ec7d060c9eda52e8882fd18f5337b8891da2
Binary files /dev/null and b/vis_output/sample_5_view_1_mask_0.jpg differ
diff --git a/vis_output/sample_5_view_1_masked_img_0.jpg b/vis_output/sample_5_view_1_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..372d4a36b74b2fa38829dcba0cdabc3b22a35097
Binary files /dev/null and b/vis_output/sample_5_view_1_masked_img_0.jpg differ
diff --git a/vis_output_prefill/.ipynb_checkpoints/image_primary_mask_0-checkpoint.jpg b/vis_output_prefill/.ipynb_checkpoints/image_primary_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9c677fa469368092e2292d5ebcc3d8969f0a7af6
Binary files /dev/null and b/vis_output_prefill/.ipynb_checkpoints/image_primary_mask_0-checkpoint.jpg differ
diff --git a/vis_output_prefill/.ipynb_checkpoints/image_primary_masked_img_0-checkpoint.jpg b/vis_output_prefill/.ipynb_checkpoints/image_primary_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..100bea7d9c65b8a2b31c86da8946db5fedb7ddfd
Binary files /dev/null and b/vis_output_prefill/.ipynb_checkpoints/image_primary_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output_prefill/.ipynb_checkpoints/image_wrist_mask_0-checkpoint.jpg b/vis_output_prefill/.ipynb_checkpoints/image_wrist_mask_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0a8b8557961e3f543177c708def37d1597c6acde
Binary files /dev/null and b/vis_output_prefill/.ipynb_checkpoints/image_wrist_mask_0-checkpoint.jpg differ
diff --git a/vis_output_prefill/.ipynb_checkpoints/image_wrist_masked_img_0-checkpoint.jpg b/vis_output_prefill/.ipynb_checkpoints/image_wrist_masked_img_0-checkpoint.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c92630396cb72941964c955ba8212e0f8c426ff2
Binary files /dev/null and b/vis_output_prefill/.ipynb_checkpoints/image_wrist_masked_img_0-checkpoint.jpg differ
diff --git a/vis_output_prefill/image_primary_mask_0.jpg b/vis_output_prefill/image_primary_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4da51ee2f9248c7530056280ac28b38eb63451fd
Binary files /dev/null and b/vis_output_prefill/image_primary_mask_0.jpg differ
diff --git a/vis_output_prefill/image_primary_masked_img_0.jpg b/vis_output_prefill/image_primary_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..246316756b5306a9bba5a163ef8366dc4a4a592e
Binary files /dev/null and b/vis_output_prefill/image_primary_masked_img_0.jpg differ
diff --git a/vis_output_prefill/image_wrist_mask_0.jpg b/vis_output_prefill/image_wrist_mask_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e7c2603e806531f46a541685d1a81799b6b062f4
Binary files /dev/null and b/vis_output_prefill/image_wrist_mask_0.jpg differ
diff --git a/vis_output_prefill/image_wrist_masked_img_0.jpg b/vis_output_prefill/image_wrist_masked_img_0.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2a1a46d693c01ec5357b15bf65eef7a3195b9281
Binary files /dev/null and b/vis_output_prefill/image_wrist_masked_img_0.jpg differ