repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/libs/pointops/functions/attention.py | libs/pointops/functions/attention.py | import torch
from torch.autograd import Function
from pointops._C import (
attention_relation_step_forward_cuda,
attention_relation_step_backward_cuda,
attention_fusion_step_forward_cuda,
attention_fusion_step_backward_cuda,
)
class AttentionRelationStep(Function):
    """Autograd wrapper for the CUDA scatter-attention "relation" kernel.

    For every (target, reference) index pair it computes a per-group score
    between the corresponding query and key vectors, channel-weighted by
    ``weight``.
    """

    @staticmethod
    def forward(ctx, query, key, weight, index_target, index_refer):
        """
        input - query: (n, g, c), key: (n, g, c), weight: (c) 1_c for scatter attention,
                index_target: (m), index_refer: (m)
        output - relation: (m, g)
        """
        # The CUDA kernel walks raw memory, so every operand must be contiguous.
        assert (
            query.is_contiguous()
            and key.is_contiguous()
            and index_target.is_contiguous()
            and index_refer.is_contiguous()
            and weight.is_contiguous()
        )
        assert index_target.shape[0] == index_refer.shape[0]
        _, g, c = query.shape
        m = index_target.shape[0]
        # torch.zeros replaces the deprecated torch.cuda.FloatTensor legacy
        # constructor; allocating on query.device (rather than the *current*
        # CUDA device) also keeps the output on the right GPU.
        output = torch.zeros(m, g, dtype=torch.float32, device=query.device)
        attention_relation_step_forward_cuda(
            m, g, c, query, key, weight, index_target.int(), index_refer.int(), output
        )
        ctx.save_for_backward(query, key, weight, index_target, index_refer)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """
        input: grad_output: (m, g)
        output: grad_query: (n, g, c), grad_key: (n, g, c), None, None, None
        """
        query, key, weight, index_target, index_refer = ctx.saved_tensors
        n, g, c = query.shape
        m = index_target.shape[0]
        grad_query = torch.zeros(n, g, c, dtype=torch.float32, device=query.device)
        grad_key = torch.zeros(n, g, c, dtype=torch.float32, device=query.device)
        # NOTE(review): the kernel also accumulates into grad_weight, but no
        # gradient is returned for `weight` below -- presumably it is a fixed
        # all-ones vector (see forward docstring); the buffer only satisfies
        # the kernel's signature.
        grad_weight = torch.zeros(c, dtype=torch.float32, device=query.device)
        attention_relation_step_backward_cuda(
            m,
            g,
            c,
            query,
            grad_query,
            key,
            grad_key,
            weight,
            grad_weight,
            index_target.int(),
            index_refer.int(),
            grad_output,
        )
        return grad_query, grad_key, None, None, None
class AttentionFusionStep(Function):
    """Autograd wrapper for the CUDA scatter-attention "fusion" kernel.

    Scatters ``weight``-scaled value vectors from the reference indices into
    the target indices, producing one fused feature per point.
    """

    @staticmethod
    def forward(ctx, weight, value, index_target, index_refer):
        """
        input - weight: (m, g), value: (n, g, c)
                index_target: (m), index_value: (m)
        output - output: (n, g, c)
        """
        # The CUDA kernel walks raw memory, so every operand must be
        # contiguous.  (The original assert checked `weight` twice; the
        # duplicate has been removed.)
        assert (
            weight.is_contiguous()
            and value.is_contiguous()
            and index_target.is_contiguous()
            and index_refer.is_contiguous()
        )
        assert index_target.shape[0] == index_refer.shape[0]
        n, g, c = value.shape
        m = index_refer.shape[0]
        # torch.zeros replaces the deprecated torch.cuda.FloatTensor legacy
        # constructor and pins the allocation to value's device.
        output = torch.zeros(n, g, c, dtype=torch.float32, device=value.device)
        attention_fusion_step_forward_cuda(
            m, g, c, weight, value, index_target.int(), index_refer.int(), output
        )
        ctx.save_for_backward(weight, value, index_target, index_refer)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """
        input: grad_output: (n, g, c)
        output: grad_weight: (m, g), grad_value: (n, g, c), none, none
        """
        weight, value, index_target, index_refer = ctx.saved_tensors
        n, g, c = value.shape
        m = index_target.shape[0]
        grad_weight = torch.zeros(m, g, dtype=torch.float32, device=value.device)
        grad_value = torch.zeros(n, g, c, dtype=torch.float32, device=value.device)
        attention_fusion_step_backward_cuda(
            m,
            g,
            c,
            weight,
            grad_weight,
            value,
            grad_value,
            index_target.int(),
            index_refer.int(),
            grad_output,
        )
        return grad_weight, grad_value, None, None
# Functional aliases: autograd Functions must be invoked through .apply.
attention_relation_step = AttentionRelationStep.apply
attention_fusion_step = AttentionFusionStep.apply
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/libs/pointops/functions/aggregation.py | libs/pointops/functions/aggregation.py | import torch
from torch.autograd import Function
from pointops._C import aggregation_forward_cuda, aggregation_backward_cuda
class Aggregation(Function):
    """Autograd wrapper for the CUDA neighborhood-aggregation kernel."""

    @staticmethod
    def forward(ctx, input, position, weight, idx):
        """
        input: input: (n, c), position: (n, nsample, c), weight : (n, nsample, c'), idx: (n, nsample)
        output: (n, c)
        """
        # The CUDA kernel walks raw memory, so operands must be contiguous.
        # NOTE(review): `idx` is passed through unchecked and un-cast --
        # presumably callers already supply a contiguous int tensor; confirm
        # against the kernel signature.
        assert (
            input.is_contiguous()
            and position.is_contiguous()
            and weight.is_contiguous()
        )
        n, nsample, c = position.shape
        w_c = weight.shape[-1]
        # torch.zeros replaces the deprecated torch.cuda.FloatTensor legacy
        # constructor and pins the allocation to input's device.
        output = torch.zeros(n, c, dtype=torch.float32, device=input.device)
        aggregation_forward_cuda(
            n, nsample, c, w_c, input, position, weight, idx, output
        )
        ctx.save_for_backward(input, position, weight, idx)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """
        input: grad_out: (n, c)
        output: grad_input: (n, c), grad_position: (n, nsample, c), grad_weight : (n, nsample, c')
        """
        input, position, weight, idx = ctx.saved_tensors
        n, nsample, c = position.shape
        w_c = weight.shape[-1]
        grad_input = torch.zeros(n, c, dtype=torch.float32, device=input.device)
        grad_position = torch.zeros(n, nsample, c, dtype=torch.float32, device=input.device)
        grad_weight = torch.zeros(n, nsample, w_c, dtype=torch.float32, device=input.device)
        aggregation_backward_cuda(
            n,
            nsample,
            c,
            w_c,
            input,
            position,
            weight,
            idx,
            grad_output,
            grad_input,
            grad_position,
            grad_weight,
        )
        # `idx` is an integer index tensor and receives no gradient.
        return grad_input, grad_position, grad_weight, None
# Functional alias: autograd Functions must be invoked through .apply.
aggregation = Aggregation.apply
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/train_net.py | train_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This script is a simplified version of the training script in detectron2/tools.
"""
from functools import partial
import copy
import itertools
import logging
import os
from collections import OrderedDict
from typing import Any, Dict, List, Set
import detectron2.utils.comm as comm
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import (
DefaultTrainer,
default_argument_parser,
default_setup,
launch,
)
from detectron2.evaluation import (
    CityscapesSemSegEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    SemSegEvaluator,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from detectron2.utils.events import CommonMetricPrinter, JSONWriter
# MaskFormer
from mask2former import SemanticSegmentorWithTTA, add_mask_former_config
from mask2former.data import (
COCOFullTaskNewBaselineDatasetMapper,
MaskFormerInstanceDatasetMapper,
MaskFormerPanopticDatasetMapper,
MaskFormerSemanticDatasetMapper,
MaskFormerBinarySemanticDatasetMapper,
MaskFormerBinaryFullDatasetMapper,
ProposalClasificationDatasetMapper,
)
from mask2former.data import (
build_detection_test_loader,
build_detection_train_loader,
dataset_sample_per_class,
dataset_sample_per_task_class,
)
from mask2former.evaluation import (
GeneralizedSemSegEvaluator,
GeneralizedPseudoSemSegEvaluator,
ClassificationEvaluator,
GeneralizedPanopticEvaluator,
InstanceSegEvaluator,
COCOEvaluator,
)
from mask2former.utils.events import WandbWriter, setup_wandb
from mask2former.utils.post_process_utils import dense_crf_post_process
import timm
class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to DETR.

    Overrides evaluator construction, train/test data loading, event writers
    (wandb instead of tensorboard), LR scheduling and optimizer construction,
    and adds a test-time-augmentation entry point.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each
        builtin dataset. For your own dataset, you can simply create an
        evaluator manually in your script and do not have to worry about the
        hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        # Semantic segmentation (ade20k panoptic datasets also get a sem-seg
        # evaluator in addition to the panoptic one added below).
        if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
            if cfg.PSEUDO:
                # Pseudo-label evaluation: bind the pseudo-specific options up
                # front so the call below stays uniform for both branches.
                evaluator = partial(
                    GeneralizedPseudoSemSegEvaluator,
                    with_prior=cfg.PSEUDO_WITH_PRIOR,
                    reject_threshold=cfg.PSEUDO_REJECT_THRESHOLD,
                )
            else:
                evaluator = GeneralizedSemSegEvaluator
            evaluator_list.append(
                evaluator(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                    # Optional dense-CRF refinement of predictions before scoring.
                    post_process_func=dense_crf_post_process
                    if cfg.TEST.DENSE_CRF
                    else None,
                )
            )
        # instance segmentation
        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        # panoptic segmentation
        if evaluator_type == "ade20k_panoptic_seg":
            evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type in [
            "coco_panoptic_seg",
            "cityscapes_panoptic_seg",
            "mapillary_vistas_panoptic_seg",
        ]:
            if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
                evaluator_list.append(GeneralizedPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type == "classification":
            evaluator_list.append(ClassificationEvaluator(dataset_name))
        # COCO: panoptic datasets can additionally be scored for instances /
        # semantics when the corresponding TEST flags are on.
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            # SemSegEvaluator is detectron2's stock semantic-segmentation
            # evaluator (imported from detectron2.evaluation).
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        if evaluator_type == "cityscapes_sem_seg":
            # Cityscapes evaluation shells out to local scripts, so it can only
            # run when every rank sees a GPU on the same machine.
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            # Early return: the Cityscapes evaluator is used alone.
            return CityscapesSemSegEvaluator(dataset_name)
        # ADE20K
        if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        """Build the training data loader for the configured dataset mapper.

        Some mapper types additionally pre-sample the dataset per class/task;
        others return immediately with the default dataset.
        """
        dataset = None
        mapper = None
        # Semantic segmentation dataset mapper
        if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
            mapper = MaskFormerSemanticDatasetMapper(cfg, True)
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_binary_semantic":
            mapper = MaskFormerBinarySemanticDatasetMapper(cfg, True)
            dataset = dataset_sample_per_class(cfg)
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_full_binary_semantic":
            mapper = MaskFormerBinaryFullDatasetMapper(cfg, True)
            dataset = dataset_sample_per_task_class(cfg)
        # Panoptic segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
            mapper = MaskFormerPanopticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Instance segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
            mapper = MaskFormerInstanceDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_full_lsj":
            mapper = COCOFullTaskNewBaselineDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Fallthrough: unknown mapper names use the default mapper (None).
        return build_detection_train_loader(cfg, mapper=mapper, dataset=dataset)

    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        """
        Returns:
            iterable
        It now calls :func:`detectron2.data.build_detection_test_loader`.
        Overwrite it if you'd like a different data loader.
        """
        # NOTE(review): when cfg.ORACLE is set but DATASET_MAPPER_NAME matches
        # none of the branches below, `mapper` is never bound and the final
        # call raises NameError -- confirm all configs use a listed name.
        if cfg.ORACLE:
            if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
                mapper = MaskFormerSemanticDatasetMapper(cfg, False)
            elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_binary_semantic":
                mapper = MaskFormerBinarySemanticDatasetMapper(cfg, False)
            elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_full_binary_semantic":
                # NOTE(review): uses the Binary (not BinaryFull) mapper at test
                # time, unlike build_train_loader -- presumably intentional;
                # confirm.
                mapper = MaskFormerBinarySemanticDatasetMapper(cfg, False)
            elif cfg.INPUT.DATASET_MAPPER_NAME == "propsoal_classification":
                # NOTE(review): "propsoal" is a typo but must match the value
                # used in the config files -- do not fix one without the other.
                mapper = ProposalClasificationDatasetMapper(cfg, False)
        else:
            mapper = None
        return build_detection_test_loader(cfg, dataset_name, mapper=mapper)

    def build_writers(self):
        """
        Build a list of writers to be used. By default it contains
        writers that write metrics to the screen,
        a json file, and a tensorboard event file respectively.
        If you'd like a different list of writers, you can overwrite it in
        your trainer.
        Returns:
            list[EventWriter]: a list of :class:`EventWriter` objects.
        It is now implemented by:
        ::
            return [
                CommonMetricPrinter(self.max_iter),
                JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")),
                TensorboardXWriter(self.cfg.OUTPUT_DIR),
            ]
        """
        # Here the default print/log frequency of each writer is used.
        return [
            # It may not always print what you want to see, since it prints "common" metrics only.
            CommonMetricPrinter(self.max_iter),
            JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")),
            # Weights & Biases replaces the default tensorboard writer.
            WandbWriter(),
        ]

    @classmethod
    def build_lr_scheduler(cls, cfg, optimizer):
        """
        It now calls :func:`detectron2.solver.build_lr_scheduler`.
        Overwrite it if you'd like a different scheduler.
        """
        return build_lr_scheduler(cfg, optimizer)

    @classmethod
    def build_optimizer(cls, cfg, model):
        """Build SGD/AdamW with per-parameter hyperparameters.

        Backbone parameters get a scaled LR; norm-layer and embedding weights
        get dedicated weight decays; position-bias tables get no decay.
        Optionally wraps the optimizer with full-model gradient clipping.
        """
        weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM
        weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED

        defaults = {}
        defaults["lr"] = cfg.SOLVER.BASE_LR
        defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY

        # All normalization layers whose affine weights should use
        # WEIGHT_DECAY_NORM instead of the default weight decay.
        norm_module_types = (
            torch.nn.BatchNorm1d,
            torch.nn.BatchNorm2d,
            torch.nn.BatchNorm3d,
            torch.nn.SyncBatchNorm,
            # NaiveSyncBatchNorm inherits from BatchNorm2d
            torch.nn.GroupNorm,
            torch.nn.InstanceNorm1d,
            torch.nn.InstanceNorm2d,
            torch.nn.InstanceNorm3d,
            torch.nn.LayerNorm,
            torch.nn.LocalResponseNorm,
        )

        params: List[Dict[str, Any]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        for module_name, module in model.named_modules():
            for module_param_name, value in module.named_parameters(recurse=False):
                if not value.requires_grad:
                    continue
                # Avoid duplicating parameters
                if value in memo:
                    continue
                memo.add(value)

                hyperparams = copy.copy(defaults)
                if "backbone" in module_name:
                    hyperparams["lr"] = (
                        hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER
                    )
                if (
                    "relative_position_bias_table" in module_param_name
                    or "absolute_pos_embed" in module_param_name
                ):
                    # print(module_param_name)
                    hyperparams["weight_decay"] = 0.0
                if isinstance(module, norm_module_types):
                    hyperparams["weight_decay"] = weight_decay_norm
                if isinstance(module, torch.nn.Embedding):
                    hyperparams["weight_decay"] = weight_decay_embed
                params.append({"params": [value], **hyperparams})

        def maybe_add_full_model_gradient_clipping(optim):
            # detectron2 doesn't have full model gradient clipping now
            clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
            enable = (
                cfg.SOLVER.CLIP_GRADIENTS.ENABLED
                and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
                and clip_norm_val > 0.0
            )

            class FullModelGradientClippingOptimizer(optim):
                # Clip the norm over ALL parameters before each step, not
                # per-group as detectron2's built-in clipping does.
                def step(self, closure=None):
                    all_params = itertools.chain(
                        *[x["params"] for x in self.param_groups]
                    )
                    torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                    super().step(closure=closure)

            return FullModelGradientClippingOptimizer if enable else optim

        optimizer_type = cfg.SOLVER.OPTIMIZER
        if optimizer_type == "SGD":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
                params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
            )
        elif optimizer_type == "ADAMW":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
                params, cfg.SOLVER.BASE_LR
            )
        else:
            raise NotImplementedError(f"no optimizer type {optimizer_type}")
        if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
            # Non-"full_model" clipping is handled by detectron2's wrapper.
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer

    @classmethod
    def test_with_TTA(cls, cfg, model):
        """Evaluate `model` with test-time augmentation on all test datasets.

        Results are written to OUTPUT_DIR/inference_TTA and metric keys are
        suffixed with "_TTA".
        """
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA.
        logger.info("Running inference with test-time augmentation ...")
        model = SemanticSegmentorWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res
def setup(args):
    """Create the detectron2 config from CLI args and perform basic setup.

    Merges the config file and any command-line overrides, freezes the
    result, runs detectron2's default setup, initializes wandb (training
    runs only), and installs a logger for the "mask_former" module.
    """
    cfg = get_cfg()
    add_deeplab_config(cfg)  # needed for the poly lr schedule
    add_mask_former_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    # Weights & Biases tracking is only set up for training runs.
    if not args.eval_only:
        setup_wandb(cfg, args)
    # Dedicated logger for the "mask_former" module.
    setup_logger(
        output=cfg.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="mask_former",
    )
    return cfg
def main(args):
    """Per-worker entry point: train, or evaluate when --eval-only is set."""
    cfg = setup(args)

    if not args.eval_only:
        # Training path: build the trainer, optionally resume, and train.
        trainer = Trainer(cfg)
        trainer.resume_or_load(resume=args.resume)
        return trainer.train()

    # Evaluation-only path.
    model = Trainer.build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=args.resume
    )
    # Sync the region CLIP adapter weights from the main CLIP adapter.
    if model._region_clip_adapter is not None:
        model._region_clip_adapter.load_state_dict(model.clip_adapter.state_dict())

    if cfg.TEST.AUG.ENABLED:
        res = Trainer.test_with_TTA(cfg, model)
    else:
        res = Trainer.test(cfg, model)
    if comm.is_main_process():
        verify_results(cfg, res)
    return res
if __name__ == "__main__":
    # Parse detectron2's standard command-line arguments (config file,
    # --eval-only, --resume, distributed settings, config overrides).
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawn one worker process per GPU (and per machine) running `main`.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/third_party/CLIP/setup.py | third_party/CLIP/setup.py | import os
import pkg_resources
from setuptools import setup, find_packages
# Pinned install requirements are read from the adjacent requirements.txt.
_requirements_path = os.path.join(os.path.dirname(__file__), "requirements.txt")
with open(_requirements_path) as _requirements_file:
    _install_requires = [
        str(requirement)
        for requirement in pkg_resources.parse_requirements(_requirements_file)
    ]

setup(
    name="clip",
    py_modules=["clip"],
    version="1.0",
    description="",
    author="OpenAI",
    packages=find_packages(exclude=["tests*"]),
    install_requires=_install_requires,
    include_package_data=True,
    extras_require={"dev": ["pytest"]},
)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/third_party/CLIP/tests/test_consistency.py | third_party/CLIP/tests/test_consistency.py | import numpy as np
import pytest
import torch
from PIL import Image
import clip
@pytest.mark.parametrize("model_name", clip.available_models())
def test_consistency(model_name):
    """The TorchScript (jit) and eager builds of each CLIP model must
    produce (nearly) identical image-text probabilities on a fixed input."""
    device = "cpu"
    jit_model, preprocess = clip.load(model_name, device=device, jit=True)
    eager_model, _ = clip.load(model_name, device=device, jit=False)

    image = preprocess(Image.open("CLIP.png")).unsqueeze(0).to(device)
    text = clip.tokenize(["a diagram", "a dog", "a cat"]).to(device)

    with torch.no_grad():
        jit_logits, _ = jit_model(image, text)
        eager_logits, _ = eager_model(image, text)

    jit_probs = jit_logits.softmax(dim=-1).cpu().numpy()
    eager_probs = eager_logits.softmax(dim=-1).cpu().numpy()

    # Loose tolerances: jit and eager kernels may differ slightly numerically.
    assert np.allclose(jit_probs, eager_probs, atol=0.01, rtol=0.1)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/third_party/CLIP/clip/model.py | third_party/CLIP/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
from scipy import interpolate
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
    """CLIP's anti-aliased ResNet bottleneck block.

    All convolutions run at stride 1; when ``stride > 1`` the spatial
    reduction is done by an AvgPool2d after the 3x3 conv (and at the start
    of the downsample shortcut), which avoids aliasing from strided convs.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the
        # second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # The shortcut needs resampling: avgpool first, then a stride-1
            # 1x1 conv (same anti-aliasing trick as the main path).
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        (
                            "0",
                            nn.Conv2d(
                                inplanes,
                                planes * self.expansion,
                                1,
                                stride=1,
                                bias=False,
                            ),
                        ),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )

    def forward(self, x: torch.Tensor):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(self.avgpool(out)))

        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(out + shortcut)
class AttentionPool2d(nn.Module):
    """QKV attention pooling over a 2D feature map (CLIP ResNet head).

    Flattens the spatial map to tokens, prepends a mean-pooled token, adds a
    (resized if needed) positional embedding, and runs one multi-head
    attention pass; by default returns only the pooled token.
    """

    def __init__(
        self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
    ):
        super().__init__()
        # One position per spatial cell plus one for the pooled token.
        self.positional_embedding = nn.Parameter(
            torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
        # Grid side length the positional embedding was trained for.
        self.grid_size = spacial_dim

    def forward(self, x, mask=None, return_cls=True):
        """Pool ``x`` (N, C, H, W); returns (N, out) if ``return_cls`` else
        all tokens as (HW+1, N, out)."""
        b, c, gh, gw = x.shape
        # remove irrelated feature
        if mask is not None:
            # Resize mask to the feature grid, binarize, flatten, and append
            # one entry for the pooled token.
            # NOTE(review): the appended ones mark the pooled token as masked
            # in key_padding_mask terms -- confirm intended polarity with
            # callers.
            mask = F.interpolate(mask[:, None, ...], size=(gh, gw)).squeeze(
                1
            )  # [N,H,W] -> [N,grid,grid]
            mask = (mask > 0.5).reshape(mask.shape[0], -1)
            mask = torch.cat([mask, mask.new_ones(mask.shape[0], 1)], dim=1)
            if x.size()[0] == 1:
                # Broadcast a single feature map across all masks.
                x = x.expand(mask.shape[0], c, gh, gw)
        x = x.reshape(x.shape[0], c, gh * gw).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        positional_embedding = self.positional_embedding
        if not (self.positional_embedding.shape[0] == x.shape[0]):
            # Token count differs from the trained embedding: keep the pooled
            # token's position and bicubically resize the spatial part.
            cls_pos = positional_embedding[0:1, :]
            per_pos_embedding = (
                F.interpolate(
                    positional_embedding[1:, :]
                    .permute(1, 0)
                    .view(1, -1, self.grid_size, self.grid_size),
                    size=(gh, gw),
                    mode="bicubic",
                    align_corners=True
                )
                .reshape(-1, gh * gw)
                .permute(1, 0)
            )
            positional_embedding = torch.cat([cls_pos, per_pos_embedding])
        x = x + positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Single attention pass with separate q/k/v projections and an output
        # projection to output_dim.
        x, _ = F.multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
            key_padding_mask=mask,
        )
        if return_cls:
            return x[0]
        else:
            return x
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem
        self.conv1 = nn.Conv2d(
            3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(
            width // 2, width // 2, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(
            input_resolution // 32, embed_dim, heads, output_dim
        )

    def _make_layer(self, planes, blocks, stride=1):
        # First block carries the stride; the rest keep resolution.
        stage = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        stage.extend(Bottleneck(self._inplanes, planes) for _ in range(blocks - 1))
        return nn.Sequential(*stage)

    def forward(self, x, mask: torch.Tensor = None, return_cls=True):
        x = x.type(self.conv1.weight.dtype)

        # Stem: three conv-bn-relu stages followed by a 2x avgpool -> 1/4 res.
        for conv, bn in (
            (self.conv1, self.bn1),
            (self.conv2, self.bn2),
            (self.conv3, self.bn3),
        ):
            x = self.relu(bn(conv(x)))
        x = self.avgpool(x)

        x = self.layer1(x)
        x = self.layer2(x)  # 1/8,1/8
        x = self.layer3(x)  # 1/16,1/16
        x = self.layer4(x)  # 1/32,1/32

        b, c, gh, gw = x.shape
        pooled = self.attnpool(x, mask, return_cls)
        if return_cls:
            return pooled
        # (HW+1, N, C): drop the pooled token and restore spatial layout.
        return pooled[1:].permute(1, 0, 2).reshape(b, gh, gw, pooled.shape[-1])
class LayerNorm(nn.LayerNorm):
    """fp16-safe LayerNorm: normalizes in float32, then casts back to the
    input's original dtype."""

    def forward(self, x: torch.Tensor):
        original_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(original_dtype)
class QuickGELU(nn.Module):
    """Fast GELU approximation used by CLIP: ``x * sigmoid(1.702 * x)``."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention and a 4x-expansion MLP,
    each wrapped in a residual connection."""

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(d_model, d_model * 4)),
                    ("gelu", QuickGELU()),
                    ("c_proj", nn.Linear(d_model * 4, d_model)),
                ]
            )
        )
        self.ln_2 = LayerNorm(d_model)
        # Optional additive attention mask shared by every forward call.
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor, **kwargs):
        # Keep the stored mask on the input's dtype/device before each call.
        if self.attn_mask is not None:
            self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device)
        attn_output, _ = self.attn(
            x, x, x, need_weights=False, attn_mask=self.attn_mask, **kwargs
        )
        return attn_output

    def forward(self, x: torch.Tensor, **kwargs):
        x = x + self.attention(self.ln_1(x), **kwargs)
        return x + self.mlp(self.ln_2(x))
class Transformer(nn.Module):
    """Stack of ``layers`` ResidualAttentionBlocks sharing one attention mask."""

    def __init__(
        self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor, **kwargs):
        # Iterate manually: nn.Sequential cannot forward extra **kwargs
        # (e.g. key_padding_mask) to each block.
        for resblock in self.resblocks:
            x = resblock(x, **kwargs)
        return x
class VisionTransformer(nn.Module):
    """CLIP vision transformer with variable-resolution support.

    When the patch-token count differs from the learned positional embedding,
    the embedding is resized on the fly: via ``F.interpolate`` for
    ``inter_method`` in {"bicubic", "bilinear"}, otherwise via a BEiT-style
    geometric-progression grid with scipy cubic interpolation.
    """

    def __init__(
        self,
        input_resolution: int,
        patch_size: int,
        width: int,
        layers: int,
        heads: int,
        output_dim: int,
    ):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding: stride == kernel == patch_size.
        self.conv1 = nn.Conv2d(
            in_channels=3,
            out_channels=width,
            kernel_size=patch_size,
            stride=patch_size,
            bias=False,
        )

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(
            scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)
        )
        # Grid side length the positional embedding was trained for.
        self.grid_size = input_resolution // patch_size
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor = None,
        inter_method="bicubic",
        return_cls=True,
    ):
        """Encode an image batch.

        Args:
            x: (N, 3, H, W) image tensor.
            mask: optional (N, H', W') mask resized to the patch grid and used
                as ``key_padding_mask``.
            inter_method: positional-embedding resize strategy (see class doc).
            return_cls: if True return the projected CLS feature (N, output_dim);
                otherwise the per-patch features reshaped to (N, gh, gw, output_dim).
        """
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        b, _, gh, gw = x.size()
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the class token to every sample.
        x = torch.cat(
            [
                self.class_embedding.to(x.dtype)
                + torch.zeros(
                    x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
                ),
                x,
            ],
            dim=1,
        )  # shape = [*, grid ** 2 + 1, width]

        positional_embedding = self.positional_embedding
        # BUGFIX: x is [batch, tokens, width] here, so the resize check must
        # compare against the token count (dim 1); the original compared
        # against the batch size (dim 0), triggering spurious resampling
        # whenever batch != token count.
        if positional_embedding.shape[0] != x.shape[1]:
            cls_pos = positional_embedding[0:1, :]
            if inter_method in ["bicubic", "bilinear"]:
                per_pos_embedding = (
                    F.interpolate(
                        positional_embedding[1:, :]
                        .permute(1, 0)
                        .view(1, -1, self.grid_size, self.grid_size),
                        size=(gh, gw),
                        mode="bicubic",
                        align_corners=True,
                    )
                    .reshape(-1, gh * gw)
                    .permute(1, 0)
                )
            else:
                # BEiT-style resampling: find a geometric-progression source
                # grid per axis, then cubically interpolate each channel.
                def geometric_progression(a, r, n):
                    return a * (1.0 - r ** n) / (1.0 - r)

                # Row (height) axis.
                left, right = 1.01, 1.5
                while right - left > 1e-6:
                    q = (left + right) / 2.0
                    gp = geometric_progression(1, q, self.grid_size // 2)
                    if gp > gh // 2:
                        right = q
                    else:
                        left = q
                dis = []
                cur = 1
                for i in range(self.grid_size // 2):
                    dis.append(cur)
                    cur += q ** (i + 1)
                r_ids = [-_ for _ in reversed(dis)]
                if self.grid_size % 2 == 0:
                    grid_y = r_ids + dis
                else:
                    grid_y = r_ids + [0] + dis

                # Column (width) axis.
                left, right = 1.01, 1.5
                while right - left > 1e-6:
                    q = (left + right) / 2.0
                    gp = geometric_progression(1, q, self.grid_size // 2)
                    if gp > gw // 2:
                        right = q
                    else:
                        left = q
                dis = []
                cur = 1
                for i in range(self.grid_size // 2):
                    dis.append(cur)
                    cur += q ** (i + 1)
                r_ids = [-_ for _ in reversed(dis)]
                # BUGFIX: the original assigned these coordinate lists to `x`
                # and `y`, clobbering the token tensor `x` and crashing later
                # at `x + positional_embedding`; renamed to grid_x/grid_y.
                # NOTE(review): the even-grid formula differs between axes
                # (grid_x has an extra [0] and drops dis[-1]) -- kept as in
                # the original; confirm against the BEiT reference.
                if self.grid_size % 2 == 0:
                    grid_x = r_ids + [0] + dis[:-1]
                else:
                    grid_x = r_ids + [0] + dis

                dx = np.arange(-gw // 2, gw / 2, 1.0)
                dy = np.arange(-gh // 2, gh / 2, 1.0)
                all_rel_pos_bias = []
                for i in range(positional_embedding.shape[-1]):
                    z = (
                        positional_embedding[1:, i]
                        .view(self.grid_size, self.grid_size)
                        .float()
                        .numpy()
                    )
                    # NOTE(review): scipy.interpolate.interp2d is deprecated
                    # and removed in scipy >= 1.14; this path needs migration
                    # (e.g. RegularGridInterpolator) to run on new scipy.
                    f_cubic = interpolate.interp2d(grid_x, grid_y, z, kind="cubic")
                    all_rel_pos_bias.append(
                        torch.Tensor(f_cubic(dx, dy))
                        .contiguous()
                        .view(-1, 1)
                        .to(positional_embedding.device)
                    )
                per_pos_embedding = torch.cat(all_rel_pos_bias, dim=-1)
            positional_embedding = torch.cat([cls_pos, per_pos_embedding])

        x = x + positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND

        # remove irrelated feature
        if mask is not None:
            # Resize to the patch grid, binarize, flatten, and append an entry
            # for the class token.
            # NOTE(review): True entries in key_padding_mask are *ignored* by
            # attention -- confirm the intended polarity with callers.
            mask = F.interpolate(mask[:, None, ...], size=(gh, gw)).squeeze(
                1
            )  # [N,H,W] -> [N,grid,grid]
            mask = (mask > 0.5).reshape(mask.shape[0], -1)
            mask = torch.cat([mask, mask.new_ones(mask.shape[0], 1)], dim=1)
            if x.size()[1] == 1:
                # Broadcast a single sample across all masks.
                x = x.expand(x.size()[0], mask.shape[0], x.size()[2])
        x = self.transformer(x, key_padding_mask=mask)
        x = x.permute(1, 0, 2)  # LND -> NLD
        if return_cls:
            x = self.ln_post(x[:, 0, :])
        else:
            x = self.ln_post(x[:, 1:, :])

        if self.proj is not None:
            x = x @ self.proj
        if not return_cls:
            x = x.reshape(b, gh, gw, -1)
        return x
class CLIP(nn.Module):
    """Contrastive Language-Image Pretraining model.

    Pairs a visual backbone (ModifiedResNet when ``vision_layers`` is a
    tuple/list, otherwise a VisionTransformer) with a causal text
    transformer, projecting both modalities into a shared ``embed_dim``
    space; ``forward`` returns scaled cosine-similarity logits.
    """
    def __init__(
        self,
        embed_dim: int,
        # vision
        image_resolution: int,
        vision_layers: Union[Tuple[int, int, int, int], int],
        vision_width: int,
        vision_patch_size: int,
        # text
        context_length: int,
        vocab_size: int,
        transformer_width: int,
        transformer_heads: int,
        transformer_layers: int,
    ):
        super().__init__()
        self.context_length = context_length
        if isinstance(vision_layers, (tuple, list)):
            # ResNet variant: per-stage block counts; attention-pool head
            # count scales with width.
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
            )
        else:
            # ViT variant: `vision_layers` is the transformer depth.
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim,
            )
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask(),
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width)
        )
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learnable temperature, stored as a log; initialised to log(1/0.07).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        self.initialize_parameters()
    def initialize_parameters(self):
        """Initialise embeddings and projections (normal init, depth-scaled
        std for the text transformer)."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            # Zero the last BN gain in every bottleneck so each residual
            # branch starts out as an identity mapping.
            for resnet_block in [
                self.visual.layer1,
                self.visual.layer2,
                self.visual.layer3,
                self.visual.layer4,
            ]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        proj_std = (self.transformer.width ** -0.5) * (
            (2 * self.transformer.layers) ** -0.5
        )
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1) # zero out the lower diagonal
        return mask
    @property
    def dtype(self):
        # The visual stem's conv dtype is taken as the model-wide dtype
        # (fp16 after convert_weights, fp32 otherwise).
        return self.visual.conv1.weight.dtype
    def encode_image(self, image, **kwargs):
        """Embed `image` with the visual backbone; extra kwargs forwarded."""
        return self.visual(image.type(self.dtype), **kwargs)
    def encode_text(self, text):
        """Embed tokenised `text`; the feature is read at the EOT position
        (argmax works because EOT is the highest token id per sequence)."""
        x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2) # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2) # LND -> NLD
        x = self.ln_final(x).type(self.dtype)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return x
    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text): temperature-scaled
        cosine similarities between all image/text pairs in the batch."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""
    def _to_fp16(module):
        # Conv / Linear layers: halve weight and (optional) bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()
        # MultiheadAttention stores its projections as plain tensors.
        if isinstance(module, nn.MultiheadAttention):
            proj_names = [
                "in_proj_weight",
                "q_proj_weight",
                "k_proj_weight",
                "v_proj_weight",
                "in_proj_bias",
                "bias_k",
                "bias_v",
            ]
            for proj_name in proj_names:
                tensor = getattr(module, proj_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()
        # CLIP-specific raw Parameter attributes.
        for proj_name in ("text_projection", "proj"):
            attr = getattr(module, proj_name, None)
            if attr is not None:
                attr.data = attr.data.half()
    model.apply(_to_fp16)
def build_model(state_dict: dict):
    """Instantiate a CLIP model whose architecture is inferred from the
    shapes/keys of a checkpoint ``state_dict``, load the weights (in fp16),
    and return the model in eval mode.
    """
    # A ViT checkpoint carries a top-level visual projection matrix.
    vit = "visual.proj" in state_dict
    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        # One attention in-projection per transformer block.
        vision_layers = len(
            [
                k
                for k in state_dict.keys()
                if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
            ]
        )
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding holds grid*grid patch tokens plus one CLS token.
        grid_size = round(
            (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5
        )
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet variant: count blocks per stage from the key namespace.
        counts: list = [
            len(
                set(
                    k.split(".")[2]
                    for k in state_dict
                    if k.startswith(f"visual.layer{b}")
                )
            )
            for b in [1, 2, 3, 4]
        ]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round(
            (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
        )
        vision_patch_size = None
        assert (
            output_width ** 2 + 1
            == state_dict["visual.attnpool.positional_embedding"].shape[0]
        )
        # ResNet downsamples by 32x overall.
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(
            k.split(".")[2]
            for k in state_dict
            if k.startswith(f"transformer.resblocks")
        )
    )
    model = CLIP(
        embed_dim,
        image_resolution,
        vision_layers,
        vision_width,
        vision_patch_size,
        context_length,
        vocab_size,
        transformer_width,
        transformer_heads,
        transformer_layers,
    )
    # Non-parameter bookkeeping entries would break load_state_dict.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/third_party/CLIP/clip/simple_tokenizer.py | third_party/CLIP/clip/simple_tokenizer.py | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
    """Absolute path of the bundled BPE vocabulary archive (next to this module)."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
    """
    Build the reversible byte -> unicode-character table used by byte-level BPE.

    Every one of the 256 byte values maps to a printable unicode character:
    bytes that are already printable map to themselves, and the remaining
    bytes are assigned characters starting at code point 256. This keeps the
    BPE code away from whitespace/control characters it cannot handle, while
    staying fully reversible. Insertion order (printable bytes first, then
    the shifted ones) is significant: the tokenizer vocab is built from
    ``.values()`` in this order.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    table = {b: chr(b) for b in printable}
    shift = 0
    for byte in range(2 ** 8):
        if byte not in table:
            table[byte] = chr(2 ** 8 + shift)
            shift += 1
    return table
def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    # zip pairs each symbol with its successor; unlike the original
    # explicit loop (which indexed word[0]), this also returns an empty
    # set for an empty or single-symbol word instead of raising.
    return set(zip(word, word[1:]))
def basic_clean(text):
    """Repair mojibake with ftfy, undo (possibly double) HTML escaping, and trim."""
    repaired = ftfy.fix_text(text)
    # unescape twice: sources sometimes contain doubly-escaped entities.
    return html.unescape(html.unescape(repaired)).strip()
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim both ends."""
    return re.sub(r"\s+", " ", text).strip()
class SimpleTokenizer(object):
    """Byte-level BPE tokenizer (GPT-2 style) used by CLIP."""
    def __init__(self, bpe_path: str = default_bpe()):
        # byte <-> unicode tables make arbitrary bytes representable as
        # printable characters for the BPE merge rules.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
        # Drop the header line and keep exactly 49152-256-2 merge rules
        # (vocab budget minus byte symbols and the two special tokens).
        merges = merges[1 : 49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocab: 256 byte symbols, their end-of-word variants, merged
        # symbols, then the two special tokens — order defines token ids.
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + "</w>" for v in vocab]
        for merge in merges:
            vocab.append("".join(merge))
        vocab.extend(["<|startoftext|>", "<|endoftext|>"])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank = higher merge priority.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {
            "<|startoftext|>": "<|startoftext|>",
            "<|endoftext|>": "<|endoftext|>",
        }
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE,
        )
    def bpe(self, token):
        """Apply the BPE merge rules to one pre-tokenized word and return
        its space-separated subword string (memoized in self.cache)."""
        if token in self.cache:
            return self.cache[token]
        # Mark the last character with the end-of-word suffix.
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        pairs = get_pairs(word)
        if not pairs:
            return token + "</w>"
        while True:
            # Greedily merge the highest-priority (lowest-rank) pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # `first` no longer occurs; copy the tail and stop.
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Clean `text`, split with the regex pattern, and return BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw UTF-8 bytes to their printable stand-ins before BPE.
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(
                self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
            )
        return bpe_tokens
    def decode(self, tokens):
        """Invert `encode`: token ids -> text (end-of-word marks become spaces)."""
        text = "".join([self.decoder[token] for token in tokens])
        text = (
            bytearray([self.byte_decoder[c] for c in text])
            .decode("utf-8", errors="replace")
            .replace("</w>", " ")
        )
        return text
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/third_party/CLIP/clip/__init__.py | third_party/CLIP/clip/__init__.py | from .clip import *
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/third_party/CLIP/clip/clip.py | third_party/CLIP/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if torch.__version__.split(".") < ["1", "7", "1"]:
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
    """Download `url` into `root` (unless already cached) and return the
    local file path, verifying the SHA256 digest embedded in the URL.

    Raises RuntimeError if the target path exists but is not a regular
    file, or if the downloaded file fails the checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # CLIP release URLs carry the hex digest as the second-to-last path part.
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        if (
            hashlib.sha256(open(download_target, "rb").read()).hexdigest()
            == expected_sha256
        ):
            return download_target
        else:
            warnings.warn(
                f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
            )
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")),
            ncols=80,
            unit="iB",
            unit_scale=True,
        ) as loop:
            # Stream in 8 KiB chunks so large checkpoints are never fully
            # buffered in memory.
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    if (
        hashlib.sha256(open(download_target, "rb").read()).hexdigest()
        != expected_sha256
    ):
        # Fixed message: previously read "does not not match".
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match"
        )
    return download_target
def _transform(n_px):
    """CLIP preprocessing pipeline: bicubic-resize and center-crop to
    `n_px`, force RGB, convert to tensor, and normalize with the CLIP
    channel statistics."""
    steps = [
        Resize(n_px, interpolation=BICUBIC),
        CenterCrop(n_px),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize(
            (0.48145466, 0.4578275, 0.40821073),
            (0.26862954, 0.26130258, 0.27577711),
        ),
    ]
    return Compose(steps)
def available_models() -> List[str]:
    """Names of the CLIP models this module knows how to download."""
    return [model_name for model_name in _MODELS]
def load(
    name: str,
    device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
    jit=False,
):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model or more hackable non-JIT model (default).
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name])
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(
            f"Model {name} not found; available models = {available_models()}"
        )
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(
                f"File {model_path} is not a JIT archive. Loading as a state dict instead"
            )
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")
    if not jit:
        # Non-JIT path: rebuild a Python CLIP module from the weights.
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform(model.visual.input_resolution)
    # patch the device names
    # Trace a throwaway graph on the target device to obtain a Constant
    # node carrying that device; it is grafted onto the loaded graphs below.
    device_holder = torch.jit.trace(
        lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]
    )
    device_node = [
        n
        for n in device_holder.graph.findAllNodes("prim::Constant")
        if "Device" in repr(n)
    ][-1]
    def patch_device(module):
        # Rewrite hard-coded "cuda" device constants inside TorchScript graphs.
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith(
                    "cuda"
                ):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(
            lambda: torch.ones([]).float(), example_inputs=[]
        )
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()
        def patch_float(module):
            # Replace fp16 dtype constants (enum value 5) in aten::to calls
            # with float32, since CPU inference runs in fp32.
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [
                        1,
                        2,
                    ]: # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    return model, _transform(model.input_resolution.item())
def tokenize(
    texts: Union[str, List[str]],
    context_length: int = 77,
    truncate: bool = False,
    return_length: bool = False,
) -> torch.LongTensor:
    """
    Tokenize one string or a list of strings for CLIP.

    Each text is wrapped in start/end-of-text tokens and written into a
    fixed-width LongTensor of shape [len(texts), context_length], padded
    with zeros. Texts longer than `context_length` raise a RuntimeError
    unless `truncate` is set, in which case they are cut and the final
    position forced to the EOT token. When `return_length` is set, the
    per-text token counts are returned alongside the tensor.
    """
    if isinstance(texts, str):
        texts = [texts]
    sot = _tokenizer.encoder["<|startoftext|>"]
    eot = _tokenizer.encoder["<|endoftext|>"]
    encoded = [[sot] + _tokenizer.encode(text) + [eot] for text in texts]
    result = torch.zeros(len(encoded), context_length, dtype=torch.long)
    length = []
    for row, tokens in enumerate(encoded):
        if len(tokens) > context_length:
            if not truncate:
                raise RuntimeError(
                    f"Input {texts[row]} is too long for context length {context_length}"
                )
            tokens = tokens[:context_length]
            tokens[-1] = eot
        length.append(len(tokens))
        result[row, : len(tokens)] = torch.tensor(tokens)
    if return_length:
        return result, length
    return result
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/tools/parse_name.py | tools/parse_name.py | import sys
# Derive a flat run name from a config path: drop everything after the
# first dot and flatten directory separators into underscores.
cfg_path = sys.argv[1]
run_name = cfg_path.split(".")[0].replace("/", "_")
print(run_name)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/tools/convert-torchvision-to-d2.py | tools/convert-torchvision-to-d2.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle as pkl
import sys
import torch
"""
Usage:
# download one of the ResNet{18,34,50,101,152} models from torchvision:
wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
# run the conversion
./convert-torchvision-to-d2.py r50.pth r50.pkl
# Then, use r50.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/r50.pkl"
PIXEL_MEAN: [123.675, 116.280, 103.530]
PIXEL_STD: [58.395, 57.120, 57.375]
RESNETS:
DEPTH: 50
STRIDE_IN_1X1: False
INPUT:
FORMAT: "RGB"
These models typically produce slightly worse results than the
pre-trained ResNets we use in official configs, which are the
original ResNet models released by MSRA.
"""
if __name__ == "__main__":
    # Usage: ./convert-torchvision-to-d2.py input.pth output.pkl
    src_path = sys.argv[1]  # renamed from `input`, which shadowed the builtin
    obj = torch.load(src_path, map_location="cpu")
    newmodel = {}
    for k in list(obj.keys()):
        old_k = k
        # torchvision stem parameters carry no "layer" prefix.
        if "layer" not in k:
            k = "stem." + k
        # layerN -> res{N+1}: detectron2 counts the stem as res1.
        for t in [1, 2, 3, 4]:
            k = k.replace("layer{}".format(t), "res{}".format(t + 1))
        # bnN -> convN.norm
        for t in [1, 2, 3]:
            k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
        k = k.replace("downsample.0", "shortcut")
        k = k.replace("downsample.1", "shortcut.norm")
        print(old_k, "->", k)
        newmodel[k] = obj.pop(old_k).detach().numpy()
    res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}
    with open(sys.argv[2], "wb") as f:
        pkl.dump(res, f)
    if obj:
        # Anything left in obj was not matched by the renaming rules above.
        print("Unconverted keys:", obj.keys())
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/tools/mask_cls_collect.py | tools/mask_cls_collect.py | import os
import glob
import functools
from mmcv.utils import track_parallel_progress
import numpy as np
from PIL import Image
import json
import warnings
import fire
from itertools import chain
def count_cls(file_path, ignore_index=(255,), depth=1):
    """Return ``[relative_path, class_ids]`` for one mask image.

    ``relative_path`` keeps the last *depth* path components of
    *file_path*; ``class_ids`` are the sorted unique pixel values of the
    mask with every id in *ignore_index* removed.
    """
    # Tuple default instead of the original mutable list default
    # (shared-mutable-default pitfall); `in` behaves identically, and
    # callers may still pass a list.
    cls_label = np.unique(np.asarray(Image.open(file_path))).tolist()
    cls_label = [l for l in cls_label if l not in ignore_index]
    return [os.path.join(*file_path.split(os.sep)[-depth:]), cls_label]
def main(gt_dir, map_file_save_path, ignore_index=[255], ext=".png", recursive=False):
    """Scan *gt_dir* for mask files and dump a {filename: class ids}
    JSON map to *map_file_save_path*."""
    if not os.path.isdir(gt_dir):
        warnings.warn(f"{gt_dir} is not a valid directory")
        return
    mask_paths = glob.glob(os.path.join(gt_dir, "*" + ext), recursive=recursive)
    print(f"Find {len(mask_paths)}")
    # Fan the per-file scan out over 16 worker processes.
    counter = functools.partial(count_cls, ignore_index=ignore_index)
    pairs = track_parallel_progress(counter, mask_paths, nproc=16)
    with open(map_file_save_path, "w") as f:
        json.dump({path: labels for path, labels in pairs}, f)
def main_ctyscapes(
    gt_dir, map_file_save_path, ignore_index=[255], ext=".png", recursive=False
):
    """Cityscapes layout variant of `main`: masks live one directory
    (the city) below *gt_dir*, and two path components are kept as the
    JSON key."""
    if not os.path.isdir(gt_dir):
        warnings.warn(f"{gt_dir} is not a valid directory")
        return
    cities = os.listdir(gt_dir)
    gt_file_list = []
    for city in cities:
        gt_file_list.extend(glob.glob(os.path.join(gt_dir, city, "*" + ext)))
    print(gt_file_list[0])
    print(f"Find {len(gt_file_list)}")
    counter = functools.partial(count_cls, ignore_index=ignore_index, depth=2)
    pairs = track_parallel_progress(counter, gt_file_list, nproc=16)
    with open(map_file_save_path, "w") as f:
        json.dump({path: labels for path, labels in pairs}, f)
# Script entry point: expose `main` through the Fire CLI
# (the cityscapes variant `main_ctyscapes` is not wired up here).
if __name__ == "__main__":
    fire.Fire(main)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/tools/convert-pretrained-clip-model-to-d2.py | tools/convert-pretrained-clip-model-to-d2.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle as pkl
import sys
import torch
"""
Usage:
# download pretrained swin model:
wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
# run the conversion
./convert-pretrained-model-to-d2.py swin_tiny_patch4_window7_224.pth swin_tiny_patch4_window7_224.pkl
# Then, use swin_tiny_patch4_window7_224.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/swin_tiny_patch4_window7_224.pkl"
INPUT:
FORMAT: "RGB"
"""
def transform(path):
    """Load a checkpoint and remap its visual-branch keys to detectron2 names.

    Keeps only keys under ``visual_model.`` (prefix stripped), renames the
    Swin-specific buffer names, and prefixes everything with ``backbone.``.
    Returns the remapped state dict.
    """
    model = torch.load(path, map_location="cpu")
    print(f"loading {path}......")
    state_dict = model["model"]
    state_dict = {
        k.replace("visual_model.", ""): v
        for k, v in state_dict.items()
        if k.startswith("visual_model")
    }
    # old fragment -> new fragment expected by the detectron2 Swin backbone;
    # replaces four near-identical copy-pasted rename loops.
    renames = [
        ("relative_coords", "relative_position_index"),
        ("atten_mask_matrix", "attn_mask"),
        ("rel_pos_embed_table", "relative_position_bias_table"),
        ("channel_reduction", "reduction"),
    ]
    for old, new in renames:
        _rename_key_fragment(state_dict, old, new)
    return {
        k if k.startswith("backbone.") else "backbone." + k: v
        for k, v in state_dict.items()
    }
def _rename_key_fragment(state_dict, old, new):
    """In place, rename every key containing *old* by substituting *new*."""
    for k in [key for key in state_dict if old in key]:
        state_dict[k.replace(old, new)] = state_dict.pop(k)
if __name__ == "__main__":
    # Usage: script.py <input checkpoint> <output pickle>
    src = sys.argv[1]
    payload = {
        "model": transform(src),
        "__author__": "third_party",
        "matching_heuristics": True,
    }
    with open(sys.argv[2], "wb") as f:
        pkl.dump(payload, f)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/tools/convert-pretrained-swin-model-to-d2.py | tools/convert-pretrained-swin-model-to-d2.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pickle as pkl
import sys
import torch
"""
Usage:
# download pretrained swin model:
wget https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth
# run the conversion
./convert-pretrained-model-to-d2.py swin_tiny_patch4_window7_224.pth swin_tiny_patch4_window7_224.pkl
# Then, use swin_tiny_patch4_window7_224.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/swin_tiny_patch4_window7_224.pkl"
INPUT:
FORMAT: "RGB"
"""
if __name__ == "__main__":
    # Usage: script.py <swin checkpoint .pth> <output .pkl>
    src_path = sys.argv[1]  # renamed from `input`, which shadowed the builtin
    obj = torch.load(src_path, map_location="cpu")["model"]
    res = {"model": obj, "__author__": "third_party", "matching_heuristics": True}
    with open(sys.argv[2], "wb") as f:
        pkl.dump(res, f)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/datasets/prepare_coco_alldata.py | datasets/prepare_coco_alldata.py | import argparse
import os
import os.path as osp
import json
import copy
import shutil
from functools import partial
from glob import glob
from panopticapi.utils import rgb2id, id2rgb
import mmcv
import numpy as np
from PIL import Image
import torch
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
COCO_LEN = 123287
# Class-id remapping tables, built programmatically instead of the previous
# 174-line hand-typed literal (same keys, values, and insertion order).
#
# Raw ids absent from the COCO "stuffthingmaps" labelling; they never
# receive a train id.
_missing_clsID = {11, 25, 28, 29, 44, 65, 67, 68, 70, 82, 90}
# Dense remap of the remaining ids in 0..181 to contiguous train ids
# 0..170, keeping 255 as the ignore label.
full_clsID_to_trID = {
    k: i for i, k in enumerate(k for k in range(182) if k not in _missing_clsID)
}
full_clsID_to_trID[255] = 255
# COCO panoptic merged-stuff categories 183..199 continue the train ids
# at 171..187 (a constant offset of -12).
panoptic_clsID_to_trID = {k: k - 12 for k in range(183, 200)}
# Ids held out as "novel" for open-vocabulary evaluation.
novel_clsID = [20, 24, 32, 33, 40, 56, 86, 99, 105, 123, 144, 147, 148, 168, 171]
base_clsID = [k for k in full_clsID_to_trID.keys() if k not in novel_clsID + [255]]
pan_clsID = list(range(183, 200))
novel_clsID_to_trID = {k: i for i, k in enumerate(novel_clsID)}
base_clsID_to_trID = {k: i for i, k in enumerate(base_clsID)}
pan_clsID_to_trID = {k: i for i, k in enumerate(pan_clsID)}
def convert_to_trainID(
    maskpath, out_mask_dir, panoptic_root, out_panoptic_mask_dir, is_train, clsID_to_trID=full_clsID_to_trID, suffix=""
):
    """Remap one semantic mask to train ids and rebuild its panoptic PNG,
    folding stuff regions (raw ids > 90) into the panoptic segmentation.

    Reads/updates the module-level ``train_obj_annos_copy`` /
    ``val_obj_annos_copy`` dicts (set up in ``main``), appending a new
    segment entry for each stuff class not already present in the image's
    panoptic annotation.
    """
    # add panoptic cls to stuff
    mask = np.array(Image.open(maskpath))
    mask_copy = np.ones_like(mask, dtype=np.uint8) * 255
    if is_train:
        obj_annos = train_obj_annos_copy
    else:
        obj_annos = val_obj_annos_copy
    file_name = osp.basename(maskpath)
    obj_anno = obj_annos[file_name]
    segments = obj_anno["segments_info"]
    panoptic = osp.join(panoptic_root, file_name)
    # panoptic PNGs encode segment ids as RGB triples.
    panoptic_mask = np.asarray(Image.open(panoptic), dtype=np.uint32)
    panoptic_mask = rgb2id(panoptic_mask)
    panoptic_copy = np.zeros_like(panoptic_mask, dtype=np.uint32)
    # Semantic remap: raw class id -> contiguous train id (255 = ignore).
    for clsID, trID in clsID_to_trID.items():
        mask_copy[mask == clsID] = trID
    seg_filename = (
        osp.join(out_mask_dir, "train2017" + suffix, osp.basename(maskpath))
        if is_train
        else osp.join(out_mask_dir, "val2017" + suffix, osp.basename(maskpath))
    )
    # Skip images with no labelled pixels at all.
    if len(np.unique(mask_copy)) == 1 and np.unique(mask_copy)[0] == 255:
        return
    Image.fromarray(mask_copy).save(seg_filename, "PNG")
    mask_array = np.asarray(Image.open(maskpath).convert("RGB"), dtype=np.uint32)
    mask_id = rgb2id(mask_array)
    mask_one = np.ones_like(mask, dtype=np.uint8)
    categories = [seg["category_id"] for seg in segments]
    new_segments = []
    # Keep existing panoptic segments whose (category_id - 1) survives the remap.
    # NOTE(review): the -1 offset presumably converts 1-based panoptic
    # category ids to the 0-based semantic ids — confirm against the dataset.
    for seg in segments:
        cat_id = seg["category_id"]
        id_cls = seg["id"]
        if cat_id-1 in clsID_to_trID.keys():
            panoptic_copy[panoptic_mask == id_cls] = id_cls
            new_segments.append(seg)
    # Add stuff classes (raw id > 90) missing from the panoptic annotation.
    for clsID, trID in clsID_to_trID.items():
        if clsID > 90 and clsID < 255 and clsID+1 not in categories:
            # NOTE(review): np.unique may return several ids here; only
            # id[0] is recorded in segment_info while the assignment below
            # broadcasts all of them — verify this is intended.
            id = np.unique(mask_id[mask == clsID])
            panoptic_copy[mask == clsID] = id
            area = mask_one[mask == clsID].sum()
            if area > 0:
                # Derive the tight bounding box of this stuff region.
                mask_box = np.zeros_like(mask, dtype=np.uint8)
                mask_box[mask == clsID] = 1
                mask_box = torch.Tensor(mask_box).unsqueeze(0)
                bbox = BitMasks(mask_box > 0).get_bounding_boxes()
                out_box = [int(idx) for idx in bbox.tensor[0]]
                segment_info = {'id': int(id[0]),
                                'category_id': int(clsID+1),
                                'iscrowd': 0,
                                'bbox': out_box,
                                'area': int(area)}
                new_segments.append(segment_info)
    # Persist the rewritten segment list back into the shared annotation dict.
    if is_train:
        train_obj_annos_copy[file_name]["segments_info"] = new_segments
    else:
        val_obj_annos_copy[file_name]["segments_info"] = new_segments
    panoptic_filename = (
        osp.join(out_panoptic_mask_dir, "train2017" + suffix, osp.basename(maskpath))
        if is_train
        else osp.join(out_panoptic_mask_dir, "val2017" + suffix, osp.basename(maskpath))
    )
    panoptic_copy = id2rgb(panoptic_copy)
    Image.fromarray(panoptic_copy).save(panoptic_filename, "PNG")
def parse_args():
    """Build and evaluate the command-line interface for this script."""
    cli = argparse.ArgumentParser(
        description="Convert COCO annotations to mmsegmentation format"
    )  # noqa
    cli.add_argument("coco_path", help="coco path")
    cli.add_argument("-o", "--out_dir", help="output path")
    cli.add_argument("--nproc", default=16, type=int, help="number of process")
    return cli.parse_args()
def main():
args = parse_args()
coco_path = args.coco_path
panoptic_train_json_path = osp.join(coco_path, "annotations", "panoptic_train2017.json")
panoptic_val_json_path = osp.join(coco_path, "annotations", "panoptic_val2017.json")
panoptic_train_root = osp.join(coco_path, "panoptic_train2017")
panoptic_val_root = osp.join(coco_path, "panoptic_val2017")
nproc = args.nproc
print(full_clsID_to_trID)
print(base_clsID_to_trID)
print(novel_clsID_to_trID)
out_dir = args.out_dir or coco_path
out_mask_dir = osp.join(out_dir, "stuffthingmaps_panoptic_detectron2")
out_panoptic_mask_dir = osp.join(out_dir, "panoptic_detectron2")
for dir_name in [
"train2017",
"val2017",
"train2017_base",
"train2017_novel",
"val2017_base",
"val2017_novel",
]:
os.makedirs(osp.join(out_mask_dir, dir_name), exist_ok=True)
os.makedirs(osp.join(out_panoptic_mask_dir, dir_name), exist_ok=True)
train_list = glob(osp.join(coco_path, "stuffthingmaps", "train2017", "*.png"))
test_list = glob(osp.join(coco_path, "stuffthingmaps", "val2017", "*.png"))
assert (
len(train_list) + len(test_list)
) == COCO_LEN, "Wrong length of list {} & {}".format(
len(train_list), len(test_list)
)
with open(panoptic_train_json_path) as f:
train_obj = json.load(f)
train_obj_annos = {anno["file_name"]: anno for anno in train_obj["annotations"]}
train_obj_all = copy.deepcopy(train_obj)
with open(panoptic_val_json_path) as f:
val_obj = json.load(f)
val_obj_annos = {anno["file_name"]: anno for anno in val_obj["annotations"]}
val_obj_all = copy.deepcopy(val_obj)
global train_obj_annos_copy
global val_obj_annos_copy
if args.nproc > 1:
train_obj_annos_copy = copy.deepcopy(train_obj_annos)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_train_root,
out_panoptic_mask_dir=out_panoptic_mask_dir,
is_train=True,
),
train_list,
nproc=nproc,
)
suffix = ""
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_train2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
train_obj_all["annotations"] = list(train_obj_annos_copy.values())
json.dump(train_obj_all, f)
val_obj_annos_copy = copy.deepcopy(val_obj_annos)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_val_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=False,
),
test_list,
nproc=nproc,
)
suffix = ""
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_val2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
val_obj_all["annotations"] = list(val_obj_annos_copy.values())
json.dump(val_obj_all, f)
train_obj_annos_copy = copy.deepcopy(train_obj_annos)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_train_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=True,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
train_list,
nproc=nproc,
)
suffix = "_base"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_train2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
train_obj_all["annotations"] = list(train_obj_annos_copy.values())
json.dump(train_obj_all, f)
val_obj_annos_copy = copy.deepcopy(val_obj_annos)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_val_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=False,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
test_list,
nproc=nproc,
)
suffix = "_base"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_val2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
val_obj_all["annotations"] = list(val_obj_annos_copy.values())
json.dump(val_obj_all, f)
train_obj_annos_copy = copy.deepcopy(train_obj_annos)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_train_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=True,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
train_list,
nproc=nproc,
)
suffix = "_novel"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_train2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
train_obj_all["annotations"] = list(train_obj_annos_copy.values())
json.dump(train_obj_all, f)
val_obj_annos_copy = copy.deepcopy(val_obj_annos)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_val_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=False,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
test_list,
nproc=nproc,
)
suffix = "_novel"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_val2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
val_obj_all["annotations"] = list(val_obj_annos_copy.values())
json.dump(val_obj_all, f)
else:
train_obj_annos_copy = copy.deepcopy(train_obj_annos)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_train_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=True),
train_list,
)
suffix = ""
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_train2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
train_obj_all["annotations"] = list(train_obj_annos_copy.values())
json.dump(train_obj_all, f)
val_obj_annos_copy = copy.deepcopy(val_obj_annos)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_val_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=False),
test_list,
)
suffix = ""
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_val2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
val_obj_all["annotations"] = list(val_obj_annos_copy.values())
json.dump(val_obj_all, f)
train_obj_annos_copy = copy.deepcopy(train_obj_annos)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_train_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=True,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
train_list,
)
suffix = "_base"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_train2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
train_obj_all["annotations"] = list(train_obj_annos_copy.values())
json.dump(train_obj_all, f)
val_obj_annos_copy = copy.deepcopy(val_obj_annos)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_val_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=False,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
test_list,
)
suffix = "_base"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_val2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
val_obj_all["annotations"] = list(val_obj_annos_copy.values())
json.dump(val_obj_all, f)
train_obj_annos_copy = copy.deepcopy(train_obj_annos)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_train_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=True,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
train_list,
)
suffix = "_novel"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_train2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
train_obj_all["annotations"] = list(train_obj_annos_copy.values())
json.dump(train_obj_all, f)
val_obj_annos_copy = copy.deepcopy(val_obj_annos)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
panoptic_root=panoptic_val_root,
out_panoptic_mask_dir = out_panoptic_mask_dir,
is_train=False,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
test_list,
)
suffix = "_novel"
panoptic_json_filename = osp.join(out_panoptic_mask_dir, "panoptic_val2017" + suffix + ".json")
with open(panoptic_json_filename, "w") as f:
val_obj_all["annotations"] = list(val_obj_annos_copy.values())
json.dump(val_obj_all, f)
print("Done!")
if __name__ == "__main__":
main()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/datasets/prepare_ade20k_pan_seg_novel.py | datasets/prepare_ade20k_pan_seg_novel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import json
import os
from collections import Counter
import numpy as np
import tqdm
from panopticapi.utils import IdGenerator, save_json
from PIL import Image
ADE20K_SEM_SEG_CATEGORIES = [
"wall",
"building",
"sky",
"floor",
"tree",
"ceiling",
"road, route",
"bed",
"window ",
"grass",
"cabinet",
"sidewalk, pavement",
"person",
"earth, ground",
"door",
"table",
"mountain, mount",
"plant",
"curtain",
"chair",
"car",
"water",
"painting, picture",
"sofa",
"shelf",
"house",
"sea",
"mirror",
"rug",
"field",
"armchair",
"seat",
"fence",
"desk",
"rock, stone",
"wardrobe, closet, press",
"lamp",
"tub",
"rail",
"cushion",
"base, pedestal, stand",
"box",
"column, pillar",
"signboard, sign",
"chest of drawers, chest, bureau, dresser",
"counter",
"sand",
"sink",
"skyscraper",
"fireplace",
"refrigerator, icebox",
"grandstand, covered stand",
"path",
"stairs",
"runway",
"case, display case, showcase, vitrine",
"pool table, billiard table, snooker table",
"pillow",
"screen door, screen",
"stairway, staircase",
"river",
"bridge, span",
"bookcase",
"blind, screen",
"coffee table",
"toilet, can, commode, crapper, pot, potty, stool, throne",
"flower",
"book",
"hill",
"bench",
"countertop",
"stove",
"palm, palm tree",
"kitchen island",
"computer",
"swivel chair",
"boat",
"bar",
"arcade machine",
"hovel, hut, hutch, shack, shanty",
"bus",
"towel",
"light",
"truck",
"tower",
"chandelier",
"awning, sunshade, sunblind",
"street lamp",
"booth",
"tv",
"plane",
"dirt track",
"clothes",
"pole",
"land, ground, soil",
"bannister, banister, balustrade, balusters, handrail",
"escalator, moving staircase, moving stairway",
"ottoman, pouf, pouffe, puff, hassock",
"bottle",
"buffet, counter, sideboard",
"poster, posting, placard, notice, bill, card",
"stage",
"van",
"ship",
"fountain",
"conveyer belt, conveyor belt, conveyer, conveyor, transporter",
"canopy",
"washer, automatic washer, washing machine",
"plaything, toy",
"pool",
"stool",
"barrel, cask",
"basket, handbasket",
"falls",
"tent",
"bag",
"minibike, motorbike",
"cradle",
"oven",
"ball",
"food, solid food",
"step, stair",
"tank, storage tank",
"trade name",
"microwave",
"pot",
"animal",
"bicycle",
"lake",
"dishwasher",
"screen",
"blanket, cover",
"sculpture",
"hood, exhaust hood",
"sconce",
"vase",
"traffic light",
"tray",
"trash can",
"fan",
"pier",
"crt screen",
"plate",
"monitor",
"bulletin board",
"shower",
"radiator",
"glass, drinking glass",
"clock",
"flag", # noqa
]
PALETTE = [
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 200],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
]
ALL_ID = [i for i in range(150)]
NOVEL_ID = [9, 15, 30, 37, 49, 60, 74, 81, 89, 99, 112, 128, 136, 143, 149]
BASE_ID = [i for i in ALL_ID if i not in NOVEL_ID]
if __name__ == "__main__":
dataset_dir = os.getenv("DETECTRON2_DATASETS", "datasets")
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(dataset_dir, f"ADEChallengeData2016/images/{dirname}/")
semantic_dir = os.path.join(dataset_dir, f"ADEChallengeData2016/annotations/{dirname}/")
instance_dir = os.path.join(
dataset_dir, f"ADEChallengeData2016/annotations_instance/{dirname}/"
)
# folder to store panoptic PNGs
out_folder = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_panoptic_{name}/")
# json with segmentations information
out_file = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_panoptic_{name}.json")
if not os.path.isdir(out_folder):
print("Creating folder {} for panoptic segmentation PNGs".format(out_folder))
os.mkdir(out_folder)
# json config
config_file = "datasets/ade20k_instance_imgCatIds.json"
with open(config_file) as f:
config = json.load(f)
# load catid mapping
mapping_file = "datasets/ade20k_instance_catid_mapping.txt"
with open(mapping_file) as f:
map_id = {}
for i, line in enumerate(f.readlines()):
if i == 0:
continue
ins_id, sem_id, _ = line.strip().split()
# shift id by 1 because we want it to start from 0!
# ignore_label becomes 255
map_id[int(ins_id) - 1] = int(sem_id) - 1
base_map_id = {k: v for (k, v) in map_id.items() if v in BASE_ID}
novel_map_id = {k: v for (k, v) in map_id.items() if v in NOVEL_ID}
ADE20K_150_CATEGORIES = []
for cat_id, cat_name in enumerate(ADE20K_SEM_SEG_CATEGORIES):
ADE20K_150_CATEGORIES.append(
{
"name": cat_name,
"id": cat_id,
"isthing": int(cat_id in map_id.values()),
"color": PALETTE[cat_id],
}
)
categories_dict = {cat["id"]: cat for cat in ADE20K_150_CATEGORIES}
base_category_dict = {cat["id"]: cat for cat in ADE20K_150_CATEGORIES if cat["id"] in BASE_ID}
novel_category_dict = {cat["id"]: cat for cat in ADE20K_150_CATEGORIES if cat["id"] in NOVEL_ID}
panoptic_json_categories = ADE20K_150_CATEGORIES[:]
base_panoptic_json_categories = [cat for cat in ADE20K_150_CATEGORIES[:] if cat["id"] in BASE_ID]
novel_panoptic_json_categories = [cat for cat in ADE20K_150_CATEGORIES[:] if cat["id"] in NOVEL_ID]
for sub_name in ["base", "novel"]:
new_name = name + "_" + sub_name
# folder to store panoptic PNGs
out_folder = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_panoptic_{new_name}/")
# json with segmentations information
out_file = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_panoptic_{new_name}.json")
if not os.path.isdir(out_folder):
print("Creating folder {} for panoptic segmentation PNGs".format(out_folder))
os.mkdir(out_folder)
panoptic_json_images = []
panoptic_json_annotations = []
# cat_dict
if sub_name == "base":
new_categories_dict = base_category_dict
new_map_id = base_map_id
elif sub_name == "novel":
new_categories_dict = novel_category_dict
new_map_id = novel_map_id
filenames = sorted(glob.glob(os.path.join(image_dir, "*.jpg")))
for idx, filename in enumerate(tqdm.tqdm(filenames)):
panoptic_json_image = {}
panoptic_json_annotation = {}
image_id = os.path.basename(filename).split(".")[0]
panoptic_json_image["id"] = image_id
panoptic_json_image["file_name"] = os.path.basename(filename)
original_format = np.array(Image.open(filename))
panoptic_json_image["width"] = original_format.shape[1]
panoptic_json_image["height"] = original_format.shape[0]
pan_seg = np.zeros(
(original_format.shape[0], original_format.shape[1], 3), dtype=np.uint8
)
id_generator = IdGenerator(new_categories_dict)
filename_semantic = os.path.join(semantic_dir, image_id + ".png")
filename_instance = os.path.join(instance_dir, image_id + ".png")
sem_seg = np.asarray(Image.open(filename_semantic))
ins_seg = np.asarray(Image.open(filename_instance))
assert sem_seg.dtype == np.uint8
assert ins_seg.dtype == np.uint8
semantic_cat_ids = sem_seg - 1
instance_cat_ids = ins_seg[..., 0] - 1
# instance id starts from 1!
# because 0 is reserved as VOID label
instance_ins_ids = ins_seg[..., 1]
segm_info = []
# NOTE: there is some overlap between semantic and instance annotation
# thus we paste stuffs first
# process stuffs
for semantic_cat_id in np.unique(semantic_cat_ids):
if semantic_cat_id == 255:
continue
if semantic_cat_id not in new_categories_dict.keys():
continue
if new_categories_dict[semantic_cat_id]["isthing"]:
continue
mask = semantic_cat_ids == semantic_cat_id
# should not have any overlap
assert pan_seg[mask].sum() == 0
segment_id, color = id_generator.get_id_and_color(semantic_cat_id)
pan_seg[mask] = color
area = np.sum(mask) # segment area computation
# bbox computation for a segment
hor = np.sum(mask, axis=0)
hor_idx = np.nonzero(hor)[0]
x = hor_idx[0]
width = hor_idx[-1] - x + 1
vert = np.sum(mask, axis=1)
vert_idx = np.nonzero(vert)[0]
y = vert_idx[0]
height = vert_idx[-1] - y + 1
bbox = [int(x), int(y), int(width), int(height)]
segm_info.append(
{
"id": int(segment_id),
"category_id": int(semantic_cat_id),
"area": int(area),
"bbox": bbox,
"iscrowd": 0,
}
)
# process things
for thing_id in np.unique(instance_ins_ids):
if thing_id == 0:
continue
mask = instance_ins_ids == thing_id
instance_cat_id = np.unique(instance_cat_ids[mask])
assert len(instance_cat_id) == 1
if instance_cat_id[0] not in new_map_id.keys():
continue
semantic_cat_id = new_map_id[instance_cat_id[0]]
segment_id, color = id_generator.get_id_and_color(semantic_cat_id)
pan_seg[mask] = color
area = np.sum(mask) # segment area computation
# bbox computation for a segment
hor = np.sum(mask, axis=0)
hor_idx = np.nonzero(hor)[0]
x = hor_idx[0]
width = hor_idx[-1] - x + 1
vert = np.sum(mask, axis=1)
vert_idx = np.nonzero(vert)[0]
y = vert_idx[0]
height = vert_idx[-1] - y + 1
bbox = [int(x), int(y), int(width), int(height)]
segm_info.append(
{
"id": int(segment_id),
"category_id": int(semantic_cat_id),
"area": int(area),
"bbox": bbox,
"iscrowd": 0,
}
)
panoptic_json_annotation = {
"image_id": image_id,
"file_name": image_id + ".png",
"segments_info": segm_info,
}
Image.fromarray(pan_seg).save(os.path.join(out_folder, image_id + ".png"))
panoptic_json_images.append(panoptic_json_image)
panoptic_json_annotations.append(panoptic_json_annotation)
# save this
d = {
"images": panoptic_json_images,
"annotations": panoptic_json_annotations,
"categories": panoptic_json_categories,
}
save_json(d, out_file)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/datasets/prepare_ade20k_sem_seg_novel.py | datasets/prepare_ade20k_sem_seg_novel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from pathlib import Path
import numpy as np
import tqdm
from PIL import Image
ALL_ID = [i for i in range(150)]
NOVEL_ID = [9, 15, 30, 37, 49, 60, 74, 81, 89, 99, 112, 128, 136, 143, 149]
BASE_ID = [i for i in ALL_ID if i not in NOVEL_ID]
def convert(input, output, index=None):
img = np.asarray(Image.open(input))
assert img.dtype == np.uint8
img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1
if index is not None:
mapping = {i: k for k, i in enumerate(index)}
img = np.vectorize(lambda x: mapping[x] if x in mapping else 255)(
img.astype(np.float)
).astype(np.uint8)
Image.fromarray(img).save(output)
if __name__ == "__main__":
dataset_dir = (
Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016"
)
for name in ["training", "validation"]:
annotation_dir = dataset_dir / "annotations" / name
output_dir = dataset_dir / "annotations_detectron2" / name
output_dir.mkdir(parents=True, exist_ok=True)
for file in tqdm.tqdm(list(annotation_dir.iterdir())):
output_file = output_dir / file.name
convert(file, output_file)
base_name = name + "_base"
output_dir = dataset_dir / "annotations_detectron2" / base_name
output_dir.mkdir(parents=True, exist_ok=True)
for file in tqdm.tqdm(list(annotation_dir.iterdir())):
output_file = output_dir / file.name
convert(file, output_file, BASE_ID)
novel_name = name + "_novel"
output_dir = dataset_dir / "annotations_detectron2" / novel_name
output_dir.mkdir(parents=True, exist_ok=True)
for file in tqdm.tqdm(list(annotation_dir.iterdir())):
output_file = output_dir / file.name
convert(file, output_file, NOVEL_ID)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/datasets/prepare_voc_sem_seg.py | datasets/prepare_voc_sem_seg.py | import argparse
import os
import os.path as osp
import shutil
from functools import partial
from glob import glob
import mmcv
import numpy as np
from PIL import Image
full_clsID_to_trID = {
0: 255,
1: 0,
2: 1,
3: 2,
4: 3,
5: 4,
6: 5,
7: 6,
8: 7,
9: 8,
10: 9,
11: 10,
12: 11,
13: 12,
14: 13,
15: 14,
16: 15,
17: 16,
18: 17,
19: 18,
20: 19,
255: 255,
}
novel_clsID = [16, 17, 18, 19, 20]
base_clsID = [k for k in full_clsID_to_trID.keys() if k not in novel_clsID + [0, 255]]
novel_clsID_to_trID = {k: i for i, k in enumerate(novel_clsID)}
base_clsID_to_trID = {k: i for i, k in enumerate(base_clsID)}
def convert_to_trainID(
maskpath, out_mask_dir, is_train, clsID_to_trID=full_clsID_to_trID, suffix=""
):
mask = np.array(Image.open(maskpath))
mask_copy = np.ones_like(mask, dtype=np.uint8) * 255
for clsID, trID in clsID_to_trID.items():
mask_copy[mask == clsID] = trID
seg_filename = (
osp.join(out_mask_dir, "train" + suffix, osp.basename(maskpath))
if is_train
else osp.join(out_mask_dir, "val" + suffix, osp.basename(maskpath))
)
if len(np.unique(mask_copy)) == 1 and np.unique(mask_copy)[0] == 255:
return
Image.fromarray(mask_copy).save(seg_filename, "PNG")
def parse_args():
parser = argparse.ArgumentParser(
description="Convert VOC2021 annotations to mmsegmentation format"
) # noqa
parser.add_argument("voc_path", help="voc path")
parser.add_argument("-o", "--out_dir", help="output path")
parser.add_argument("--nproc", default=16, type=int, help="number of process")
args = parser.parse_args()
return args
def main():
args = parse_args()
voc_path = args.voc_path
nproc = args.nproc
print(full_clsID_to_trID)
print(base_clsID_to_trID)
print(novel_clsID_to_trID)
out_dir = args.out_dir or voc_path
# out_img_dir = osp.join(out_dir, 'images')
out_mask_dir = osp.join(out_dir, "annotations_detectron2")
out_image_dir = osp.join(out_dir, "images_detectron2")
for dir_name in [
"train",
"val",
"train_base",
"train_novel",
"val_base",
"val_novel",
]:
os.makedirs(osp.join(out_mask_dir, dir_name), exist_ok=True)
if dir_name in ["train", "val"]:
os.makedirs(osp.join(out_image_dir, dir_name), exist_ok=True)
train_list = [
osp.join(voc_path, "SegmentationClassAug", f + ".png")
for f in np.loadtxt(osp.join(voc_path, "train.txt"), dtype=np.str).tolist()
]
test_list = [
osp.join(voc_path, "SegmentationClassAug", f + ".png")
for f in np.loadtxt(osp.join(voc_path, "val.txt"), dtype=np.str).tolist()
]
if args.nproc > 1:
mmcv.track_parallel_progress(
partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True),
train_list,
nproc=nproc,
)
mmcv.track_parallel_progress(
partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False),
test_list,
nproc=nproc,
)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=True,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
train_list,
nproc=nproc,
)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=True,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
train_list,
nproc=nproc,
)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=True,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
train_list,
nproc=nproc,
)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=False,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
test_list,
nproc=nproc,
)
else:
mmcv.track_progress(
partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True),
train_list,
)
mmcv.track_progress(
partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False),
test_list,
)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=True,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
train_list,
)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=False,
clsID_to_trID=base_clsID_to_trID,
suffix="_base",
),
test_list,
)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=True,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
train_list,
)
mmcv.track_progress(
partial(
convert_to_trainID,
out_mask_dir=out_mask_dir,
is_train=False,
clsID_to_trID=novel_clsID_to_trID,
suffix="_novel",
),
test_list,
)
print("Done!")
if __name__ == "__main__":
main()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/datasets/prepare_ade20k_ins_seg_novel.py | datasets/prepare_ade20k_ins_seg_novel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import json
import os
from collections import Counter
import numpy as np
import tqdm
from panopticapi.utils import IdGenerator, save_json
from PIL import Image
import pycocotools.mask as mask_util
ALL_ID = [i for i in range(150)]
NOVEL_ID = [9, 15, 30, 37, 49, 60, 74, 81, 89, 99, 112, 128, 136, 143, 149]
BASE_ID = [i for i in ALL_ID if i not in NOVEL_ID]
if __name__ == "__main__":
dataset_dir = os.getenv("DETECTRON2_DATASETS", "datasets")
for name, dirname in [("train", "training"), ("val", "validation")]:
image_dir = os.path.join(dataset_dir, f"ADEChallengeData2016/images/{dirname}/")
instance_dir = os.path.join(
dataset_dir, f"ADEChallengeData2016/annotations_instance/{dirname}/"
)
# img_id = 0
ann_id = 1
# json
out_file = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_instance_{name}.json")
# json config
instance_config_file = "datasets/ade20k_instance_imgCatIds.json"
with open(instance_config_file) as f:
category_dict = json.load(f)["categories"]
# load catid mapping
# it is important to share category id for both instance and panoptic annotations
mapping_file = "datasets/ade20k_instance_catid_mapping.txt"
with open(mapping_file) as f:
map_id = {}
for i, line in enumerate(f.readlines()):
if i == 0:
continue
ins_id, sem_id, _ = line.strip().split()
# shift id by 1 because we want it to start from 0!
# ignore_label becomes 255
map_id[int(ins_id)] = int(sem_id) - 1
for cat in category_dict:
cat["id"] = map_id[cat["id"]]
base_category_dict = [cat for cat in category_dict if cat["id"] in BASE_ID]
novel_category_dict = [cat for cat in category_dict if cat["id"] in NOVEL_ID]
base_map_id = {k: v for (k, v) in map_id.items() if v in BASE_ID}
novel_map_id = {k: v for (k, v) in map_id.items() if v in NOVEL_ID}
filenames = sorted(glob.glob(os.path.join(image_dir, "*.jpg")))
for sub_name in ["base", "novel"]:
# json
new_name = name + "_" + sub_name
out_file = os.path.join(dataset_dir, f"ADEChallengeData2016/ade20k_instance_{new_name}.json")
# cat_dict
if sub_name == "base":
new_category_dict = base_category_dict
new_map_id = base_map_id
elif sub_name == "novel":
new_category_dict = novel_category_dict
new_map_id = novel_map_id
ann_dict = {}
images = []
annotations = []
for idx, filename in enumerate(tqdm.tqdm(filenames)):
image = {}
image_id = os.path.basename(filename).split(".")[0]
image["id"] = image_id
image["file_name"] = os.path.basename(filename)
original_format = np.array(Image.open(filename))
image["width"] = original_format.shape[1]
image["height"] = original_format.shape[0]
images.append(image)
filename_instance = os.path.join(instance_dir, image_id + ".png")
ins_seg = np.asarray(Image.open(filename_instance))
assert ins_seg.dtype == np.uint8
instance_cat_ids = ins_seg[..., 0]
# instance id starts from 1!
# because 0 is reserved as VOID label
instance_ins_ids = ins_seg[..., 1]
# process things
for thing_id in np.unique(instance_ins_ids):
if thing_id == 0:
continue
mask = instance_ins_ids == thing_id
instance_cat_id = np.unique(instance_cat_ids[mask])
assert len(instance_cat_id) == 1
if instance_cat_id[0] not in new_map_id.keys():
continue
anno = {}
anno['id'] = ann_id
ann_id += 1
anno['image_id'] = image['id']
anno["iscrowd"] = int(0)
anno["category_id"] = int(new_map_id[instance_cat_id[0]])
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = [int(xmin), int(ymin), int(xmax - xmin + 1), int(ymax - ymin + 1)]
# if xmax <= xmin or ymax <= ymin:
# continue
rle = mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
rle["counts"] = rle["counts"].decode("utf-8")
anno["segmentation"] = rle
anno["area"] = int(mask_util.area(rle))
annotations.append(anno)
# save this
ann_dict['images'] = images
ann_dict['categories'] = new_category_dict
ann_dict['annotations'] = annotations
save_json(ann_dict, out_file)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/datasets/prepare_coco_stuff_164k_sem_seg.py | datasets/prepare_coco_stuff_164k_sem_seg.py | import argparse
import os
import os.path as osp
import shutil
from functools import partial
from glob import glob
import mmcv
import numpy as np
from PIL import Image
COCO_LEN = 123287
full_clsID_to_trID = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
12: 11,
13: 12,
14: 13,
15: 14,
16: 15,
17: 16,
18: 17,
19: 18,
20: 19,
21: 20,
22: 21,
23: 22,
24: 23,
26: 24,
27: 25,
30: 26,
31: 27,
32: 28,
33: 29,
34: 30,
35: 31,
36: 32,
37: 33,
38: 34,
39: 35,
40: 36,
41: 37,
42: 38,
43: 39,
45: 40,
46: 41,
47: 42,
48: 43,
49: 44,
50: 45,
51: 46,
52: 47,
53: 48,
54: 49,
55: 50,
56: 51,
57: 52,
58: 53,
59: 54,
60: 55,
61: 56,
62: 57,
63: 58,
64: 59,
66: 60,
69: 61,
71: 62,
72: 63,
73: 64,
74: 65,
75: 66,
76: 67,
77: 68,
78: 69,
79: 70,
80: 71,
81: 72,
83: 73,
84: 74,
85: 75,
86: 76,
87: 77,
88: 78,
89: 79,
91: 80,
92: 81,
93: 82,
94: 83,
95: 84,
96: 85,
97: 86,
98: 87,
99: 88,
100: 89,
101: 90,
102: 91,
103: 92,
104: 93,
105: 94,
106: 95,
107: 96,
108: 97,
109: 98,
110: 99,
111: 100,
112: 101,
113: 102,
114: 103,
115: 104,
116: 105,
117: 106,
118: 107,
119: 108,
120: 109,
121: 110,
122: 111,
123: 112,
124: 113,
125: 114,
126: 115,
127: 116,
128: 117,
129: 118,
130: 119,
131: 120,
132: 121,
133: 122,
134: 123,
135: 124,
136: 125,
137: 126,
138: 127,
139: 128,
140: 129,
141: 130,
142: 131,
143: 132,
144: 133,
145: 134,
146: 135,
147: 136,
148: 137,
149: 138,
150: 139,
151: 140,
152: 141,
153: 142,
154: 143,
155: 144,
156: 145,
157: 146,
158: 147,
159: 148,
160: 149,
161: 150,
162: 151,
163: 152,
164: 153,
165: 154,
166: 155,
167: 156,
168: 157,
169: 158,
170: 159,
171: 160,
172: 161,
173: 162,
174: 163,
175: 164,
176: 165,
177: 166,
178: 167,
179: 168,
180: 169,
181: 170,
255: 255,
}
novel_clsID = [20, 24, 32, 33, 40, 56, 86, 99, 105, 123, 144, 147, 148, 168, 171]
base_clsID = [k for k in full_clsID_to_trID.keys() if k not in novel_clsID + [255]]
novel_clsID_to_trID = {k: i for i, k in enumerate(novel_clsID)}
base_clsID_to_trID = {k: i for i, k in enumerate(base_clsID)}
def convert_to_trainID(
    maskpath, out_mask_dir, is_train, clsID_to_trID=full_clsID_to_trID, suffix=""
):
    """Remap one COCO-Stuff mask from class IDs to train IDs and save it.

    Pixels whose class ID is absent from ``clsID_to_trID`` become the ignore
    label 255. Masks that end up fully ignored (no mapped class at all) are
    skipped — nothing is written for them.

    Args:
        maskpath: path to the source PNG mask.
        out_mask_dir: root output directory; the file goes into its
            ``train2017{suffix}`` or ``val2017{suffix}`` subfolder.
        is_train: selects the train2017 vs. val2017 subfolder.
        clsID_to_trID: class-ID -> train-ID mapping (full/base/novel).
        suffix: subfolder suffix, e.g. ``"_base"`` or ``"_novel"``.
    """
    mask = np.array(Image.open(maskpath))
    # Start from all-ignore so any unmapped class ID stays at 255.
    mask_copy = np.full_like(mask, 255, dtype=np.uint8)
    for clsID, trID in clsID_to_trID.items():
        mask_copy[mask == clsID] = trID
    # Skip before composing the output path: a fully-ignored mask is useless.
    if np.all(mask_copy == 255):
        return
    split = "train2017" if is_train else "val2017"
    seg_filename = osp.join(out_mask_dir, split + suffix, osp.basename(maskpath))
    Image.fromarray(mask_copy).save(seg_filename, "PNG")
def parse_args():
    """Build and evaluate the command-line interface of this script.

    Returns:
        argparse.Namespace with ``coco_path`` (positional), ``out_dir``
        (``-o``, optional) and ``nproc`` (``--nproc``, default 16).
    """
    parser = argparse.ArgumentParser(
        description="Convert COCO Stuff 164k annotations to mmsegmentation format"
    )  # noqa
    parser.add_argument("coco_path", help="coco stuff path")
    parser.add_argument("-o", "--out_dir", help="output path")
    parser.add_argument("--nproc", default=16, type=int, help="number of process")
    return parser.parse_args()
def main():
    """Convert COCO-Stuff 164k masks into full/base/novel train-ID masks.

    For each of the three class mappings (full, base, novel) the train and
    val splits are converted, optionally in parallel.  The original code
    spelled this out as twelve nearly identical ``mmcv.track_*_progress``
    calls; here the (mapping, suffix) x (split) grid is data-driven.
    """
    args = parse_args()
    coco_path = args.coco_path
    nproc = args.nproc
    print(full_clsID_to_trID)
    print(base_clsID_to_trID)
    print(novel_clsID_to_trID)
    out_dir = args.out_dir or coco_path
    out_mask_dir = osp.join(out_dir, "stuffthingmaps_detectron2")
    for dir_name in [
        "train2017",
        "val2017",
        "train2017_base",
        "train2017_novel",
        "val2017_base",
        "val2017_novel",
    ]:
        os.makedirs(osp.join(out_mask_dir, dir_name), exist_ok=True)
    train_list = glob(osp.join(coco_path, "stuffthingmaps", "train2017", "*.png"))
    test_list = glob(osp.join(coco_path, "stuffthingmaps", "val2017", "*.png"))
    # Sanity check: the two splits together must cover the whole dataset.
    assert (
        len(train_list) + len(test_list)
    ) == COCO_LEN, "Wrong length of list {} & {}".format(
        len(train_list), len(test_list)
    )
    # Job order matches the original script: full, then base, then novel,
    # each converting train before val.
    jobs = [
        (full_clsID_to_trID, ""),
        (base_clsID_to_trID, "_base"),
        (novel_clsID_to_trID, "_novel"),
    ]
    for clsID_to_trID, suffix in jobs:
        for is_train, file_list in ((True, train_list), (False, test_list)):
            task = partial(
                convert_to_trainID,
                out_mask_dir=out_mask_dir,
                is_train=is_train,
                clsID_to_trID=clsID_to_trID,
                suffix=suffix,
            )
            if nproc > 1:
                mmcv.track_parallel_progress(task, file_list, nproc=nproc)
            else:
                mmcv.track_progress(task, file_list)
    print("Done!")


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/open_vocabulary_model.py | mask2former/open_vocabulary_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
from cgitb import text
import logging
import copy
import random
import os
from typing import Tuple
from PIL import Image
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.utils.logger import log_first_n
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data.transforms import ResizeTransform
from .modeling.clip_adapter import (
ClipAdapter,
MaskFormerClipAdapter,
build_prompt_learner,
)
from .mask_former_model import MaskFormer
@META_ARCH_REGISTRY.register()
class OpenVocabulary(MaskFormer):
    """MaskFormer whose mask proposals are classified against CLIP text
    embeddings, enabling open-vocabulary segmentation.

    Supports semantic / instance / panoptic inference.  At test time the
    in-model classification can be ensembled with a separate region-level
    CLIP classifier (see ``_ensemble_clip_classification``).
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        clip_adapter: nn.Module,
        region_clip_adapter: nn.Module = None,
        task_names: list,
        criterion: nn.Module,
        num_queries: int,
        semantic_on: bool,
        instance_on: bool,
        panoptic_on: bool,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        clip_ensemble: bool,
        clip_ensemble_weight: float,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
        test_topk_per_image: int,
        cfg,
    ):
        """
        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            sem_seg_head: a module that predicts semantic segmentation from backbone features
            criterion: a module that defines the loss
            clip_adapter: adapter for clip-based mask classification
            region_clip_adapter: optional separate adapter for region-level
                classification; when None, ``clip_adapter`` is reused (see the
                ``region_clip_adapter`` property)
            task_names: task prompt strings, e.g. "semantic segmentation."
            num_queries: int, number of queries
            semantic_on/instance_on/panoptic_on: which inference heads to run
            object_mask_threshold: float, threshold to filter query based on classification score
                for panoptic segmentation inference
            overlap_threshold: overlap threshold used in general inference for panoptic segmentation
            metadata: dataset meta, get `thing` and `stuff` category names for panoptic
                segmentation inference
            size_divisibility: Some backbones require the input height and width to be divisible by a
                specific integer. We can use this to override such requirement.
            sem_seg_postprocess_before_inference: whether to resize the prediction back
                to original input size before semantic segmentation inference or after.
                For high-resolution dataset like Mapillary, resizing predictions before
                inference will cause OOM error.
            clip_ensemble: whether to fuse in-model and CLIP classification
            clip_ensemble_weight: geometric-mean weight used in the fusion
            pixel_mean, pixel_std: list or tuple with #channels element, representing
                the per-channel mean and std to be used to normalize the input image
            test_topk_per_image: top-k detections kept per image at test time
            cfg: full config node (accepted for interface compatibility; unused)
        """
        super().__init__(
            backbone=backbone,
            sem_seg_head=sem_seg_head,
            criterion=criterion,
            num_queries=num_queries,
            semantic_on=semantic_on,
            instance_on=instance_on,
            panoptic_on=panoptic_on,
            object_mask_threshold=object_mask_threshold,
            overlap_threshold=overlap_threshold,
            metadata=metadata,
            size_divisibility=size_divisibility,
            sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference,
            pixel_mean=pixel_mean,
            pixel_std=pixel_std,
        )
        self.clip_adapter: ClipAdapter = clip_adapter
        self._region_clip_adapter = region_clip_adapter
        self.task_names = task_names
        self.clip_ensemble: bool = clip_ensemble
        self.clip_ensemble_weight: float = clip_ensemble_weight
        self.test_topk_per_image = test_topk_per_image

    @classmethod
    def from_config(cls, cfg):
        init_kwargs = MaskFormer.from_config(cfg)
        prompt_learner = build_prompt_learner(cfg.MODEL.CLIP_ADAPTER, cfg.INPUT.TASK_NAME)
        region_clip_adapter = None
        if cfg.MODEL.CLIP_ADAPTER.SEPERATE_ADAPTER:
            log_first_n(
                logging.WARNING,
                "Using different head for region classification and query classification",
            )
            # A second adapter (with its own prompt learner) scores image
            # regions independently of the query-classification adapter.
            cls_prompt_learner = build_prompt_learner(
                cfg.MODEL.CLIP_ADAPTER, cfg.INPUT.TASK_NAME
            )
            region_clip_adapter = MaskFormerClipAdapter(
                cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.CLIP_MODEL_NAME,
                cls_prompt_learner,
                mask_fill=cfg.MODEL.CLIP_ADAPTER.MASK_FILL,
                mask_expand_ratio=cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO,
                mask_thr=0.4,
                mask_matting=cfg.MODEL.CLIP_ADAPTER.MASK_MATTING,
                region_resized=cfg.MODEL.CLIP_ADAPTER.REGION_RESIZED,
            )
        clip_adapter = MaskFormerClipAdapter(
            cfg.MODEL.CLIP_ADAPTER.CLIP_MODEL_NAME,
            prompt_learner,
            mask_fill=cfg.MODEL.CLIP_ADAPTER.MASK_FILL,
            mask_expand_ratio=cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO,
            mask_thr=cfg.MODEL.CLIP_ADAPTER.MASK_THR,
            mask_matting=cfg.MODEL.CLIP_ADAPTER.MASK_MATTING,
            region_resized=cfg.MODEL.CLIP_ADAPTER.REGION_RESIZED,
        )
        init_kwargs["clip_adapter"] = clip_adapter
        init_kwargs["region_clip_adapter"] = region_clip_adapter
        init_kwargs["task_names"] = cfg.INPUT.TASK_NAME
        init_kwargs["clip_ensemble"] = cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE
        init_kwargs[
            "clip_ensemble_weight"
        ] = cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE_WEIGHT
        init_kwargs["test_topk_per_image"] = cfg.TEST.DETECTIONS_PER_IMAGE
        init_kwargs["metadata"] = MetadataCatalog.get(cfg.DATASETS.TEST[0])
        init_kwargs["semantic_on"] = "semantic segmentation." in cfg.INPUT.TASK_NAME
        init_kwargs["instance_on"] = "instance segmentation." in cfg.INPUT.TASK_NAME
        init_kwargs["panoptic_on"] = "panoptic segmentation." in cfg.INPUT.TASK_NAME
        init_kwargs["cfg"] = cfg
        return init_kwargs

    def forward(self, batched_inputs, text_labels=None):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                   * "image": Tensor, image in (C, H, W) format.
                   * "instances": per-region ground truth
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model (may be different
                     from input resolution), used in inference.
            text_labels: optional explicit class-name list; when None, class
                names are read from the (single) dataset's metadata.
        Returns:
            list[dict]:
                each dict has the results for one image. The dict contains the following keys:

                * "sem_seg":
                    A Tensor that represents the
                    per-pixel segmentation prediced by the head.
                    The prediction has shape KxHxW that represents the logits of
                    each class for each pixel.
                * "panoptic_seg":
                    A tuple that represent panoptic output
                    panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
                    segments_info (list[dict]): Describe each segment in `panoptic_seg`.
                        Each dict contains keys "id", "category_id", "isthing".
        """
        if text_labels is None:
            dataset_name = [x["meta"]["dataset_name"] for x in batched_inputs]
            assert len(set(dataset_name)) == 1
            dataset_name = dataset_name[0]
        else:
            dataset_name = " "

        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.size_divisibility)

        features = self.backbone(images.tensor)

        if text_labels is None:
            class_names = self.get_class_name_list(dataset_name)
        else:
            class_names = text_labels

        if self.training:
            # One task per iteration; the task string conditions the prompts.
            task_name = random.choice(self.task_names)
            text_features = self.clip_adapter.get_text_features(class_names, task_name)
            outputs, fused_text_features = self.sem_seg_head(features, text_features)
            outputs["pred_logits"] = self.clip_adapter.get_sim_logits(
                text_features, self.clip_adapter.normalize_feature(outputs["pred_logits"])
            )
            if "aux_outputs" in outputs.keys():
                for i in range(len(outputs["aux_outputs"])):
                    outputs["aux_outputs"][i][
                        "pred_logits"
                    ] = self.clip_adapter.get_sim_logits(
                        text_features,
                        self.clip_adapter.normalize_feature(
                            outputs["aux_outputs"][i]["pred_logits"]
                        ),
                    )
            # mask classification target: pick the GT matching the sampled task
            if task_name == "semantic segmentation.":
                gt_instances = [x["sem_instances"].to(self.device) for x in batched_inputs]
            elif task_name == "instance segmentation.":
                gt_instances = [x["ins_instances"].to(self.device) for x in batched_inputs]
            elif task_name == "panoptic segmentation.":
                gt_instances = [x["pan_instances"].to(self.device) for x in batched_inputs]
            targets = self.prepare_targets(gt_instances, images)

            losses = self.criterion(outputs, targets)
            for k in list(losses.keys()):
                if k in self.criterion.weight_dict:
                    losses[k] *= self.criterion.weight_dict[k]
                else:
                    # remove this loss if not specified in `weight_dict`
                    losses.pop(k)
            return losses
        else:
            # Semantic inference always runs first and seeds `processed_results`;
            # instance/panoptic passes (below) attach their results to the same
            # per-image dicts.
            task_name = "semantic segmentation."
            text_features = self.clip_adapter.get_text_features(class_names, task_name)
            outputs, fused_text_features = self.sem_seg_head(features, text_features)
            outputs["pred_logits"] = self.clip_adapter.get_sim_logits(
                text_features, self.clip_adapter.normalize_feature(outputs["pred_logits"])
            )
            mask_cls_results = outputs["pred_logits"]
            mask_pred_results = outputs["pred_masks"]
            # upsample masks to the padded input resolution
            mask_pred_results = F.interpolate(
                mask_pred_results,
                size=(images.tensor.shape[-2], images.tensor.shape[-1]),
                mode="bilinear",
                align_corners=True,
            )

            processed_results = []
            for mask_cls_result, mask_pred_result, input_per_image, image_size in zip(
                mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
            ):
                height = image_size[0]
                width = image_size[1]
                mask_pred_result = sem_seg_postprocess(
                    mask_pred_result, image_size, height, width
                )
                image = input_per_image["image"].to(self.device)

                # semantic segmentation inference
                r = self.semantic_inference(
                    mask_cls_result, mask_pred_result, image, class_names, task_name, dataset_name
                )
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = sem_seg_postprocess(r, image_size, height, width)
                processed_results.append({"sem_seg": r})

            # instance segmentation inference (re-runs the head with the
            # instance task prompt)
            if self.instance_on:
                task_name = "instance segmentation."
                text_features = self.clip_adapter.get_text_features(class_names, task_name)
                outputs, fused_text_features = self.sem_seg_head(features, text_features)
                outputs["pred_logits"] = self.clip_adapter.get_sim_logits(
                    text_features, self.clip_adapter.normalize_feature(outputs["pred_logits"])
                )
                mask_cls_results = outputs["pred_logits"]
                mask_pred_results = outputs["pred_masks"]
                mask_pred_results = F.interpolate(
                    mask_pred_results,
                    size=(images.tensor.shape[-2], images.tensor.shape[-1]),
                    mode="bilinear",
                    align_corners=True,
                )
                for i, (mask_cls_result, mask_pred_result, input_per_image, image_size) in enumerate(zip(
                    mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
                )):
                    height = image_size[0]
                    width = image_size[1]
                    mask_pred_result = sem_seg_postprocess(
                        mask_pred_result, image_size, height, width
                    )
                    image = input_per_image["image"].to(self.device)
                    instance_r = self.instance_inference(
                        mask_cls_result, mask_pred_result, image, class_names, task_name, dataset_name
                    )
                    height = input_per_image.get("height", image_size[0])
                    width = input_per_image.get("width", image_size[1])
                    # resize masks/boxes back to the original image resolution
                    if instance_r.pred_masks.shape[0] > 0:
                        cur_device = instance_r.pred_masks.device
                        instance_mask = instance_r.pred_masks.permute(1, 2, 0).cpu().numpy().astype(np.uint8)
                        ori_h, ori_w, num_mask = instance_mask.shape[0], instance_mask.shape[1], instance_mask.shape[2]
                        transform = ResizeTransform(ori_h, ori_w, height, width)
                        # ResizeTransform handles <=3 channels at a time, so
                        # resize the mask stack in chunks of 3.
                        if num_mask > 3:
                            instance_mask_list = [transform.apply_segmentation(instance_mask[:, :, p1-3:p1]) for p1 in range(3, num_mask+1, 3)]
                            if np.mod(num_mask, 3) > 0:
                                mask_last = transform.apply_segmentation(instance_mask[:, :, -np.mod(num_mask, 3):])
                                instance_mask_list.append(mask_last)
                            instance_mask = np.concatenate(instance_mask_list, axis=2)
                        else:
                            instance_mask = transform.apply_segmentation(instance_mask)
                        instance_mask = torch.tensor(instance_mask).permute(2, 0, 1).to(cur_device)
                        instance_r.pred_masks = instance_mask
                        if instance_r.pred_boxes is not None:
                            instance_boxes = instance_r.pred_boxes.tensor
                            x1_coords, x2_coords = instance_boxes[:, :2], instance_boxes[:, 2:]
                            x1_coords = transform.apply_coords(x1_coords)
                            x2_coords = transform.apply_coords(x2_coords)
                            instance_boxes = torch.cat((x1_coords, x2_coords), dim=1)
                            instance_r.pred_boxes = Boxes(instance_boxes)
                    processed_results[i]["instances"] = instance_r

            # panoptic segmentation inference (re-runs the head with the
            # panoptic task prompt)
            if self.panoptic_on:
                task_name = "panoptic segmentation."
                text_features = self.clip_adapter.get_text_features(class_names, task_name)
                outputs, fused_text_features = self.sem_seg_head(features, text_features)
                outputs["pred_logits"] = self.clip_adapter.get_sim_logits(
                    text_features, self.clip_adapter.normalize_feature(outputs["pred_logits"])
                )
                mask_cls_results = outputs["pred_logits"]
                mask_pred_results = outputs["pred_masks"]
                mask_pred_results = F.interpolate(
                    mask_pred_results,
                    size=(images.tensor.shape[-2], images.tensor.shape[-1]),
                    mode="bilinear",
                    align_corners=True,
                )
                for i, (mask_cls_result, mask_pred_result, input_per_image, image_size) in enumerate(zip(
                    mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
                )):
                    height = image_size[0]
                    width = image_size[1]
                    mask_pred_result = sem_seg_postprocess(
                        mask_pred_result, image_size, height, width
                    )
                    image = input_per_image["image"].to(self.device)
                    panoptic_r = self.panoptic_inference(
                        mask_cls_result, mask_pred_result, image, class_names, task_name, dataset_name
                    )
                    height = input_per_image.get("height", image_size[0])
                    width = input_per_image.get("width", image_size[1])
                    # resize the id map back to the original resolution and drop
                    # segments that disappeared during resizing
                    cur_device = panoptic_r[0].device
                    panoptic_mask = panoptic_r[0].cpu().numpy().astype(np.uint8)
                    ori_h, ori_w = panoptic_mask.shape[0], panoptic_mask.shape[1]
                    transform = ResizeTransform(ori_h, ori_w, height, width)
                    panoptic_mask = transform.apply_segmentation(panoptic_mask)
                    panoptic_r[0] = torch.tensor(panoptic_mask).to(cur_device)
                    segment_info = panoptic_r[1]
                    cur_seg_ids = list(torch.unique(panoptic_r[0]))
                    segment_info = [seg_info for seg_info in segment_info if seg_info["id"] in cur_seg_ids]
                    panoptic_r[1] = segment_info
                    processed_results[i]["panoptic_seg"] = panoptic_r

            return processed_results

    def _ensemble_clip_classification(
        self, mask_cls, mask_pred, region_masks, image, class_names, task_name, dataset_name
    ):
        """Fuse in-model classification with region-level CLIP classification.

        Shared by the three inference heads (the original code triplicated
        this block verbatim).

        Args:
            mask_cls: (Q, C) in-model class probabilities (no-object dropped).
            mask_pred: per-query masks, returned filtered when CLIP replaces
                the classification entirely.
            region_masks: sigmoid masks handed to the region CLIP adapter.
        Returns:
            (mask_cls, mask_pred) after fusion.
        """
        clip_cls, valid_flag = self.region_clip_adapter(
            image, class_names, task_name, region_masks, normalize=True
        )
        if clip_cls is None:
            clip_cls = torch.empty(0, mask_cls.shape[-1] + 1, device=self.device)
        # drop the no-object logit, then normalize
        clip_cls = F.softmax(clip_cls[:, :-1], dim=-1)
        if self.clip_ensemble_weight > 0:
            # Queries CLIP could not score keep a neutral score of 1.
            map_back_clip_cls = mask_cls.new_ones(mask_cls.shape)
            map_back_clip_cls[valid_flag] = clip_cls
            if hasattr(MetadataCatalog.get(dataset_name), "trainable_flag"):
                trained_mask = torch.Tensor(
                    MetadataCatalog.get(dataset_name).trainable_flag
                ).to(mask_cls.device)[None, :]
            else:
                trained_mask = mask_cls.new_zeros(mask_cls.shape)
            # Geometric-mean ensemble: seen (trained) classes weight the
            # in-model score by `clip_ensemble_weight`, unseen classes weight
            # the CLIP score instead.
            mask_cls = trained_mask * torch.pow(
                mask_cls, self.clip_ensemble_weight
            ) * torch.pow(map_back_clip_cls, 1 - self.clip_ensemble_weight) + (
                1 - trained_mask
            ) * torch.pow(
                mask_cls, 1 - self.clip_ensemble_weight
            ) * torch.pow(
                map_back_clip_cls, self.clip_ensemble_weight
            )
        else:
            # Pure CLIP classification: keep only regions CLIP could score.
            mask_cls = clip_cls
            mask_pred = mask_pred[valid_flag]
        return mask_cls, mask_pred

    def semantic_inference(self, mask_cls, mask_pred, image, class_names, task_name, dataset_name):
        """Per-pixel class probabilities (C, H, W) from query predictions."""
        mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
        mask_pred = mask_pred.sigmoid()
        if self.clip_ensemble:
            mask_cls, mask_pred = self._ensemble_clip_classification(
                mask_cls, mask_pred, mask_pred, image, class_names, task_name, dataset_name
            )
        semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
        return semseg

    def panoptic_inference(self, mask_cls, mask_pred, image, class_names, task_name, dataset_name):
        """Merge query masks into a panoptic id map plus segment metadata."""
        mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
        mask_pred = mask_pred.sigmoid()
        if self.clip_ensemble:
            mask_cls, mask_pred = self._ensemble_clip_classification(
                mask_cls, mask_pred, mask_pred, image, class_names, task_name, dataset_name
            )

        # Per-pixel argmax over the fused semantic map, used to confine each
        # query's mask to pixels that agree with its predicted class.
        sem_maps = torch.einsum("qc,qhw->chw", mask_cls, mask_pred).argmax(0)

        # sharpen scores (temperature 0.01) before picking per-query labels
        scores, labels = F.softmax(mask_cls / 0.01, dim=-1).max(-1)
        keep = labels.ne(self.sem_seg_head.num_classes)
        cur_scores = scores[keep]
        cur_classes = labels[keep]
        cur_masks = mask_pred[keep]
        cur_mask_cls = mask_cls[keep]

        cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks

        h, w = cur_masks.shape[-2:]
        panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
        segments_info = []

        current_segment_id = 0

        if cur_masks.shape[0] == 0:
            return [panoptic_seg, segments_info]
        else:
            # take argmax
            cur_mask_ids = cur_prob_masks.argmax(0)
            stuff_memory_list = {}
            for k in range(cur_classes.shape[0]):
                pred_class = cur_classes[k].item()
                pred_class_name = class_names[pred_class]
                isthing = pred_class_name in self.metadata.thing_classes
                original_area = (cur_masks[k] >= 0.5).sum().item()
                mask = (cur_masks[k] >= 0.5) & (sem_maps == pred_class)
                mask_area = mask.sum().item()

                if original_area > 0 and mask.sum().item() > 0:
                    # drop masks that mostly disagree with the semantic map,
                    # and low-confidence thing masks
                    if mask_area / original_area < self.overlap_threshold:
                        continue
                    if isthing and cur_scores[k] < 0.5:
                        continue
                    # merge stuff regions
                    if not isthing:
                        if int(pred_class) in stuff_memory_list.keys():
                            panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
                            continue
                        else:
                            stuff_memory_list[int(pred_class)] = current_segment_id + 1

                    current_segment_id += 1
                    panoptic_seg[mask] = current_segment_id

                    segments_info.append(
                        {
                            "id": current_segment_id,
                            "isthing": bool(isthing),
                            "category_id": int(pred_class),
                        }
                    )
            panoptic_res = [panoptic_seg, segments_info]
            return panoptic_res

    def instance_inference(self, mask_cls, mask_pred, image, class_names, task_name, dataset_name):
        """Select per-instance masks/scores/classes from query predictions."""
        image_size = mask_pred.shape[-2:]
        num_classes = mask_cls.shape[-1]

        mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
        if self.clip_ensemble:
            # NOTE: mask_pred stays as raw logits here; only the adapter gets
            # sigmoid masks.
            mask_cls, mask_pred = self._ensemble_clip_classification(
                mask_cls, mask_pred, mask_pred.sigmoid(), image, class_names, task_name, dataset_name
            )

        sem_maps = torch.einsum("qc,qhw->chw", mask_cls, mask_pred.sigmoid()).argmax(0)

        scores = F.softmax(mask_cls / 0.01, dim=-1)[:, :-1]
        scores_per_image, labels_per_image = scores.max(-1)

        if self.panoptic_on:
            # keep only "thing" classes
            keep = torch.zeros_like(scores_per_image).bool()
            for i, lab in enumerate(labels_per_image):
                pred_class_name = class_names[lab]
                keep[i] = pred_class_name in self.metadata.thing_classes
            scores_per_image = scores_per_image[keep]
            labels_per_image = labels_per_image[keep]
            mask_pred = mask_pred[keep]

        class_mask_memory = {}
        keep = torch.zeros_like(scores_per_image).bool()
        for k in range(labels_per_image.shape[0]):
            # BUGFIX: use this mask's own class as the grouping key. The
            # original keyed on `lab`, a stale variable from the panoptic
            # filtering loop above (NameError when panoptic_on is False), and
            # tensor keys hash by identity so the grouping never matched.
            # `.item()` mirrors panoptic_inference.
            pred_class = labels_per_image[k].item()
            original_area = (mask_pred[k] >= 0.5).sum().item()
            mask = (mask_pred[k] >= 0.5) & (sem_maps == pred_class)
            mask_area = mask.sum().item()
            if mask_area > 0 and original_area > 0 and scores_per_image[k] > 0.5:
                if mask_area / original_area > self.overlap_threshold:
                    keep[k] = True
                    class_mask_memory.setdefault(pred_class, []).append(k)

        # Within each class, drop the largest mask when the union of the
        # other masks already covers (>80% of) its area.
        for cls_id, idx_list in class_mask_memory.items():
            mask_area_list = [(mask_pred[i] >= 0.5).sum().item() for i in idx_list]
            max_area = np.max(np.array(mask_area_list))
            max_idx = np.argmax(np.array(mask_area_list))
            union_mask = torch.zeros_like(mask_pred[0]).bool()
            for i, idx in enumerate(idx_list):
                if i != max_idx:
                    union_mask = union_mask | (mask_pred[idx] >= 0.5)
            union_mask_area = union_mask.sum().item()
            if union_mask_area / max_area > 0.8:
                keep[idx_list[max_idx]] = False

        scores_per_image = scores_per_image[keep]
        labels_per_image = labels_per_image[keep]
        mask_pred = mask_pred[keep]

        result = Instances(image_size)
        result.pred_masks = (mask_pred > 0).float()
        result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4))
        # Uncomment the following to get boxes from masks (this is slow)
        # result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes()

        # calculate average mask prob
        mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6)
        result.scores = scores_per_image * mask_scores_per_image
        result.pred_classes = labels_per_image
        return result

    def get_class_name_list(self, dataset_name):
        """Stripped stuff-class names from the dataset's metadata."""
        class_names = [
            c.strip() for c in MetadataCatalog.get(dataset_name).stuff_classes
        ]
        return class_names

    @property
    def region_clip_adapter(self):
        # Fall back to the query-classification adapter when no separate
        # region adapter was configured.
        if self._region_clip_adapter is None:
            return self.clip_adapter
        return self._region_clip_adapter
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/proposal_classification.py | mask2former/proposal_classification.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import torch
import numpy as np
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY
from detectron2.utils.logger import log_every_n, log_first_n
from detectron2.utils.events import get_event_storage
from detectron2.utils.visualizer import Visualizer
from torch import nn
from torch.nn import functional as F
from .modeling.clip_adapter import (
ClipAdapter,
PredefinedPromptExtractor,
LearnablePromptExtractor,
)
from .modeling.clip_adapter.clip import CLIP
@META_ARCH_REGISTRY.register()
class ProposalClipClassifier(nn.Module):
    """Classify mask proposals by passing masked images through CLIP.

    Each input carries one ground-truth mask; everything outside the mask is
    replaced with the CLIP pixel mean, and the result is scored against the
    dataset's class names via the CLIP adapter.
    """

    @configurable
    def __init__(self, clip_adapter, task_names):
        super().__init__()
        self.clip_adapter = clip_adapter
        # store text features
        self.text_features = dict()
        self.task_names = task_names
        # CLIP normalization constants, registered as non-persistent buffers
        # so they follow .to(device) but are not saved in checkpoints.
        self.register_buffer(
            "pixel_mean", torch.Tensor(CLIP.PIXEL_MEAN).view(1, -1, 1, 1), False
        )
        self.register_buffer(
            "pixel_std", torch.Tensor(CLIP.PIXEL_STD).view(1, -1, 1, 1), False
        )
        # Log which parameters are trainable (typically only learnable prompts).
        names = [
            name for name, param in self.named_parameters() if param.requires_grad
        ]
        log_first_n(logging.INFO, names)

    @classmethod
    def from_config(cls, cfg):
        if cfg.MODEL.CLIP_ADAPTER.PROMPT_LEARNER == "predefined":
            prompt_learner = PredefinedPromptExtractor(
                cfg.MODEL.CLIP_ADAPTER.PREDEFINED_PROMPT_TEMPLATES
            )
        elif cfg.MODEL.CLIP_ADAPTER.PROMPT_LEARNER == "learnable":
            prompt_learner = LearnablePromptExtractor(
                prompt_dim=cfg.MODEL.CLIP_ADAPTER.PROMPT_DIM,
                prompt_shape=cfg.MODEL.CLIP_ADAPTER.PROMPT_SHAPE,
                task_prompt_shape=cfg.MODEL.CLIP_ADAPTER.TASK_PROMPT_SHAPE,
                task_names=cfg.INPUT.TASK_NAME,
            )
        else:
            raise NotImplementedError(
                "Prompt learner {} is not supported".format(
                    cfg.MODEL.CLIP_ADAPTER.PROMPT_LEARNER
                )
            )
        clip_adapter = ClipAdapter(
            cfg.MODEL.CLIP_ADAPTER.CLIP_MODEL_NAME, prompt_learner
        )
        return {
            "clip_adapter": clip_adapter,
            "task_names": cfg.INPUT.TASK_NAME,
        }

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                   * "image": Tensor, image in (C, H, W) format.
                   * "instances": per-region ground truth
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model (may be different
                     from input resolution), used in inference.

        Returns:
            In training, a dict with the classification loss (plus periodic
            visualizations/accuracy written to the event storage); in eval, a
            list of per-input dicts holding the classification logits.
        """
        dataset_name = [x["meta"]["dataset_name"] for x in batched_inputs]
        assert len(set(dataset_name)) == 1
        dataset_name = dataset_name[0]

        images = [x["image"].to(self.device) for x in batched_inputs]
        images = torch.stack(images)
        masks = (
            torch.stack([x["instances"].gt_masks[0] for x in batched_inputs])
            .to(self.device)
            .type(images.dtype)
        )
        # Map the short task tag to the full prompt string; default to
        # semantic segmentation when no task is given.
        if "task" in batched_inputs[0]:
            if batched_inputs[0]["task"] == "sem_seg":
                task_name = "semantic segmentation."
            elif batched_inputs[0]["task"] == "ins_seg":
                task_name = "instance segmentation."
            elif batched_inputs[0]["task"] == "pan_seg":
                task_name = "panoptic segmentation."
        else:
            task_name = "semantic segmentation."

        class_names = [
            c.strip() for c in MetadataCatalog.get(dataset_name).stuff_classes
        ]
        # normalize, then fill everything outside the mask with the pixel mean
        images = (images / 255.0 - self.pixel_mean) / self.pixel_std
        images = (
            images * masks[:, None, ...] + (1 - masks[:, None, ...]) * self.pixel_mean
        )
        logits = self.clip_adapter(images, class_names, task_name)

        metadata = MetadataCatalog.get(dataset_name)
        if self.training:
            target = torch.cat([x["instances"].gt_classes for x in batched_inputs])
            loss_cls = F.cross_entropy(logits, target.to(self.device))
            storage = get_event_storage()
            # Periodically visualize GT vs. predicted class on the first
            # sample and log batch accuracy.
            if storage.iter % 100 == 0:
                vis = Visualizer(
                    batched_inputs[0]["image"].permute(1, 2, 0).cpu().numpy().copy(),
                    metadata,
                )
                vis_mask = target.new_ones(batched_inputs[0]["image"].shape[1:]) * 255
                vis_mask[batched_inputs[0]["instances"].gt_masks[0]] = batched_inputs[
                    0
                ]["instances"].gt_classes[0]
                vis.draw_sem_seg(vis_mask)

                pvis = Visualizer(
                    batched_inputs[0]["image"].permute(1, 2, 0).cpu().numpy().copy(),
                    metadata,
                )
                vis_mask = target.new_ones(batched_inputs[0]["image"].shape[1:]) * 255
                vis_mask[batched_inputs[0]["instances"].gt_masks[0]] = (
                    logits[0].argmax().detach().cpu()
                )
                pvis.draw_sem_seg(vis_mask)
                storage.put_image(
                    "train_data",
                    np.concatenate(
                        [vis.get_output().get_image(), pvis.get_output().get_image()],
                        axis=1,
                    ),
                )
            storage.put_scalar(
                "train_acc",
                100.0
                * (logits.detach().argmax(dim=1).cpu() == target).sum()
                / len(target),
            )
            return {"loss_cls": loss_cls}
        else:
            sim = [{"classification": logit[None].detach()} for logit in logits]
            return sim

    @property
    def device(self):
        # All buffers share a device; pixel_mean is as good a witness as any.
        return self.pixel_mean.device
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/config.py | mask2former/config.py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
from detectron2.config import CfgNode as CN
def add_mask_former_default_config(cfg):
    """Register the baseline MaskFormer/Mask2Former options on *cfg* in place.

    Creates the `MODEL.MASK_FORMER` and `MODEL.SWIN` sub-nodes and fills in
    input, solver, transformer, pixel-decoder and point-loss defaults.
    """
    # data config
    # select the dataset mapper
    cfg.INPUT.DATASET_MAPPER_NAME = "mask_former_semantic"
    # Color augmentation
    cfg.INPUT.COLOR_AUG_SSD = False
    # We retry random cropping until no single category in semantic segmentation GT occupies more
    # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
    cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
    # Pad image and segmentation GT in dataset mapper.
    cfg.INPUT.SIZE_DIVISIBILITY = -1

    # solver config
    # weight decay on embedding
    cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0
    # optimizer
    cfg.SOLVER.OPTIMIZER = "ADAMW"
    cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1

    # mask_former model config
    cfg.MODEL.MASK_FORMER = CN()

    # loss
    cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True
    cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1
    cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0
    cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0
    cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0

    # transformer config
    cfg.MODEL.MASK_FORMER.NHEADS = 8
    cfg.MODEL.MASK_FORMER.DROPOUT = 0.1
    cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048
    cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0
    cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6
    cfg.MODEL.MASK_FORMER.PRE_NORM = False

    cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256
    cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100

    cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = "res5"
    cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False

    # mask_former inference config
    cfg.MODEL.MASK_FORMER.TEST = CN()
    cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = False
    cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False
    cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False
    cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0
    cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0
    cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False

    # you can use this config to override
    cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32

    # pixel decoder config
    cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256
    # adding transformer in pixel decoder
    cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0
    # pixel decoder
    cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = "BasePixelDecoder"

    # swin transformer backbone
    cfg.MODEL.SWIN = CN()
    cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224
    cfg.MODEL.SWIN.PATCH_SIZE = 4
    cfg.MODEL.SWIN.EMBED_DIM = 96
    cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
    cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
    cfg.MODEL.SWIN.WINDOW_SIZE = 7
    cfg.MODEL.SWIN.MLP_RATIO = 4.0
    cfg.MODEL.SWIN.QKV_BIAS = True
    cfg.MODEL.SWIN.QK_SCALE = None
    cfg.MODEL.SWIN.DROP_RATE = 0.0
    cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0
    cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3
    cfg.MODEL.SWIN.APE = False
    cfg.MODEL.SWIN.PATCH_NORM = True
    cfg.MODEL.SWIN.OUT_FEATURES = ["res2", "res3", "res4", "res5"]

    # NOTE: maskformer2 extra configs
    # transformer module
    cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = "MultiScaleMaskedTransformerDecoder"

    # LSJ aug
    cfg.INPUT.IMAGE_SIZE = 1024
    cfg.INPUT.MIN_SCALE = 0.1
    cfg.INPUT.MAX_SCALE = 2.0

    # MSDeformAttn encoder configs
    cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = ["res3", "res4", "res5"]
    cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4
    cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8

    # point loss configs
    # Number of points sampled during training for a mask point head.
    cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112
    # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
    # original paper.
    cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0
    # Importance sampling parameter for PointRend point sampling during training. Parameter `beta` in
    # the original paper.
    cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75
def add_our_config(cfg):
    """Register the FreeSeg-specific options (pseudo labels, sliding-window
    test, CLIP adapter, wandb) on *cfg* in place."""
    cfg.ORACLE = False
    cfg.PSEUDO = False
    cfg.PSEUDO_WITH_PRIOR = True
    cfg.PSEUDO_REJECT_THRESHOLD = 0.0
    cfg.TEST.SLIDING_WINDOW = False
    cfg.TEST.SLIDING_TILE_SIZE = 224
    cfg.TEST.SLIDING_OVERLAP = 2 / 3.0
    cfg.PSEUDO_FLAG_NAME = "trainable_flag"
    cfg.SOLVER.TEST_IMS_PER_BATCH = 1
    cfg.DATASETS.SAMPLE_PER_CLASS = -1
    cfg.DATASETS.SAMPLE_SEED = 0
    cfg.TEST.OPTIM = CN()
    cfg.TEST.OPTIM.LR = 0.001
    cfg.INPUT.TASK_NAME = ["semantic segmentation."]
    # whether to use dense crf
    cfg.TEST.DENSE_CRF = False
    # embedding head
    cfg.MODEL.SEM_SEG_HEAD.EMBEDDING_DIM = 512
    cfg.MODEL.SEM_SEG_HEAD.EMBED_HIDDEN_DIM = 1024
    cfg.MODEL.SEM_SEG_HEAD.EMBED_LAYERS = 2
    # clip_adapter
    cfg.MODEL.CLIP_ADAPTER = CN()
    cfg.MODEL.CLIP_ADAPTER.PROMPT_LEARNER = "learnable"
    # for predefined
    cfg.MODEL.CLIP_ADAPTER.PREDEFINED_PROMPT_TEMPLATES = ["a sculpture of a {}."]
    # for learnable prompt
    cfg.MODEL.CLIP_ADAPTER.PROMPT_DIM = 512
    cfg.MODEL.CLIP_ADAPTER.PROMPT_SHAPE = (16, 0)
    cfg.MODEL.CLIP_ADAPTER.TASK_PROMPT_SHAPE = 8
    cfg.MODEL.CLIP_ADAPTER.PROMPT_CHECKPOINT = ""
    cfg.MODEL.CLIP_ADAPTER.CLIP_MODEL_NAME = "ViT-B/16"
    cfg.MODEL.CLIP_ADAPTER.MASK_FILL = "mean"
    cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO = 1.0
    cfg.MODEL.CLIP_ADAPTER.MASK_THR = 0.5
    cfg.MODEL.CLIP_ADAPTER.MASK_MATTING = False
    cfg.MODEL.CLIP_ADAPTER.REGION_RESIZED = True
    cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE = True
    cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE_WEIGHT = 0.8
    # optional second CLIP adapter dedicated to region classification
    cfg.MODEL.CLIP_ADAPTER.SEPERATE_ADAPTER = False
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER = CN()
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.CLIP_MODEL_NAME = "ViT-B/16"
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_LEARNER = "predefined"
    # for predefined
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PREDEFINED_PROMPT_TEMPLATES = [
        "a photo of a {}."
    ]
    # for learnable prompt
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_DIM = 512
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_SHAPE = (16, 0)
    cfg.MODEL.CLIP_ADAPTER.REGION_CLIP_ADAPTER.PROMPT_CHECKPOINT = ""
    cfg.MODEL.SEM_SEG_HEAD.EMB_SIZE = 256
    cfg.MODEL.SEM_SEG_HEAD.EMBED_DIM = 2048
    cfg.MODEL.SEM_SEG_HEAD.NUM_HEADS = 8
    cfg.MODEL.SEM_SEG_HEAD.USE_LAYER_SCALE = True
    # wandb
    cfg.WANDB = CN()
    cfg.WANDB.PROJECT = "zero_shot_seg"
    cfg.WANDB.NAME = None
def add_mask_former_config(cfg):
    """
    Add config for MASK_FORMER.
    """
    # Base MaskFormer defaults first, then the project-specific overrides.
    for extend in (add_mask_former_default_config, add_our_config):
        extend(cfg)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/__init__.py | mask2former/__init__.py | from . import data
from . import modeling
from .config import add_mask_former_config
from .test_time_augmentation import SemanticSegmentorWithTTA
from .mask_former_model import MaskFormer
from .open_vocabulary_model import OpenVocabulary
from .proposal_classification import ProposalClipClassifier
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/mask_former_model.py | mask2former/mask_former_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
from typing import Tuple

import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import BitMasks, Boxes, ImageList, Instances

from .modeling.criterion import SetCriterion
from .modeling.matcher import HungarianMatcher
@META_ARCH_REGISTRY.register()
class MaskFormer(nn.Module):
    """
    Main class for mask classification semantic segmentation architectures.
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        criterion: nn.Module,
        num_queries: int,
        semantic_on: bool,
        instance_on: bool,
        panoptic_on: bool,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
        test_topk_per_image: int = 100,
    ):
        """
        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            sem_seg_head: a module that predicts semantic segmentation from backbone features
            criterion: a module that defines the loss
            num_queries: int, number of queries
            panoptic_on: bool, whether to output panoptic segmentation prediction
            object_mask_threshold: float, threshold to filter query based on classification score
                for panoptic segmentation inference
            overlap_threshold: overlap threshold used in general inference for panoptic segmentation
            metadata: dataset meta, get `thing` and `stuff` category names for panoptic
                segmentation inference
            size_divisibility: Some backbones require the input height and width to be divisible by a
                specific integer. We can use this to override such requirement.
            sem_seg_postprocess_before_inference: whether to resize the prediction back
                to original input size before semantic segmentation inference or after.
                For high-resolution dataset like Mapillary, resizing predictions before
                inference will cause OOM error.
            pixel_mean, pixel_std: list or tuple with #channels element, representing
                the per-channel mean and std to be used to normalize the input image
            test_topk_per_image: int, number of instance predictions kept per image
                during instance-segmentation inference.
        """
        super().__init__()
        self.backbone = backbone
        self.sem_seg_head = sem_seg_head
        self.criterion = criterion
        self.num_queries = num_queries
        self.overlap_threshold = overlap_threshold
        self.semantic_on = semantic_on
        self.instance_on = instance_on
        self.panoptic_on = panoptic_on
        self.object_mask_threshold = object_mask_threshold
        self.metadata = metadata
        # Fix: `instance_inference` reads this attribute, but it was never
        # assigned anywhere before, raising AttributeError at inference time.
        self.test_topk_per_image = test_topk_per_image
        if size_divisibility < 0:
            # use backbone size_divisibility if not set
            size_divisibility = self.backbone.size_divisibility
        self.size_divisibility = size_divisibility
        self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
        # Buffers (not persisted) so normalization constants follow `.to(device)`.
        self.register_buffer(
            "pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False
        )
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)

    @classmethod
    def from_config(cls, cfg):
        backbone = build_backbone(cfg)
        sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())

        # Loss parameters:
        deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
        dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
        mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT

        # building criterion
        matcher = HungarianMatcher(
            cost_class=1,
            cost_mask=mask_weight,
            cost_dice=dice_weight,
        )

        weight_dict = {"loss_ce": 1, "loss_mask": mask_weight, "loss_dice": dice_weight}
        if deep_supervision:
            # One extra weighted copy of each loss per auxiliary decoder layer.
            dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)

        losses = ["labels", "masks"]

        criterion = SetCriterion(
            sem_seg_head.num_classes,
            matcher=matcher,
            weight_dict=weight_dict,
            eos_coef=no_object_weight,
            losses=losses,
        )

        return {
            "backbone": backbone,
            "sem_seg_head": sem_seg_head,
            "criterion": criterion,
            "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
            "semantic_on": cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON,
            "instance_on": cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON,
            "panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON,
            "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
            "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
            "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
            "sem_seg_postprocess_before_inference": (
                cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE
                or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON
            ),
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
            # Standard detectron2 cap on instances kept per image (default 100).
            "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
        }

    @property
    def device(self):
        # Device of the registered normalization buffers == device of the model.
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                   * "image": Tensor, image in (C, H, W) format.
                   * "instances": per-region ground truth
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model (may be different
                     from input resolution), used in inference.
        Returns:
            list[dict]:
                each dict has the results for one image. The dict contains the following keys:

                * "sem_seg":
                    A Tensor that represents the
                    per-pixel segmentation prediced by the head.
                    The prediction has shape KxHxW that represents the logits of
                    each class for each pixel.
                * "panoptic_seg":
                    A tuple that represent panoptic output
                    panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
                    segments_info (list[dict]): Describe each segment in `panoptic_seg`.
                        Each dict contains keys "id", "category_id", "isthing".
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.size_divisibility)

        features = self.backbone(images.tensor)
        outputs = self.sem_seg_head(features)

        if self.training:
            # mask classification target
            if "instances" in batched_inputs[0]:
                gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
                targets = self.prepare_targets(gt_instances, images)
            else:
                targets = None

            # bipartite matching-based loss
            losses = self.criterion(outputs, targets)

            for k in list(losses.keys()):
                if k in self.criterion.weight_dict:
                    losses[k] *= self.criterion.weight_dict[k]
                else:
                    # remove this loss if not specified in `weight_dict`
                    losses.pop(k)
            return losses
        else:
            mask_cls_results = outputs["pred_logits"]
            mask_pred_results = outputs["pred_masks"]
            # upsample masks to the padded input resolution
            mask_pred_results = F.interpolate(
                mask_pred_results,
                size=(images.tensor.shape[-2], images.tensor.shape[-1]),
                mode="bilinear",
                align_corners=False,
            )

            processed_results = []
            for mask_cls_result, mask_pred_result, input_per_image, image_size in zip(
                mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])

                if self.sem_seg_postprocess_before_inference:
                    mask_pred_result = sem_seg_postprocess(
                        mask_pred_result, image_size, height, width
                    )

                # semantic segmentation inference
                r = self.semantic_inference(mask_cls_result, mask_pred_result)
                if not self.sem_seg_postprocess_before_inference:
                    r = sem_seg_postprocess(r, image_size, height, width)
                processed_results.append({"sem_seg": r})

                # panoptic segmentation inference
                if self.panoptic_on:
                    panoptic_r = self.panoptic_inference(
                        mask_cls_result, mask_pred_result
                    )
                    processed_results[-1]["panoptic_seg"] = panoptic_r

                # instance segmentation inference
                if self.instance_on:
                    instance_r = self.instance_inference(
                        mask_cls_result, mask_pred_result
                    )
                    processed_results[-1]["instances"] = instance_r

            return processed_results

    def prepare_targets(self, targets, images):
        """Pad per-image GT masks to the batched (padded) image size."""
        h, w = images.tensor.shape[-2:]
        new_targets = []
        for targets_per_image in targets:
            # pad gt
            gt_masks = targets_per_image.gt_masks
            padded_masks = torch.zeros(
                (gt_masks.shape[0], h, w), dtype=gt_masks.dtype, device=gt_masks.device
            )
            padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks
            new_targets.append(
                {
                    "labels": targets_per_image.gt_classes,
                    "masks": padded_masks,
                }
            )
        return new_targets

    def semantic_inference(self, mask_cls, mask_pred):
        """Combine per-query class probabilities (no-object column dropped) with
        mask probabilities into a (K, H, W) semantic score map."""
        mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
        mask_pred = mask_pred.sigmoid()
        semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
        return semseg

    def panoptic_inference(self, mask_cls, mask_pred):
        """Greedy per-pixel argmax over confident queries; merges stuff segments
        of the same class and drops mostly-occluded masks."""
        scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
        mask_pred = mask_pred.sigmoid()

        # Keep queries that are not "no object" and pass the score threshold.
        keep = labels.ne(self.sem_seg_head.num_classes) & (
            scores > self.object_mask_threshold
        )
        cur_scores = scores[keep]
        cur_classes = labels[keep]
        cur_masks = mask_pred[keep]
        cur_mask_cls = mask_cls[keep]
        cur_mask_cls = cur_mask_cls[:, :-1]

        cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks

        h, w = cur_masks.shape[-2:]
        panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
        segments_info = []

        current_segment_id = 0

        if cur_masks.shape[0] == 0:
            # We didn't detect any mask :(
            return panoptic_seg, segments_info
        else:
            # take argmax: each pixel is claimed by the query with highest score*prob
            cur_mask_ids = cur_prob_masks.argmax(0)
            stuff_memory_list = {}
            for k in range(cur_classes.shape[0]):
                pred_class = cur_classes[k].item()
                isthing = (
                    pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values()
                )
                mask_area = (cur_mask_ids == k).sum().item()
                original_area = (cur_masks[k] >= 0.5).sum().item()
                mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5)

                if mask_area > 0 and original_area > 0 and mask.sum().item() > 0:
                    # Drop masks whose visible part is too small a fraction of
                    # their full extent (heavily occluded by other queries).
                    if mask_area / original_area < self.overlap_threshold:
                        continue

                    # merge stuff regions of the same class into one segment
                    if not isthing:
                        if int(pred_class) in stuff_memory_list.keys():
                            panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
                            continue
                        else:
                            stuff_memory_list[int(pred_class)] = current_segment_id + 1

                    current_segment_id += 1
                    panoptic_seg[mask] = current_segment_id

                    segments_info.append(
                        {
                            "id": current_segment_id,
                            "isthing": bool(isthing),
                            "category_id": int(pred_class),
                        }
                    )

            return panoptic_seg, segments_info

    def instance_inference(self, mask_cls, mask_pred):
        """Select the top-k (query, class) pairs and package them as Instances."""
        # mask_pred is already processed to have the same shape as original input
        image_size = mask_pred.shape[-2:]

        # [Q, K] class probabilities without the trailing "no object" column.
        scores = F.softmax(mask_cls, dim=-1)[:, :-1]
        labels = (
            torch.arange(self.sem_seg_head.num_classes, device=self.device)
            .unsqueeze(0)
            .repeat(self.num_queries, 1)
            .flatten(0, 1)
        )
        scores_per_image, topk_indices = scores.flatten(0, 1).topk(
            self.test_topk_per_image, sorted=False
        )
        labels_per_image = labels[topk_indices]

        # Map flat (query, class) indices back to query indices.
        topk_indices = torch.div(
            topk_indices, self.sem_seg_head.num_classes, rounding_mode="floor"
        )
        mask_pred = mask_pred[topk_indices]

        # if this is panoptic segmentation, we only keep the "thing" classes
        if self.panoptic_on:
            keep = torch.zeros_like(scores_per_image).bool()
            for i, lab in enumerate(labels_per_image):
                keep[i] = (
                    lab in self.metadata.thing_dataset_id_to_contiguous_id.values()
                )
            scores_per_image = scores_per_image[keep]
            labels_per_image = labels_per_image[keep]
            mask_pred = mask_pred[keep]

        result = Instances(image_size)
        # mask (before sigmoid)
        result.pred_masks = (mask_pred > 0).float()
        # Boxes are derived from the masks (slow but exact).
        result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes()
        # calculate average mask prob over the predicted foreground
        mask_scores_per_image = (
            mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)
        ).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6)
        result.scores = scores_per_image * mask_scores_per_image
        result.pred_classes = labels_per_image
        return result
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/test_time_augmentation.py | mask2former/test_time_augmentation.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
from itertools import count
import math
import numpy as np
import torch
from fvcore.transforms import HFlipTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from detectron2.data.detection_utils import read_image
from detectron2.modeling import DatasetMapperTTA
from detectron2.modeling.postprocessing import sem_seg_postprocess
import logging
from detectron2.utils.logger import log_every_n, log_first_n
# Public API of this module.
__all__ = [
    "SemanticSegmentorWithTTA",
]
class SemanticSegmentorWithTTA(nn.Module):
    """
    A SemanticSegmentor with test-time augmentation enabled.
    Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.
    """

    def __init__(self, cfg, model, tta_mapper=None, batch_size=1):
        """
        Args:
            cfg (CfgNode):
            model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.
            tta_mapper (callable): takes a dataset dict and returns a list of
                augmented versions of the dataset dict. Defaults to
                `DatasetMapperTTA(cfg)`.
            batch_size (int): batch the augmented images into this batch size for inference.
        """
        super().__init__()
        if isinstance(model, DistributedDataParallel):
            # Unwrap DDP so we can call the underlying module directly.
            model = model.module
        self.cfg = cfg.clone()

        self.model = model

        if tta_mapper is None:
            tta_mapper = DatasetMapperTTA(cfg)
        self.tta_mapper = tta_mapper
        self.batch_size = batch_size

    def _inference_with_model(self, inputs):
        # Either run the model on whole images, or — when TEST.SLIDING_WINDOW is
        # set — tile each image, predict per tile, and average the overlaps.
        if self.cfg.TEST.SLIDING_WINDOW:
            log_first_n(logging.INFO, "Using sliding window to test")
            outputs = []
            for input in inputs:
                image_size = input["image"].shape[1:]  # h,w
                if self.cfg.TEST.SLIDING_TILE_SIZE > 0:
                    tile_size = (
                        self.cfg.TEST.SLIDING_TILE_SIZE,
                        self.cfg.TEST.SLIDING_TILE_SIZE,
                    )
                else:
                    # Pick a tile size from the shorter image side.
                    # NOTE(review): KeyError if min(image_size) is not one of
                    # these keys — presumably inputs are resized upstream.
                    selected_mapping = {256: 224, 512: 256, 768: 512, 896: 512}
                    tile_size = min(image_size)
                    tile_size = selected_mapping[tile_size]
                    tile_size = (tile_size, tile_size)
                # Everything except the pixels is forwarded unchanged to each tile.
                extra_info = {
                    k: v
                    for k, v in input.items()
                    if k not in ["image", "height", "width"]
                }
                log_every_n(
                    logging.INFO, "split {} to {}".format(image_size, tile_size)
                )
                overlap = self.cfg.TEST.SLIDING_OVERLAP
                stride = math.ceil(tile_size[0] * (1 - overlap))
                tile_rows = int(
                    math.ceil((image_size[0] - tile_size[0]) / stride) + 1
                )  # strided convolution formula
                tile_cols = int(math.ceil((image_size[1] - tile_size[1]) / stride) + 1)
                full_probs = None
                count_predictions = None
                tile_counter = 0

                for row in range(tile_rows):
                    for col in range(tile_cols):
                        # Tile corners, clamped to the image; then shift back so
                        # every tile has the full tile_size where possible.
                        x1 = int(col * stride)
                        y1 = int(row * stride)
                        x2 = min(x1 + tile_size[1], image_size[1])
                        y2 = min(y1 + tile_size[0], image_size[0])
                        x1 = max(
                            int(x2 - tile_size[1]), 0
                        )  # for portrait images the x1 underflows sometimes
                        y1 = max(
                            int(y2 - tile_size[0]), 0
                        )  # for very few rows y1 underflows

                        img = input["image"][:, y1:y2, x1:x2]
                        # Zero-pad right/bottom so the model always sees tile_size.
                        padded_img = nn.functional.pad(
                            img,
                            (
                                0,
                                tile_size[1] - img.shape[-1],
                                0,
                                tile_size[0] - img.shape[-2],
                            ),
                        )
                        tile_counter += 1
                        padded_input = {"image": padded_img}
                        padded_input.update(extra_info)
                        padded_prediction = self.model([padded_input])[0]["sem_seg"]
                        # Crop the padding back off the prediction.
                        prediction = padded_prediction[
                            :, 0 : img.shape[1], 0 : img.shape[2]
                        ]
                        # Lazily allocate the accumulators on the right device/dtype.
                        if full_probs is None:
                            full_probs = prediction.new_zeros(
                                prediction.shape[0], image_size[0], image_size[1]
                            )
                        if count_predictions is None:
                            count_predictions = prediction.new_zeros(
                                prediction.shape[0], image_size[0], image_size[1]
                            )
                        count_predictions[:, y1:y2, x1:x2] += 1
                        full_probs[
                            :, y1:y2, x1:x2
                        ] += prediction  # accumulate the predictions also in the overlapping regions

                # Average overlapping tiles, then resize to the requested output size.
                full_probs /= count_predictions
                full_probs = sem_seg_postprocess(
                    full_probs,
                    image_size,
                    input.get("height", image_size[0]),
                    input.get("width", image_size[1]),
                )
                outputs.append({"sem_seg": full_probs})
            return outputs
        else:
            log_first_n(logging.INFO, "Using whole image to test")
            return self.model(inputs)

    def _batch_inference(self, batched_inputs):
        """
        Execute inference on a list of inputs,
        using batch size = self.batch_size, instead of the length of the list.

        Inputs & outputs have the same format as :meth:`SemanticSegmentor.forward`
        """
        outputs = []
        inputs = []
        for idx, input in zip(count(), batched_inputs):
            inputs.append(input)
            # Flush a full batch, or the final partial batch.
            if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:
                with torch.no_grad():
                    outputs.extend(self._inference_with_model(inputs))
                inputs = []
        return outputs

    def __call__(self, batched_inputs):
        """
        Same input/output format as :meth:`SemanticSegmentor.forward`
        """

        def _maybe_read_image(dataset_dict):
            # Lazily load pixels from disk when the dict only carries a path.
            # NOTE(review): if "image" is already present but "height"/"width"
            # are missing, `image` is unbound below — callers appear to always
            # provide height/width in that case; confirm.
            ret = copy.copy(dataset_dict)
            if "image" not in ret:
                image = read_image(ret.pop("file_name"), self.model.input_format)
                image = torch.from_numpy(
                    np.ascontiguousarray(image.transpose(2, 0, 1))
                )  # CHW
                ret["image"] = image
            if "height" not in ret and "width" not in ret:
                ret["height"] = image.shape[1]
                ret["width"] = image.shape[2]
            return ret

        return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs]

    def _inference_one_image(self, input):
        """
        Args:
            input (dict): one dataset dict with "image" field being a CHW tensor

        Returns:
            dict: one output dict
        """
        augmented_inputs, tfms = self._get_augmented_inputs(input)
        # 1: forward with all augmented images
        outputs = self._batch_inference(augmented_inputs)
        # Delete now useless variables to avoid being out of memory
        del augmented_inputs
        # 2: merge the results
        # handle flip specially
        # outputs = [output.detach() for output in outputs]
        return self._merge_auged_output(outputs, tfms)

    def _merge_auged_output(self, outputs, tfms):
        # Undo horizontal flips, then average all augmented predictions in place.
        new_outputs = []
        for output, tfm in zip(outputs, tfms):
            if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
                new_outputs.append(output["sem_seg"].flip(dims=[2]))
            else:
                new_outputs.append(output["sem_seg"])
        del outputs
        # to avoid OOM with torch.stack
        final_predictions = new_outputs[0]
        for i in range(1, len(new_outputs)):
            final_predictions += new_outputs[i]
        final_predictions = final_predictions / len(new_outputs)
        del new_outputs
        return {"sem_seg": final_predictions}

    def _get_augmented_inputs(self, input):
        # Expand one input into its TTA variants; keep the transforms so the
        # predictions can be mapped back.
        augmented_inputs = self.tta_mapper(input)
        tfms = [x.pop("transforms") for x in augmented_inputs]
        return augmented_inputs, tfms
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/utils/selective_search.py | mask2former/utils/selective_search.py | # Modified from https://github.com/ranchirino/segmentation-as-selective-search/blob/master/selective_search.py
import numpy as np
from skimage import segmentation
from skimage.feature import local_binary_pattern
from skimage.color import rgb2hsv
from sklearn.preprocessing import normalize
from scipy.ndimage.morphology import binary_dilation
def initial_regions(image, scale):
    """Felzenszwalb over-segmentation, stacked onto the image as a 4th channel.

    Returns an (H, W, C+1) array whose last channel is the integer label map.
    """
    init_segments = segmentation.felzenszwalb(
        image, scale=scale, sigma=0.8, min_size=1000
    )
    layers = [image[:, :, ch] for ch in range(image.shape[2])]
    layers.append(init_segments)
    return np.stack(layers, axis=2)
def color_hist(reg_mask, bins=25, lower_range=0.0, upper_range=255.0):
    """Per-channel colour histogram (default 25 bins each), concatenated across
    channels and L1-normalised.

    `reg_mask` has shape (region size, channels).
    """
    per_channel = [
        np.histogram(reg_mask[:, ch], bins, (lower_range, upper_range))[0]
        for ch in range(reg_mask.shape[1])
    ]
    flat = np.concatenate(per_channel, axis=0)
    return normalize(flat.reshape(1, -1), norm="l1").ravel()
def texture_descriptor(img):
    """Per-channel LBP (local binary pattern, P=24, R=3) texture maps.

    LBP is an invariant descriptor that can be used for texture classification.
    """
    lbp_channels = [
        local_binary_pattern(img[:, :, ch], 24, 3) for ch in range(img.shape[2])
    ]
    return np.stack(lbp_channels, axis=2)
def texture_hist(text_reg_mask, bins=80, lower_range=0.0, upper_range=255.0):
    """Per-channel texture (LBP) histogram, concatenated across channels and
    L1-normalised.

    `text_reg_mask` has shape (region size, channels).
    """
    per_channel = [
        np.histogram(text_reg_mask[:, ch], bins, (lower_range, upper_range))[0]
        for ch in range(text_reg_mask.shape[1])
    ]
    flat = np.concatenate(per_channel, axis=0)
    return normalize(flat.reshape(1, -1), norm="l1").ravel()
def add_prop_reg(img_and_seg, R):
    """Attach colour and texture histograms to every region dict in *R*.

    *R* is mutated in place (indexed by segment label) and also returned.
    Assumes segment labels are contiguous 0..len(R)-1, as produced by
    felzenszwalb.
    """
    segments = img_and_seg[:, :, 3]
    text_img = texture_descriptor(img_and_seg[:, :, :3])
    for seg in np.unique(segments):
        in_region = segments == seg
        # colour histogram over the region's pixels
        R[seg]["col_hist"] = color_hist(img_and_seg[:, :, :3][in_region])
        # texture histogram over the same pixels
        R[seg]["text_hist"] = texture_hist(text_img[in_region])
    return R
def extract_regions(img_and_seg):
    """Bounding box, size and label for every segment in a label map.

    Accepts either a 2-D label map or an (H, W, 4) image+labels stack whose
    last channel is the label map. Returns a list of dicts, one per label.
    """
    segments = img_and_seg if img_and_seg.ndim == 2 else img_and_seg[:, :, 3]
    regions = []
    for label in np.unique(segments):
        ys, xs = np.where(segments == label)
        x_min, x_max = xs.min(), xs.max()
        y_min, y_max = ys.min(), ys.max()
        regions.append(
            {
                "x_min": x_min,
                "x_max": x_max,
                "y_min": y_min,
                "y_max": y_max,
                "width": (x_max - x_min) + 1,
                "height": (y_max - y_min) + 1,
                "size": ys.size,
                "label": label,
            }
        )
    return regions
def get_bb(window, label):
    """Bounding box (x_min, x_max, y_min, y_max) of *label*'s pixels in *window*."""
    ys, xs = np.where(window == label)
    return xs.min(), xs.max(), ys.min(), ys.max()
def find_neighbours(reg_bb, label):
    """Labels of regions touching the *label* region in *reg_bb* (8-connectivity).

    Grows the region's mask by one pixel with a full 3x3 structuring element and
    reports every other label appearing inside the grown area.
    """
    mask = np.zeros(reg_bb.shape)
    mask[reg_bb == label] = 1
    grown = binary_dilation(mask, structure=np.ones((3, 3)))
    # Outside the grown area: copy the original labels; inside: force `label`.
    mask[~grown] = reg_bb[~grown]
    mask[grown] = label
    changed = np.abs(mask - reg_bb) != 0
    return np.unique(reg_bb[changed]).tolist()
def extract_neighbors(img_and_seg, regions):
    """For each region, list the labels of its spatially adjacent regions."""
    h = img_and_seg.shape[0]  # rows
    w = img_and_seg.shape[1]  # columns
    segments = img_and_seg[:, :, 3]
    N = []
    for reg in regions:
        # Expand the bounding box by one pixel on each side, except at the border.
        x_lo = reg["x_min"] - 1 if reg["x_min"] != 0 else reg["x_min"]
        x_hi = reg["x_max"] + 2 if reg["x_max"] != w else reg["x_max"]
        y_lo = reg["y_min"] - 1 if reg["y_min"] != 0 else reg["y_min"]
        y_hi = reg["y_max"] + 2 if reg["y_max"] != h else reg["y_max"]
        window = segments[y_lo:y_hi, x_lo:x_hi]  # region bounding box
        N.append({"region": reg["label"], "neig": find_neighbours(window, reg["label"])})
    return N
def calc_BB(r1, r2):
    """Tight bounding box around two regions.

    Returns (x_min, x_max, y_min, y_max, area) of the joint box.
    """
    x_lo = min(r1["x_min"], r2["x_min"])
    x_hi = max(r1["x_max"], r2["x_max"])
    y_lo = min(r1["y_min"], r2["y_min"])
    y_hi = max(r1["y_max"], r2["y_max"])
    return x_lo, x_hi, y_lo, y_hi, (y_hi - y_lo) * (x_hi - x_lo)
def sim_size(r1, r2, img_size):
    """Size similarity: 1 minus the pair's combined share of the image.

    Encourages small regions to merge early.
    """
    return 1.0 - ((r1["size"] + r2["size"]) / img_size)
def sim_color(r1, r2):
    """Colour similarity via histogram intersection of the regions' colour histograms."""
    return sum(min(a, b) for a, b in zip(r1["col_hist"], r2["col_hist"]))
def sim_texture(r1, r2):
    """Texture similarity via histogram intersection of the regions' LBP histograms."""
    return sum(min(a, b) for a, b in zip(r1["text_hist"], r2["text_hist"]))
def sim_fill(r1, r2, img_size):
    """Fill similarity: how well r1 and r2 fill their joint bounding box.

    The wasted (unfilled) area of the joint box is normalised by the image size
    and subtracted from 1.
    """
    *_, bb_area = calc_BB(r1, r2)
    return 1.0 - ((bb_area - r1["size"] - r2["size"]) / img_size)
def calc_sim(r1, r2, img_and_seg, measure=(1, 1, 1, 1)):
    """Average of the enabled similarity measures between r1 and r2.

    `measure` is a (size, colour, texture, fill) mask of 0/1 flags; the result
    is the mean over the enabled measures only.
    """
    img_size = img_and_seg.shape[0] * img_and_seg.shape[1]
    parts = [0, 0, 0, 0]
    if measure[0]:
        parts[0] = sim_size(r1, r2, img_size)
    if measure[1]:
        parts[1] = sim_color(r1, r2)
    if measure[2]:
        parts[2] = sim_texture(r1, r2)
    if measure[3]:
        parts[3] = sim_fill(r1, r2, img_size)
    return sum(parts) / np.nonzero(measure)[0].size
def initial_sim(img_and_seg, R, N, measure):
    """Similarities for every neighbouring pair of regions.

    Each unordered pair is scored once (only when the neighbour's label is
    greater than the region's own label).
    """
    S = []
    for entry in N:
        r1 = [x for x in R if x["label"] == entry["region"]][0]
        for n in entry["neig"]:
            r2 = [x for x in R if x["label"] == n][0]
            if n > entry["region"]:
                S.append(
                    {
                        "regions": [entry["region"], n],
                        "sim": calc_sim(r1, r2, img_and_seg, measure=measure),
                    }
                )
    return S
def new_sim(img_and_seg, R, rt, measure):
    """Similarities between a freshly merged region *rt* and each of its neighbours."""
    r1 = [x for x in R if x["label"] == rt["region"]][0]
    S = []
    for n in rt["neig"]:
        r2 = [x for x in R if x["label"] == n][0]
        S.append(
            {
                "regions": [rt["region"], n],
                "sim": calc_sim(r1, r2, img_and_seg, measure=measure),
            }
        )
    return S
def merge_regions(img_and_seg, regions, R, N):
    """Merge region ``regions[1]`` into ``regions[0]`` in place.

    Relabels the segment map, recomputes the merged region's bounding box,
    size and size-weighted (L1-renormalised) colour/texture histograms, and
    rewires the neighbourhood lists. Mutates ``img_and_seg``, ``R`` and ``N``
    and also returns them.
    """
    ri = [x for x in R if x["label"] == regions[0]][0]
    rj = [x for x in R if x["label"] == regions[1]][0]
    idx_ri = [i for i, x in enumerate(R) if x["label"] == regions[0]][0]
    idx_rj = [i for i, x in enumerate(R) if x["label"] == regions[1]][0]
    # new region rt = ri UNION rj: relabel rj's pixels in the segment channel
    img_and_seg[:, :, 3][img_and_seg[:, :, 3] == regions[1]] = regions[
        0
    ]  # rt = ri + (rj = ri)
    x_min_rt, x_max_rt, y_min_rt, y_max_rt, _ = calc_BB(ri, rj)
    width_rt = (x_max_rt - x_min_rt) + 1
    height_rt = (y_max_rt - y_min_rt) + 1
    size_rt = ri["size"] + rj["size"]
    # size-weighted average of the two histograms, then L1-renormalised
    col_hist_rt = (ri["size"] * ri["col_hist"] + rj["size"] * rj["col_hist"]) / size_rt
    col_hist_rt = normalize(col_hist_rt.reshape(1, -1), norm="l1")[0]
    text_hist_rt = (
        ri["size"] * ri["text_hist"] + rj["size"] * rj["text_hist"]
    ) / size_rt
    text_hist_rt = normalize(text_hist_rt.reshape(1, -1), norm="l1")[0]
    # overwrite ri's record with the merged region's properties
    R[idx_ri]["x_min"] = x_min_rt
    R[idx_ri]["x_max"] = x_max_rt
    R[idx_ri]["y_min"] = y_min_rt
    R[idx_ri]["y_max"] = y_max_rt
    R[idx_ri]["width"] = width_rt
    R[idx_ri]["height"] = height_rt
    R[idx_ri]["size"] = size_rt
    R[idx_ri]["col_hist"] = col_hist_rt
    R[idx_ri]["text_hist"] = text_hist_rt
    # neighborhood
    idxN_ri = [i for i, x in enumerate(N) if x["region"] == regions[0]][0]
    idxN_rj = [i for i, x in enumerate(N) if x["region"] == regions[1]][0]
    N[idxN_ri]["neig"].remove(regions[1])
    N[idxN_rj]["neig"].remove(regions[0])
    # fold rj's remaining neighbours into ri's list and re-point them at ri
    for n in N[idxN_rj]["neig"]:
        if n not in N[idxN_ri]["neig"]:
            N[idxN_ri]["neig"].append(n)
        idx_n = [i for i, x in enumerate(N) if x["region"] == n][0]
        N[idx_n]["neig"].remove(regions[1])
        if regions[0] not in N[idx_n]["neig"]:
            N[idx_n]["neig"].append(regions[0])
    # drop rj's records entirely
    del R[idx_rj]
    del N[idxN_rj]
    return img_and_seg, R, N
def selective_search(
    image, colour_space="hsv", scale=20, measure=(1, 1, 1, 1), sim_threshold=0.65
):
    """
    Parameters
    ----------
    :param image: (height, width, 3) ndarray
        Input image.
    :param colour_space: {"rgb", "hsv"}
        Colour space to perform our hierarchical grouping algorithm.
    :param scale: float
        Free parameter. Higher means larger clusters in the initial segmentation (Felzenszwalb's segmentation).
    :param measure: (size, colour, texture, fill) tuple
        Define the similarity measures to use.
    :param sim_threshold: float
        Indicates the threshold of similarity between regions in range [0,1].
    Returns
    -------
    :segment_mask: (width, height) ndarray
        Ndarray with the same width and height that the input image with labeled regions.
    :regions: list
        List of dict with the properties of the regions
    """
    if colour_space == "hsv":
        image = rgb2hsv(image)
    # rescale to uint8 [0, 255] so downstream histogram code sees a common range
    # (NOTE(review): source formatting was lost; this rescale may originally have
    # applied only to the hsv branch — confirm against the upstream repo)
    image = image - image.min()  # min = 0
    image = image / image.max()  # max = 1
    image = image * 255
    image = image.astype(np.uint8)
    # obtain initial regions
    # return image and initial segments, shape[2] = (channel1, channel2, channel3, segments)
    img_and_seg = initial_regions(image, scale)
    print(len(np.unique(img_and_seg[:, :, 3])), "initial regions")
    R = extract_regions(img_and_seg)
    R = add_prop_reg(img_and_seg, R)
    # extract neighboring regions
    N = extract_neighbors(img_and_seg, R)
    # calculate similarity
    init_S = initial_sim(img_and_seg, R, N, measure)
    # hierarchical grouping algorithm
    under_thres = False
    while not under_thres:
        # get highest similarity
        s = [x["sim"] for x in init_S]
        max_sim = max(s) if s else 0
        if max_sim >= sim_threshold:
            # BUGFIX: ``s`` is a plain Python list, so the original
            # ``np.where(s == max_sim)[0][0]`` compared the whole list to a
            # float (always False) and indexing the empty result raised
            # IndexError on the very first merge; use list.index instead.
            regions = init_S[s.index(max_sim)]["regions"]
            # merge corresponding regions
            img_and_seg, R, N = merge_regions(img_and_seg, regions, R, N)
            # remove similarities involving either merged region
            del_ind = [
                i
                for i, r in enumerate(init_S)
                if regions[0] in r["regions"] or regions[1] in r["regions"]
            ]
            init_S = np.delete(init_S, del_ind).tolist()
            # calculate similarity set between rt and its neighbours
            rt = [x for x in N if x["region"] == regions[0]][0]
            new_S = new_sim(img_and_seg, R, rt, measure)
            init_S = init_S + new_S
        else:
            under_thres = True
    return img_and_seg[:, :, 3], R
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/utils/post_process_utils.py | mask2former/utils/post_process_utils.py | import torch
from torch.nn import functional as F
import numpy as np
# pydensecrf is an optional dependency: fall back to ``dcrf = None`` so that
# dense_crf_post_process can raise a clear error only when CRF inference is
# actually requested.
try:
    import pydensecrf.densecrf as dcrf
    from pydensecrf.utils import (
        unary_from_softmax,
        unary_from_labels,
        create_pairwise_bilateral,
        create_pairwise_gaussian,
    )
except ImportError:  # was a bare ``except``, which also swallowed unrelated errors
    dcrf = None
def dense_crf_post_process(
    logits,
    image,
    n_labels=None,
    max_iters=5,
    pos_xy_std=(3, 3),
    pos_w=3,
    bi_xy_std=(80, 80),
    bi_rgb_std=(13, 13, 13),
    bi_w=10,
):
    """Refine segmentation scores with a fully-connected CRF (pydensecrf).

    logits : [C,H,W] score tensor/ndarray, or an [H,W] label map when
        ``n_labels`` is supplied.
    image : RGB image. NOTE(review): the original docstring said "[3,H,W]",
        but the code indexes ``image.shape`` as (H, W, ...) and passes it as
        ``rgbim`` (which pydensecrf expects as HWC uint8) — confirm the
        callers' layout.
    Returns the refined [n_labels, H, W] scores as a torch tensor.
    """
    if dcrf is None:
        # NOTE(review): FileNotFoundError for a missing module is unusual;
        # callers might expect ImportError instead.
        raise FileNotFoundError(
            "pydensecrf is required to perform dense crf inference."
        )
    if isinstance(logits, torch.Tensor):
        # soft predictions from a torch model: softmax over classes first
        logits = F.softmax(logits, dim=0).detach().cpu().numpy()
        U = unary_from_softmax(logits)
        n_labels = logits.shape[0]
    elif logits.ndim == 3:
        # already-normalised numpy score map
        U = unary_from_softmax(logits)
        n_labels = logits.shape[0]
    else:
        # hard label map: the caller must supply n_labels
        assert n_labels is not None
        U = unary_from_labels(logits, n_labels, zero_unsure=False)
    d = dcrf.DenseCRF2D(image.shape[1], image.shape[0], n_labels)
    d.setUnaryEnergy(U)
    # This adds the color-independent term, features are the locations only.
    d.addPairwiseGaussian(
        sxy=pos_xy_std,
        compat=pos_w,
        kernel=dcrf.DIAG_KERNEL,
        normalization=dcrf.NORMALIZE_SYMMETRIC,
    )
    # This adds the color-dependent term, i.e. features are (x,y,r,g,b).
    d.addPairwiseBilateral(
        sxy=bi_xy_std,
        srgb=bi_rgb_std,
        rgbim=image,
        compat=bi_w,
        kernel=dcrf.DIAG_KERNEL,
        normalization=dcrf.NORMALIZE_SYMMETRIC,
    )
    # Run five inference steps.
    logits = d.inference(max_iters)
    logits = np.asarray(logits).reshape((n_labels, image.shape[0], image.shape[1]))
    return torch.from_numpy(logits)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/utils/events.py | mask2former/utils/events.py | import os
import wandb
from detectron2.utils import comm
from detectron2.utils.events import EventWriter, get_event_storage
def setup_wandb(cfg, args):
    """Initialise a wandb run on the main process only.

    Builds ``wandb.init`` kwargs from ``cfg.WANDB`` (keys lower-cased,
    ``config``/``name`` excluded), attaches either the whole cfg or a trimmed
    MODEL/SOLVER subset, and defaults the run name to the config file name.
    """
    if not comm.is_main_process():
        return
    init_args = {
        key.lower(): value
        for key, value in cfg.WANDB.items()
        if isinstance(key, str) and key not in ["config", "name"]
    }
    # only include most related part to avoid too big table
    # TODO: add configurable params to select which part of `cfg` should be saved in config
    if "config_exclude_keys" in init_args:
        init_args["config"] = cfg
        init_args["config"]["cfg_file"] = args.config_file
    else:
        init_args["config"] = {
            "model": cfg.MODEL,
            "solver": cfg.SOLVER,
            "cfg_file": args.config_file,
        }
    if init_args.get("name") is None:
        init_args["name"] = os.path.basename(args.config_file)
    wandb.init(**init_args)
class BaseRule(object):
    """Default grouping rule: pass the scalar name through unchanged."""

    def __call__(self, target):
        return target
class IsIn(BaseRule):
    """Rule predicate: True when the scalar name contains ``keyword``."""

    def __init__(self, keyword: str):
        self.keyword = keyword

    def __call__(self, target):
        return self.keyword in target
class Prefix(BaseRule):
    """Rule transform: prepend ``keyword + "/"`` to the scalar name."""

    def __init__(self, keyword: str):
        self.keyword = keyword

    def __call__(self, target):
        return "/".join([self.keyword, target])
class WandbWriter(EventWriter):
    """Write scalars, images and histograms from the EventStorage to wandb.

    (The original docstring mentioned tensorboard, but everything here is
    logged through ``wandb``.)
    """

    def __init__(self):
        """Set up the scalar-grouping rules; takes no arguments."""
        # iteration of the most recent scalar batch already sent to wandb
        self._last_write = -1
        # first matching rule wins: already-namespaced keys pass through,
        # loss-like keys are grouped under "train/"
        self._group_rules = [
            (IsIn("/"), BaseRule()),
            (IsIn("loss"), Prefix("train")),
        ]

    def write(self):
        storage = get_event_storage()

        def _group_name(scalar_name):
            # apply the first matching (rule, transform) pair
            for (rule, op) in self._group_rules:
                if rule(scalar_name):
                    return op(scalar_name)
            return scalar_name

        # only scalars newer than the last write; latest() maps name -> (value, iter)
        stats = {
            _group_name(name): scalars[0]
            for name, scalars in storage.latest().items()
            if scalars[1] > self._last_write
        }
        if len(stats) > 0:
            self._last_write = max([v[1] for k, v in storage.latest().items()])
        # storage.put_{image,histogram} is only meant to be used by
        # tensorboard writer. So we access its internal fields directly from here.
        if len(storage._vis_data) >= 1:
            stats["image"] = [
                wandb.Image(img, caption=img_name)
                for img_name, img, step_num in storage._vis_data
            ]
            # Storage stores all image data and rely on this writer to clear them.
            # As a result it assumes only one writer will use its image data.
            # An alternative design is to let storage store limited recent
            # data (e.g. only the most recent image) that all writers can access.
            # In that case a writer may not see all image data if its period is long.
            storage.clear_images()
        if len(storage._histograms) >= 1:

            def create_bar(tag, bucket_limits, bucket_counts, **kwargs):
                # one wandb bar chart per stored histogram
                data = [
                    [label, val] for (label, val) in zip(bucket_limits, bucket_counts)
                ]
                table = wandb.Table(data=data, columns=["label", "value"])
                return wandb.plot.bar(table, "label", "value", title=tag)

            stats["hist"] = [create_bar(**params) for params in storage._histograms]
            storage.clear_histograms()
        if len(stats) == 0:
            return
        wandb.log(stats, step=storage.iter)

    def close(self):
        # finalise the wandb run
        wandb.finish()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/utils/misc.py | mask2former/utils/misc.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
from typing import List, Optional
import torch
import torch.distributed as dist
import torchvision
from torch import Tensor
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
    """A padded tensor batch bundled with its (optional) padding mask."""

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Move both the tensors and the mask (if any) to ``device``."""
        moved_tensors = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved_tensors, moved_mask)

    def decompose(self):
        """Return the (tensors, mask) pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Pad a list of [C,H,W] tensors to a common shape.

    Returns a NestedTensor whose ``tensors`` is a zero-padded
    [B, C, Hmax, Wmax] batch and whose ``mask`` is [B, Hmax, Wmax] with True
    on padded pixels and False on real ones.
    """
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        # mask starts all-True (padding); real pixels are flipped to False below
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], : img.shape[2]] = False
    else:
        raise ValueError("not supported")
    return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    """ONNX-traceable padding: uses F.pad instead of in-place slice copies."""
    # per-dimension maximum size, kept as tensors so tracing records the ops
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(
            torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        # pad each image (and its mask) up to max_size on the trailing side
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(
            img, (0, padding[2], 0, padding[1], 0, padding[0])
        )
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        # padding value 1 marks padded pixels (True after the bool cast)
        padded_mask = torch.nn.functional.pad(
            m, (0, padding[2], 0, padding[1]), "constant", 1
        )
        padded_masks.append(padded_mask.to(torch.bool))

    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)
    return NestedTensor(tensor, mask=mask)
def is_dist_avail_and_initialized():
    """True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/utils/__init__.py | mask2former/utils/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
from .events import setup_wandb, WandbWriter
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/criterion.py | mask2former/modeling/criterion.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
MaskFormer criterion.
"""
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list
def dice_loss(inputs, targets, num_masks):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
    """
    probs = inputs.sigmoid().flatten(1)
    intersection = 2 * (probs * targets).sum(-1)
    total = probs.sum(-1) + targets.sum(-1)
    # +1 in numerator/denominator smooths the empty-mask case
    per_mask = 1 - (intersection + 1) / (total + 1)
    return per_mask.sum() / num_masks
def sigmoid_focal_loss(
    inputs, targets, num_masks, alpha: float = 0.25, gamma: float = 2
):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        Loss tensor
    """
    prob = inputs.sigmoid()
    ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t is the model's probability of the true class
    p_t = prob * targets + (1 - prob) * (1 - targets)
    focal = ce * (1 - p_t) ** gamma
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        focal = alpha_t * focal
    return focal.mean(1).sum() / num_masks
class SetCriterion(nn.Module):
    """This class computes the loss for DETR.
    The process happens in two steps:
    1) we compute hungarian assignment between ground truth boxes and the outputs of the model
    2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
    """

    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category;
                a value <= 0 switches to ignoring the no-object class entirely
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        if eos_coef > 0:
            # down-weight the trailing no-object class in the CE loss
            empty_weight = torch.ones(self.num_classes + 1)
            empty_weight[-1] = self.eos_coef
            self.register_buffer("empty_weight", empty_weight)
            self.use_ignore_idx = False
        else:
            # eos_coef <= 0: treat no-object as an ignore_index instead
            self.use_ignore_idx = True
        # scratch buffer; only reset here and in clean_buffer() in the visible code
        self.cur_target = []

    def loss_labels(self, outputs, targets, indices, num_masks):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert "pred_logits" in outputs
        src_logits = outputs["pred_logits"]

        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat(
            [t["labels"][J] for t, (_, J) in zip(targets, indices)]
        )
        # default every query to the no-object class, then fill in matched queries
        target_classes = torch.full(
            src_logits.shape[:2],
            self.num_classes,
            dtype=torch.int64,
            device=src_logits.device,
        )
        target_classes[idx] = target_classes_o
        if self.use_ignore_idx:
            loss_ce = F.cross_entropy(
                src_logits.transpose(1, 2),
                target_classes,
                ignore_index=self.num_classes,
            )
        else:
            if "empty_weight" in outputs:
                # model-provided per-class weights; keep only the buffered
                # no-object weight and detach from the graph
                empty_weight = torch.cat(
                    [outputs["empty_weight"], self.empty_weight[-1:]]
                ).detach()
            else:
                empty_weight = self.empty_weight
            loss_ce = F.cross_entropy(
                src_logits.transpose(1, 2), target_classes, empty_weight
            )
        losses = {"loss_ce": loss_ce}
        return losses

    def loss_masks(self, outputs, targets, indices, num_masks):
        """Compute the losses related to the masks: the focal loss and the dice loss.
        targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
        """
        assert "pred_masks" in outputs

        src_idx = self._get_src_permutation_idx(indices)
        tgt_idx = self._get_tgt_permutation_idx(indices)
        src_masks = outputs["pred_masks"]
        src_masks = src_masks[src_idx]
        masks = [t["masks"] for t in targets]
        # TODO use valid to mask invalid areas due to padding in loss
        target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
        target_masks = target_masks.to(src_masks)
        target_masks = target_masks[tgt_idx]

        # upsample predictions to the target size
        src_masks = F.interpolate(
            src_masks[:, None],
            size=target_masks.shape[-2:],
            mode="bilinear",
            align_corners=False,
        )
        src_masks = src_masks[:, 0].flatten(1)

        target_masks = target_masks.flatten(1)
        target_masks = target_masks.view(src_masks.shape)
        losses = {
            "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_masks),
            "loss_dice": dice_loss(src_masks, target_masks, num_masks),
        }
        return losses

    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat(
            [torch.full_like(src, i) for i, (src, _) in enumerate(indices)]
        )
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx

    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat(
            [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]
        )
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx

    def get_loss(self, loss, outputs, targets, indices, num_masks):
        # dispatch by loss name ("labels" or "masks")
        loss_map = {"labels": self.loss_labels, "masks": self.loss_masks}
        assert loss in loss_map, f"do you really want to compute {loss} loss?"
        return loss_map[loss](outputs, targets, indices, num_masks)

    def forward(self, outputs, targets):
        """This performs the loss computation.
        Parameters:
            outputs: dict of tensors, see the output specification of the model for the format
            targets: list of dicts, such that len(targets) == batch_size.
                The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}

        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux, targets)

        # Compute the average number of target boxes accross all nodes, for normalization purposes
        num_masks = sum(len(t["labels"]) for t in targets)
        num_masks = torch.as_tensor(
            [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device
        )
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_masks)
        num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()

        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))

        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if "aux_outputs" in outputs:
            for i, aux_outputs in enumerate(outputs["aux_outputs"]):
                indices = self.matcher(aux_outputs, targets)
                for loss in self.losses:
                    l_dict = self.get_loss(
                        loss, aux_outputs, targets, indices, num_masks
                    )
                    # suffix aux losses with the decoder-layer index
                    l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
                    losses.update(l_dict)

        return losses

    def clean_buffer(self):
        # reset the per-iteration target buffer
        self.cur_target = []
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/matcher.py | mask2former/modeling/matcher.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/matcher.py
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn
def batch_dice_loss(inputs, targets):
    """
    Compute the DICE loss, similar to generalized IOU for masks
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
    Returns an [N, M] pairwise cost matrix (every prediction vs every target).
    """
    probs = inputs.sigmoid().flatten(1)
    # pairwise intersection between all prediction/target mask pairs
    intersection = 2 * torch.einsum("nc,mc->nm", probs, targets)
    union = probs.sum(-1)[:, None] + targets.sum(-1)[None, :]
    return 1 - (intersection + 1) / (union + 1)
def batch_sigmoid_focal_loss(inputs, targets, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Default = -1 (no weighting).
        gamma: Exponent of the modulating factor (1 - p_t) to
               balance easy vs hard examples.
    Returns:
        [N, M] pairwise focal-loss cost matrix, normalised by the number of pixels.
    """
    hw = inputs.shape[1]
    prob = inputs.sigmoid()
    # per-pixel focal terms assuming the pixel is positive / negative
    pos_term = ((1 - prob) ** gamma) * F.binary_cross_entropy_with_logits(
        inputs, torch.ones_like(inputs), reduction="none"
    )
    neg_term = (prob ** gamma) * F.binary_cross_entropy_with_logits(
        inputs, torch.zeros_like(inputs), reduction="none"
    )
    if alpha >= 0:
        pos_term = pos_term * alpha
        neg_term = neg_term * (1 - alpha)
    # combine against each target mask: positives where target==1, negatives elsewhere
    cost = torch.einsum("nc,mc->nm", pos_term, targets) + torch.einsum(
        "nc,mc->nm", neg_term, (1 - targets)
    )
    return cost / hw
class HungarianMatcher(nn.Module):
    """This class computes an assignment between the targets and the predictions of the network
    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
    while the others are un-matched (and thus treated as non-objects).
    """

    def __init__(
        self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1
    ):
        """Creates the matcher
        Params:
            cost_class: This is the relative weight of the classification error in the matching cost
            cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost
            cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost
        """
        super().__init__()
        self.cost_class = cost_class
        self.cost_mask = cost_mask
        self.cost_dice = cost_dice
        assert (
            cost_class != 0 or cost_mask != 0 or cost_dice != 0
        ), "all costs cant be 0"

    @torch.no_grad()
    def memory_efficient_forward(self, outputs, targets):
        """More memory-friendly matching"""
        bs, num_queries = outputs["pred_logits"].shape[:2]

        # Work out the mask padding size
        # NOTE(review): h_max/w_max are computed but never used below —
        # presumably leftovers from earlier padding logic.
        masks = [v["masks"] for v in targets]
        h_max = max([m.shape[1] for m in masks])
        w_max = max([m.shape[2] for m in masks])

        indices = []

        # Iterate through batch size
        for b in range(bs):
            out_prob = outputs["pred_logits"][b].softmax(
                -1
            )  # [num_queries, num_classes]
            out_mask = outputs["pred_masks"][b]  # [num_queries, H_pred, W_pred]

            tgt_ids = targets[b]["labels"]
            # gt masks are already padded when preparing target
            tgt_mask = targets[b]["masks"].to(out_mask)

            # Compute the classification cost. Contrary to the loss, we don't use the NLL,
            # but approximate it in 1 - proba[target class].
            # The 1 is a constant that doesn't change the matching, it can be ommitted.
            cost_class = -out_prob[:, tgt_ids]

            # Downsample gt masks to save memory
            tgt_mask = F.interpolate(
                tgt_mask[:, None], size=out_mask.shape[-2:], mode="nearest"
            )

            # Flatten spatial dimension
            out_mask = out_mask.flatten(1)  # [batch_size * num_queries, H*W]
            tgt_mask = tgt_mask[:, 0].flatten(1)  # [num_total_targets, H*W]

            # Compute the focal loss between masks
            cost_mask = batch_sigmoid_focal_loss(out_mask, tgt_mask)

            # Compute the dice loss between masks
            cost_dice = batch_dice_loss(out_mask, tgt_mask)

            # Final cost matrix: weighted sum of the three pairwise costs
            C = (
                self.cost_mask * cost_mask
                + self.cost_class * cost_class
                + self.cost_dice * cost_dice
            )
            C = C.reshape(num_queries, -1).cpu()

            # optimal 1-to-1 assignment via the Hungarian algorithm (scipy)
            indices.append(linear_sum_assignment(C))
        return [
            (
                torch.as_tensor(i, dtype=torch.int64),
                torch.as_tensor(j, dtype=torch.int64),
            )
            for i, j in indices
        ]

    @torch.no_grad()
    def forward(self, outputs, targets):
        """Performs the matching
        Params:
            outputs: This is a dict that contains at least these entries:
                "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                "pred_masks": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks
            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
                "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
                    objects in the target) containing the class labels
                "masks": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks
        Returns:
            A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        return self.memory_efficient_forward(outputs, targets)

    def __repr__(self):
        head = "Matcher " + self.__class__.__name__
        body = [
            "cost_class: {}".format(self.cost_class),
            "cost_mask: {}".format(self.cost_mask),
            "cost_dice: {}".format(self.cost_dice),
        ]
        _repr_indent = 4
        lines = [head] + [" " * _repr_indent + line for line in body]
        return "\n".join(lines)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/__init__.py | mask2former/modeling/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
from .backbone.swin import D2SwinTransformer
from .backbone.clip_resnet import D2ModifiedResNet
from .heads.mask_former_head import MaskFormerHead
from .heads.mask_former_interaction_head import MaskFormerInteractionHead
from .heads.per_pixel_baseline import (
PerPixelBaselineHead,
PerPixelBaselinePlusHead,
)
from .heads.pixel_decoder import BasePixelDecoder
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/mask_former_interaction_head.py | mask2former/modeling/heads/mask_former_interaction_head.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer.open_transformer_predictor import OpenTransformerPredictor
from .pixel_decoder import build_pixel_decoder
from .module import ContextInteraction
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerInteractionHead(nn.Module):
    """Semantic-segmentation head combining a pixel decoder, a text/visual
    ContextInteraction module, and a transformer predictor."""

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        # Back-compat shim that rewrites pre-version-2 checkpoint keys in place.
        # NOTE(review): ``newk`` is always reassigned to ``k`` (the replace is
        # commented out), so the rename branch never fires and this is
        # currently a no-op. Also note super()._load_from_state_dict is not
        # called here — presumably fine because this module has no direct
        # parameters; confirm if parameters are ever added.
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # Do not warn if train from scratch
            scratch = True
            logger = logging.getLogger(__name__)
            for k in list(state_dict.keys()):
                newk = k
                if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
                    # newk = k.replace(prefix, prefix + "pixel_decoder.")
                    newk = k
                    # logger.debug(f"{k} ==> {newk}")
                if newk != k:
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]
                    scratch = False

            if not scratch:
                logger.warning(
                    f"Weight format of {self.__class__.__name__} have changed! "
                    "Please upgrade your models. Applying automatic conversion now ..."
                )

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        num_classes: int,
        pixel_decoder: nn.Module,
        loss_weight: float = 1.0,
        ignore_value: int = -1,
        # extra parameters
        transformer_predictor: nn.Module,
        transformer_in_feature: str,
        interaction,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            num_classes: number of classes to predict
            pixel_decoder: the pixel decoder module
            loss_weight: loss weight
            ignore_value: category id to be ignored during training.
            transformer_predictor: the transformer decoder that makes prediction
            transformer_in_feature: input feature name to the transformer_predictor
            interaction: module fusing text features into the visual features
        """
        super().__init__()
        # sort backbone features shallow-to-deep by stride
        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]
        # NOTE(review): feature_strides/feature_channels are computed but unused
        feature_strides = [v.stride for k, v in input_shape]
        feature_channels = [v.channels for k, v in input_shape]

        self.ignore_value = ignore_value
        self.common_stride = 4
        self.loss_weight = loss_weight

        self.pixel_decoder = pixel_decoder
        self.predictor = transformer_predictor
        self.transformer_in_feature = transformer_in_feature

        self.num_classes = num_classes
        self.interaction = interaction

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        # figure out in_channels to transformer predictor
        if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
            transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
            transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder":
            transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        else:
            transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels

        # cross-attention module that injects prompt/text features
        interaction = ContextInteraction(q_dim=cfg.MODEL.SEM_SEG_HEAD.EMB_SIZE,
                                         k_dim=cfg.MODEL.CLIP_ADAPTER.PROMPT_DIM,
                                         embed_dim=cfg.MODEL.SEM_SEG_HEAD.EMBED_DIM,
                                         num_heads=cfg.MODEL.SEM_SEG_HEAD.NUM_HEADS,
                                         hidden_dim=cfg.MODEL.SEM_SEG_HEAD.EMBED_HIDDEN_DIM,
                                         use_layer_scale=cfg.MODEL.SEM_SEG_HEAD.USE_LAYER_SCALE)

        return {
            "input_shape": {
                k: v
                for k, v in input_shape.items()
                if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
            },
            "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
            "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
            "pixel_decoder": build_pixel_decoder(cfg, input_shape),
            "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
            "transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
            "transformer_predictor": OpenTransformerPredictor(
                cfg,
                transformer_predictor_in_channels,
                mask_classification=True,
            ),
            "interaction": interaction
        }

    def forward(self, features, text_feat, mask=None):
        # thin wrapper; all the work happens in layers2
        return self.layers2(features, text_feat, mask)

    def layers2(self, features, text_feat=None, mask=None):
        # 1) pixel decoder produces per-pixel + multi-scale features
        mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features)
        # 2) fuse text features into all three visual feature sets
        mask_features, transformer_encoder_features, multi_scale_features = self.interaction(mask_features, transformer_encoder_features, multi_scale_features, text_feat, mask)
        # 3) run the transformer predictor on the configured feature source
        if self.transformer_in_feature == "multi_scale_pixel_decoder":
            predictions = self.predictor(multi_scale_features, mask_features, mask)
        else:
            if self.transformer_in_feature == "transformer_encoder":
                assert (
                    transformer_encoder_features is not None
                ), "Please use the TransformerEncoderPixelDecoder."
                predictions = self.predictor(transformer_encoder_features, mask_features, mask)
            elif self.transformer_in_feature == "pixel_embedding":
                predictions = self.predictor(mask_features, mask_features, mask)
            else:
                predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask)
        return predictions, text_feat

    def freeze_pretrained(self):
        # freeze every child except the predictor, which handles its own freezing
        for name, module in self.named_children():
            if name not in ["predictor"]:
                for param in module.parameters():
                    param.requires_grad = False
            else:
                module.freeze_pretrained()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/module.py | mask2former/modeling/heads/module.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath
class MultiHeadAttention(nn.Module):
    """
    Multi-head cross attention between a query stream (e.g. pixel features)
    and a key/value stream (e.g. text embeddings).

    Args:
        q_dim: channel dimension of the query input (also the output dimension).
        k_dim: channel dimension of the key/value input.
        embed_dim: internal attention dimension, split across heads.
        num_heads: number of attention heads; must divide ``embed_dim``.
        dropout: dropout probability applied to the attention weights.
        clamp_min_for_underflow / clamp_max_for_overflow: clamp attention logits
            to +/-50000 to keep fp16 values within representable range.
    """
    def __init__(self, q_dim, k_dim, embed_dim, num_heads, dropout=0.1,
                 clamp_min_for_underflow=False, clamp_max_for_overflow=False):
        super(MultiHeadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.q_dim = q_dim
        self.k_dim = k_dim
        assert (
            self.head_dim * self.num_heads == self.embed_dim
        ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
        # Standard 1/sqrt(head_dim) scaling, folded into the query projection.
        self.scale = self.head_dim ** (-0.5)
        self.dropout = dropout
        self.q_proj = nn.Linear(self.q_dim, self.embed_dim)
        self.k_proj = nn.Linear(self.k_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.k_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.q_dim)
        self.clamp_min_for_underflow = clamp_min_for_underflow
        self.clamp_max_for_overflow = clamp_max_for_overflow
        self._reset_parameters()

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed_dim) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def _reset_parameters(self):
        # Xavier-uniform weights, zero biases for all four projections.
        nn.init.xavier_uniform_(self.q_proj.weight)
        self.q_proj.bias.data.fill_(0)
        nn.init.xavier_uniform_(self.k_proj.weight)
        self.k_proj.bias.data.fill_(0)
        nn.init.xavier_uniform_(self.v_proj.weight)
        self.v_proj.bias.data.fill_(0)
        nn.init.xavier_uniform_(self.out_proj.weight)
        self.out_proj.bias.data.fill_(0)

    def forward(self, q, k, v, attention_mask=None, return_attention=False):
        """Scaled dot-product cross attention.

        Args:
            q: (bsz, tgt_len, q_dim), or (tgt_len, q_dim) which is broadcast
                across the batch dimension taken from ``k``.
            k, v: (bsz, src_len, k_dim).
            attention_mask: optional (bsz, src_len); zeros mark masked-out keys.
            return_attention: kept for API compatibility; the (pre-dropout)
                attention weights are returned either way.

        Returns:
            (attn_output, attn_weights) with shapes (bsz, tgt_len, q_dim) and
            (bsz * num_heads, tgt_len, src_len).

        Raises:
            ValueError: if ``q`` is neither 2-D nor 3-D, or if internal shape
                checks fail.
        """
        if len(q.size()) == 3:
            bsz, tgt_len, embed_dim = q.size()
        elif len(q.size()) == 2:
            tgt_len, embed_dim = q.size()
            bsz = k.shape[0]
            q = q.expand(bsz, tgt_len, embed_dim)
        else:
            # BUG FIX: previously an unsupported rank fell through both branches
            # and crashed later with an UnboundLocalError on `bsz`; fail fast
            # with a clear message instead.
            raise ValueError(
                f"query must be a 2-D or 3-D tensor, got {len(q.size())}-D"
            )
        query_states = self.q_proj(q) * self.scale
        key_states = self._shape(self.k_proj(k), -1, bsz)
        value_states = self._shape(self.v_proj(v), -1, bsz)
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
            )
        if self.clamp_min_for_underflow:
            attn_weights = torch.clamp(attn_weights, min=-50000) # Do not increase -50000, data type half has quite limited range
        if self.clamp_max_for_overflow:
            attn_weights = torch.clamp(attn_weights, max=50000) # Do not increase 50000, data type half has quite limited range
        if attention_mask is not None:
            # [bsz, src_len] -> additive mask broadcast over heads and queries.
            assert (attention_mask.dim() == 2)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
            attention_mask = attention_mask.expand(bsz, 1, tgt_len, src_len)
            attention_mask = attention_mask.masked_fill(attention_mask == 0, -9e15)
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if return_attention:
            # this operation is a bit akward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        # Dropout is applied only to the probabilities used for the value mix,
        # not to the returned attention weights.
        attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
            )
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights
class ContextInteraction(nn.Module):
    def __init__(self, q_dim, k_dim, embed_dim, num_heads, hidden_dim=None, dropout=0.1,
                 drop_path=.0, init_values=1e-1, use_layer_scale=False,
                 clamp_min_for_underflow=False, clamp_max_for_overflow=False):
        """
        Cross-attend multi-scale pixel features (queries) to text features
        (keys/values) and add the attended context back as a residual.

        Inputs:
            q_dim - channel dimension of the pixel (query) features
            k_dim - channel dimension of the text (key/value) features
            embed_dim - Dimensionality of input and attention feature vectors
            hidden_dim - Dimensionality of hidden layer in feed-forward network
                         (usually 2-4x larger than embed_dim); currently unused here
            num_heads - Number of heads to use in the Multi-Head Attention block
            dropout - Amount of dropout to apply in the feed-forward network
            use_layer_scale - if True, scale the attention residual by a learned
                              per-channel gamma (with optional stochastic depth)
        """
        super(ContextInteraction, self).__init__()
        # pre_layer norm (applied to queries and to keys/values before attention)
        self.layer_norm_q_1 = nn.LayerNorm(q_dim)
        self.layer_norm_k_1 = nn.LayerNorm(k_dim)
        self.attn = MultiHeadAttention(q_dim=q_dim,
                                       k_dim=k_dim,
                                       embed_dim=embed_dim,
                                       num_heads=num_heads,
                                       clamp_min_for_underflow=clamp_min_for_underflow,
                                       clamp_max_for_overflow=clamp_max_for_overflow)
        # add layer scale for training stability
        self.use_layer_scale = use_layer_scale
        if self.use_layer_scale:
            self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
            self.gamma = nn.Parameter(init_values * torch.ones((q_dim)), requires_grad=True)

    def forward(self, mask_features, transformer_encoder_features, multi_scale_features, text_feat, attention_mask):
        """
        Args:
            mask_features: (bs, q_dim, H, W) pixel-embedding map.
            transformer_encoder_features: passed through unchanged.
            multi_scale_features: list of three (bs, q_dim, h, w) feature maps.
            text_feat: (num_prompts, k_dim) text features shared across the batch.
            attention_mask: optional (bs, num_prompts) key mask for the attention.

        Returns:
            (updated mask_features, transformer_encoder_features,
             updated multi_scale_features)
        """
        q0, q1, q2 = multi_scale_features[0], multi_scale_features[1], multi_scale_features[2]
        q3 = mask_features
        k = text_feat
        v = text_feat
        output = []
        bs, _, h, w = q0.shape
        k = k.expand(bs, k.shape[0], k.shape[1])
        v = v.expand(bs, v.shape[0], v.shape[1])
        # BUG FIX: normalize keys/values ONCE, before the loop. The original
        # rebound `k, v = layer_norm(k), layer_norm(v)` inside the loop, which
        # applied LayerNorm cumulatively (up to 4 times) to the same text
        # features as successive scales were processed.
        k, v = self.layer_norm_k_1(k), self.layer_norm_k_1(v)
        for q_index, q in enumerate([q0, q1, q2, q3]):
            bs, _, h, w = q.shape
            # (bs, c, h, w) -> (bs, h*w, c) token sequence for attention.
            q = q.flatten(2).transpose(1, 2)
            q = self.layer_norm_q_1(q)
            delta_q = self.attn(q, k, v, attention_mask=attention_mask)[0]
            if self.use_layer_scale:
                q = q + self.drop_path(self.gamma * delta_q)
            else:
                q = q + delta_q
            # Restore the spatial layout.
            q = q.transpose(1, 2).contiguous().view(bs, -1, h, w)
            output.append(q)
        transed_multi_scale_features = output[:3]
        transed_mask_features = output[3]
        return (transed_mask_features, transformer_encoder_features, transed_multi_scale_features)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/per_pixel_baseline.py | mask2former/modeling/heads/per_pixel_baseline.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer.transformer_predictor import TransformerPredictor
from .pixel_decoder import build_pixel_decoder
@SEM_SEG_HEADS_REGISTRY.register()
class PerPixelBaselineHead(nn.Module):
    """Per-pixel semantic segmentation baseline: a pixel decoder followed by a
    1x1 conv classifier, trained with per-pixel cross-entropy at 1/common_stride
    resolution and upsampled at inference time."""

    _version = 2

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        # Backward compatibility: version-1 checkpoints stored pixel-decoder
        # weights directly under the head; remap them under "pixel_decoder.".
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # Do not warn if train from scratch
            scratch = True
            logger = logging.getLogger(__name__)
            for k in list(state_dict.keys()):
                newk = k
                if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
                    newk = k.replace(prefix, prefix + "pixel_decoder.")
                    # logger.warning(f"{k} ==> {newk}")
                if newk != k:
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]
                    scratch = False
            if not scratch:
                logger.warning(
                    f"Weight format of {self.__class__.__name__} have changed! "
                    "Please upgrade your models. Applying automatic conversion now ..."
                )

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        num_classes: int,
        pixel_decoder: nn.Module,
        loss_weight: float = 1.0,
        ignore_value: int = -1,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            num_classes: number of classes to predict
            pixel_decoder: the pixel decoder module
            loss_weight: loss weight
            ignore_value: category id to be ignored during training.
        """
        super().__init__()
        # Order features by stride (e.g. "res2" .. "res5").
        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]
        self.ignore_value = ignore_value
        self.common_stride = 4
        self.loss_weight = loss_weight
        self.pixel_decoder = pixel_decoder
        # 1x1 conv classifier on top of the pixel decoder's mask features.
        self.predictor = Conv2d(
            self.pixel_decoder.mask_dim, num_classes, kernel_size=1, stride=1, padding=0
        )
        weight_init.c2_msra_fill(self.predictor)

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        return {
            "input_shape": {
                k: v
                for k, v in input_shape.items()
                if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
            },
            "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
            "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
            "pixel_decoder": build_pixel_decoder(cfg, input_shape),
            "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
        }

    def forward(self, features, targets=None):
        """
        Returns:
            In training, returns (None, dict of losses)
            In inference, returns (CxHxW logits, {})
        """
        x = self.layers(features)
        if self.training:
            return None, self.losses(x, targets)
        else:
            # Upsample logits back to input resolution for inference.
            x = F.interpolate(
                x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
            )
            return x, {}

    def layers(self, features):
        # Pixel decoding then per-pixel classification.
        x, _ = self.pixel_decoder.forward_features(features)
        x = self.predictor(x)
        return x

    def losses(self, predictions, targets):
        predictions = (
            predictions.float()
        )  # https://github.com/pytorch/pytorch/issues/48163
        predictions = F.interpolate(
            predictions,
            scale_factor=self.common_stride,
            mode="bilinear",
            align_corners=False,
        )
        loss = F.cross_entropy(
            predictions, targets, reduction="mean", ignore_index=self.ignore_value
        )
        losses = {"loss_sem_seg": loss * self.loss_weight}
        return losses
@SEM_SEG_HEADS_REGISTRY.register()
class PerPixelBaselinePlusHead(PerPixelBaselineHead):
    """Per-pixel baseline with a transformer predictor instead of the 1x1 conv
    classifier, optionally deep-supervised at every decoder layer."""

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        # Backward compatibility: remap version-1 checkpoint keys so that
        # pixel-decoder weights live under the "pixel_decoder." prefix.
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # Do not warn if train from scratch
            scratch = True
            logger = logging.getLogger(__name__)
            for k in list(state_dict.keys()):
                newk = k
                if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
                    newk = k.replace(prefix, prefix + "pixel_decoder.")
                    logger.debug(f"{k} ==> {newk}")
                if newk != k:
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]
                    scratch = False
            if not scratch:
                logger.warning(
                    f"Weight format of {self.__class__.__name__} have changed! "
                    "Please upgrade your models. Applying automatic conversion now ..."
                )

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        # extra parameters
        transformer_predictor: nn.Module,
        transformer_in_feature: str,
        deep_supervision: bool,
        # inherit parameters
        num_classes: int,
        pixel_decoder: nn.Module,
        loss_weight: float = 1.0,
        ignore_value: int = -1,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            transformer_predictor: the transformer decoder that makes prediction
            transformer_in_feature: input feature name to the transformer_predictor
            deep_supervision: whether or not to add supervision to the output of
                every transformer decoder layer
            num_classes: number of classes to predict
            pixel_decoder: the pixel decoder module
            loss_weight: loss weight
            ignore_value: category id to be ignored during training.
        """
        super().__init__(
            input_shape,
            num_classes=num_classes,
            pixel_decoder=pixel_decoder,
            loss_weight=loss_weight,
            ignore_value=ignore_value,
        )
        # Replace the base class's 1x1 conv classifier with the transformer.
        del self.predictor
        self.predictor = transformer_predictor
        self.transformer_in_feature = transformer_in_feature
        self.deep_supervision = deep_supervision

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        ret = super().from_config(cfg, input_shape)
        ret["transformer_in_feature"] = cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE
        if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
            in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        else:
            in_channels = input_shape[ret["transformer_in_feature"]].channels
        ret["transformer_predictor"] = TransformerPredictor(
            cfg, in_channels, mask_classification=False
        )
        ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        return ret

    def forward(self, features, targets=None):
        """
        Returns:
            In training, returns (None, dict of losses)
            In inference, returns (CxHxW logits, {})
        """
        x, aux_outputs = self.layers(features)
        if self.training:
            if self.deep_supervision:
                # One extra loss term per intermediate decoder-layer output.
                losses = self.losses(x, targets)
                for i, aux_output in enumerate(aux_outputs):
                    losses["loss_sem_seg" + f"_{i}"] = self.losses(
                        aux_output["pred_masks"], targets
                    )["loss_sem_seg"]
                return None, losses
            else:
                return None, self.losses(x, targets)
        else:
            x = F.interpolate(
                x, scale_factor=self.common_stride, mode="bilinear", align_corners=False
            )
            return x, {}

    def layers(self, features):
        # Returns (final pred_masks, aux_outputs or None).
        (
            mask_features,
            transformer_encoder_features,
        ) = self.pixel_decoder.forward_features(features)
        if self.transformer_in_feature == "transformer_encoder":
            assert (
                transformer_encoder_features is not None
            ), "Please use the TransformerEncoderPixelDecoder."
            predictions = self.predictor(transformer_encoder_features, mask_features)
        else:
            predictions = self.predictor(
                features[self.transformer_in_feature], mask_features
            )
        if self.deep_supervision:
            return predictions["pred_masks"], predictions["aux_outputs"]
        else:
            return predictions["pred_masks"], None
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/msdeformattn.py | mask2former/modeling/heads/msdeformattn.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer.position_encoding import PositionEmbeddingSine
from ..transformer.transformer import _get_clones, _get_activation_fn
from .ops.modules import MSDeformAttn
# MSDeformAttn Transformer encoder in deformable detr
class MSDeformAttnTransformerEncoderOnly(nn.Module):
    """Encoder-only multi-scale deformable-attention transformer, as used in
    deformable DETR."""

    def __init__(self, d_model=256, nhead=8,
                 num_encoder_layers=6, dim_feedforward=1024, dropout=0.1,
                 activation="relu",
                 num_feature_levels=4, enc_n_points=4,
                 ):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        layer = MSDeformAttnTransformerEncoderLayer(d_model, dim_feedforward,
                                                    dropout, activation,
                                                    num_feature_levels, nhead, enc_n_points)
        self.encoder = MSDeformAttnTransformerEncoder(layer, num_encoder_layers)
        # Learned per-level embedding added to the positional encodings.
        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
        self._reset_parameters()

    def _reset_parameters(self):
        """Xavier-init weight matrices, reset deformable-attn modules, and
        normally initialize the level embedding."""
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)
        for module in self.modules():
            if isinstance(module, MSDeformAttn):
                module._reset_parameters()
        normal_(self.level_embed)

    def get_valid_ratio(self, mask):
        """Per-sample fraction of non-padded columns/rows; shape (bs, 2) as
        (width_ratio, height_ratio)."""
        _, H, W = mask.shape
        valid_H = torch.sum(~mask[:, :, 0], 1)
        valid_W = torch.sum(~mask[:, 0, :], 1)
        return torch.stack([valid_W.float() / W, valid_H.float() / H], -1)

    def forward(self, srcs, pos_embeds):
        """Flatten per-level feature maps into one token sequence, run the
        deformable encoder, and return (memory, spatial_shapes, level_start_index)."""
        # No real padding is used here, so every mask is all-False.
        masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs]
        flat_srcs, flat_masks, flat_pos, shapes = [], [], [], []
        for level, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
            bs, c, h, w = src.shape
            shapes.append((h, w))
            # (bs, c, h, w) -> (bs, h*w, c)
            flat_srcs.append(src.flatten(2).transpose(1, 2))
            flat_masks.append(mask.flatten(1))
            pos = pos_embed.flatten(2).transpose(1, 2)
            # Tag each token with its scale level.
            flat_pos.append(pos + self.level_embed[level].view(1, 1, -1))
        src_flatten = torch.cat(flat_srcs, 1)
        mask_flatten = torch.cat(flat_masks, 1)
        lvl_pos_embed_flatten = torch.cat(flat_pos, 1)
        spatial_shapes = torch.as_tensor(shapes, dtype=torch.long, device=src_flatten.device)
        level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
        # encoder
        memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
        return memory, spatial_shapes, level_start_index
class MSDeformAttnTransformerEncoderLayer(nn.Module):
    """One deformable-attention encoder layer: self-attention followed by an
    FFN, each with a residual connection and post-LayerNorm."""

    def __init__(self,
                 d_model=256, d_ffn=1024,
                 dropout=0.1, activation="relu",
                 n_levels=4, n_heads=8, n_points=4):
        super().__init__()
        # self attention (module creation order kept for reproducible init)
        self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # ffn
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout3 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)

    @staticmethod
    def with_pos_embed(tensor, pos):
        """Add a positional encoding when one is provided."""
        if pos is None:
            return tensor
        return tensor + pos

    def forward_ffn(self, src):
        """Feed-forward sub-block with residual connection and LayerNorm."""
        hidden = self.linear2(self.dropout2(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout3(hidden))

    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
        # Deformable self-attention with residual + norm, then the FFN block.
        attn_out = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
        src = self.norm1(src + self.dropout1(attn_out))
        return self.forward_ffn(src)
class MSDeformAttnTransformerEncoder(nn.Module):
    """Stack of deformable-attention encoder layers sharing one set of
    reference points."""

    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """Normalized (x, y) sampling centers for every token at every level,
        scaled by the per-level valid ratios.

        Returns a tensor of shape (bs, sum(h*w), n_levels, 2).
        """
        per_level = []
        for level, (H_, W_) in enumerate(spatial_shapes):
            # Pixel-center coordinates, normalized by the valid extent.
            grid_y, grid_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
                                            torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
            norm_y = grid_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * H_)
            norm_x = grid_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * W_)
            per_level.append(torch.stack((norm_x, norm_y), -1))
        reference_points = torch.cat(per_level, 1)
        return reference_points[:, :, None] * valid_ratios[:, None]

    def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
        output = src
        for layer in self.layers:
            output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
        return output
@SEM_SEG_HEADS_REGISTRY.register()
class MSDeformAttnPixelDecoder(nn.Module):
    """Pixel decoder that runs a multi-scale deformable-attention transformer
    encoder over the coarser backbone features and tops it up with plain FPN
    levels down to ``common_stride``."""

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        transformer_dropout: float,
        transformer_nheads: int,
        transformer_dim_feedforward: int,
        transformer_enc_layers: int,
        conv_dim: int,
        mask_dim: int,
        norm: Optional[Union[str, Callable]] = None,
        # deformable transformer encoder args
        transformer_in_features: List[str],
        common_stride: int,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            transformer_dropout: dropout probability in transformer
            transformer_nheads: number of heads in transformer
            transformer_dim_feedforward: dimension of feedforward network
            transformer_enc_layers: number of transformer encoder layers
            conv_dims: number of output channels for the intermediate conv layers.
            mask_dim: number of output channels for the final conv layer.
            norm (str or callable): normalization for all conv layers
        """
        super().__init__()
        transformer_input_shape = {
            k: v for k, v in input_shape.items() if k in transformer_in_features
        }
        # this is the input shape of pixel decoder
        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]  # starting from "res2" to "res5"
        self.feature_strides = [v.stride for k, v in input_shape]
        self.feature_channels = [v.channels for k, v in input_shape]
        # this is the input shape of transformer encoder (could use less features than pixel decoder
        transformer_input_shape = sorted(transformer_input_shape.items(), key=lambda x: x[1].stride)
        self.transformer_in_features = [k for k, v in transformer_input_shape]  # starting from "res2" to "res5"
        transformer_in_channels = [v.channels for k, v in transformer_input_shape]
        self.transformer_feature_strides = [v.stride for k, v in transformer_input_shape]  # to decide extra FPN layers
        self.transformer_num_feature_levels = len(self.transformer_in_features)
        # 1x1 projections mapping each transformer input level to conv_dim.
        if self.transformer_num_feature_levels > 1:
            input_proj_list = []
            # from low resolution to high resolution (res5 -> res2)
            for in_channels in transformer_in_channels[::-1]:
                input_proj_list.append(nn.Sequential(
                    nn.Conv2d(in_channels, conv_dim, kernel_size=1),
                    nn.GroupNorm(32, conv_dim),
                ))
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            self.input_proj = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(transformer_in_channels[-1], conv_dim, kernel_size=1),
                    nn.GroupNorm(32, conv_dim),
                )])
        for proj in self.input_proj:
            nn.init.xavier_uniform_(proj[0].weight, gain=1)
            nn.init.constant_(proj[0].bias, 0)
        self.transformer = MSDeformAttnTransformerEncoderOnly(
            d_model=conv_dim,
            dropout=transformer_dropout,
            nhead=transformer_nheads,
            dim_feedforward=transformer_dim_feedforward,
            num_encoder_layers=transformer_enc_layers,
            num_feature_levels=self.transformer_num_feature_levels,
        )
        N_steps = conv_dim // 2
        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
        self.mask_dim = mask_dim
        # use 1x1 conv instead
        self.mask_features = Conv2d(
            conv_dim,
            mask_dim,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        weight_init.c2_xavier_fill(self.mask_features)
        self.maskformer_num_feature_levels = 3  # always use 3 scales
        self.common_stride = common_stride
        # extra fpn levels: bridge from the finest transformer level down to
        # the common output stride.
        stride = min(self.transformer_feature_strides)
        self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
        lateral_convs = []
        output_convs = []
        use_bias = norm == ""
        for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]):
            lateral_norm = get_norm(norm, conv_dim)
            output_norm = get_norm(norm, conv_dim)
            lateral_conv = Conv2d(
                in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
            )
            output_conv = Conv2d(
                conv_dim,
                conv_dim,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
                norm=output_norm,
                activation=F.relu,
            )
            weight_init.c2_xavier_fill(lateral_conv)
            weight_init.c2_xavier_fill(output_conv)
            self.add_module("adapter_{}".format(idx + 1), lateral_conv)
            self.add_module("layer_{}".format(idx + 1), output_conv)
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        ret = {}
        ret["input_shape"] = {
            k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
        }
        ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM
        ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
        ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
        # ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
        ret["transformer_dim_feedforward"] = 1024  # use 1024 for deformable transformer encoder
        ret[
            "transformer_enc_layers"
        ] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS  # a separate config
        ret["transformer_in_features"] = cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES
        ret["common_stride"] = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE
        return ret

    @autocast(enabled=False)
    def forward_features(self, features):
        """Return (mask_features, coarsest transformer output, 3 multi-scale maps)."""
        srcs = []
        pos = []
        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, f in enumerate(self.transformer_in_features[::-1]):
            x = features[f].float()  # deformable detr does not support half precision
            srcs.append(self.input_proj[idx](x))
            pos.append(self.pe_layer(x))
        y, spatial_shapes, level_start_index = self.transformer(srcs, pos)
        bs = y.shape[0]
        # Split the flattened token sequence back into one chunk per level.
        split_size_or_sections = [None] * self.transformer_num_feature_levels
        for i in range(self.transformer_num_feature_levels):
            if i < self.transformer_num_feature_levels - 1:
                split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
            else:
                split_size_or_sections[i] = y.shape[1] - level_start_index[i]
        y = torch.split(y, split_size_or_sections, dim=1)
        out = []
        multi_scale_features = []
        num_cur_levels = 0
        for i, z in enumerate(y):
            out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
        # append `out` with extra FPN levels
        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, f in enumerate(self.in_features[:self.num_fpn_levels][::-1]):
            x = features[f].float()
            lateral_conv = self.lateral_convs[idx]
            output_conv = self.output_convs[idx]
            cur_fpn = lateral_conv(x)
            # Following FPN implementation, we use nearest upsampling here
            y = cur_fpn + F.interpolate(out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False)
            y = output_conv(y)
            out.append(y)
        for o in out:
            if num_cur_levels < self.maskformer_num_feature_levels:
                multi_scale_features.append(o)
                num_cur_levels += 1
        return self.mask_features(out[-1]), out[0], multi_scale_features
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/__init__.py | mask2former/modeling/heads/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/pixel_decoder.py | mask2former/modeling/heads/pixel_decoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer.position_encoding import PositionEmbeddingSine
from ..transformer.transformer import TransformerEncoder, TransformerEncoderLayer
from .msdeformattn import MSDeformAttnPixelDecoder
def build_pixel_decoder(cfg, input_shape):
    """
    Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
    model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
    # Every pixel decoder must expose a callable forward_features().
    if not callable(getattr(model, "forward_features", None)):
        raise ValueError(
            "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
            f"Please implement forward_features for {name} to only return mask features."
        )
    return model
@SEM_SEG_HEADS_REGISTRY.register()
class BasePixelDecoder(nn.Module):
    """Simple FPN-style pixel decoder: lateral 1x1 convs plus top-down 3x3
    output convs, ending in a conv that produces the mask features."""

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        conv_dim: int,
        mask_dim: int,
        norm: Optional[Union[str, Callable]] = None,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            conv_dims: number of output channels for the intermediate conv layers.
            mask_dim: number of output channels for the final conv layer.
            norm (str or callable): normalization for all conv layers
        """
        super().__init__()
        # Order features by stride so index 0 is the finest level.
        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]  # starting from "res2" to "res5"
        feature_channels = [v.channels for k, v in input_shape]
        lateral_convs = []
        output_convs = []
        use_bias = norm == ""
        for idx, in_channels in enumerate(feature_channels):
            if idx == len(self.in_features) - 1:
                # Coarsest level: no lateral conv, just project with a 3x3 conv.
                output_norm = get_norm(norm, conv_dim)
                output_conv = Conv2d(
                    in_channels,
                    conv_dim,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=use_bias,
                    norm=output_norm,
                    activation=F.relu,
                )
                weight_init.c2_xavier_fill(output_conv)
                self.add_module("layer_{}".format(idx + 1), output_conv)
                lateral_convs.append(None)
                output_convs.append(output_conv)
            else:
                lateral_norm = get_norm(norm, conv_dim)
                output_norm = get_norm(norm, conv_dim)
                lateral_conv = Conv2d(
                    in_channels,
                    conv_dim,
                    kernel_size=1,
                    bias=use_bias,
                    norm=lateral_norm,
                )
                output_conv = Conv2d(
                    conv_dim,
                    conv_dim,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=use_bias,
                    norm=output_norm,
                    activation=F.relu,
                )
                weight_init.c2_xavier_fill(lateral_conv)
                weight_init.c2_xavier_fill(output_conv)
                self.add_module("adapter_{}".format(idx + 1), lateral_conv)
                self.add_module("layer_{}".format(idx + 1), output_conv)
                lateral_convs.append(lateral_conv)
                output_convs.append(output_conv)
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]
        self.mask_dim = mask_dim
        self.mask_features = Conv2d(
            conv_dim,
            mask_dim,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        weight_init.c2_xavier_fill(self.mask_features)

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        ret = {}
        ret["input_shape"] = {
            k: v
            for k, v in input_shape.items()
            if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
        }
        ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
        ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM
        return ret

    def forward_features(self, features):
        # Top-down pass; returns (mask_features, None) — the second slot is the
        # transformer-encoder features, which this decoder does not produce.
        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, f in enumerate(self.in_features[::-1]):
            x = features[f]
            lateral_conv = self.lateral_convs[idx]
            output_conv = self.output_convs[idx]
            if lateral_conv is None:
                y = output_conv(x)
            else:
                cur_fpn = lateral_conv(x)
                # Following FPN implementation, we use nearest upsampling here
                y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
                y = output_conv(y)
        return self.mask_features(y), None

    def forward(self, features, targets=None):
        logger = logging.getLogger(__name__)
        logger.warning(
            "Calling forward() may cause unpredicted behavior of PixelDecoder module."
        )
        return self.forward_features(features)
class TransformerEncoderOnly(nn.Module):
    """A Transformer consisting of the encoder stack only (no decoder)."""

    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        layer = TransformerEncoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        # Pre-norm variants need one final LayerNorm on the encoder output.
        final_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(layer, num_encoder_layers, final_norm)
        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize every weight matrix; 1-D parameters (biases,
        # norm scales) keep their defaults.
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def forward(self, src, mask, pos_embed):
        """Encode a (N, C, H, W) feature map and return it in the same layout."""
        bs, c, h, w = src.shape
        # NxCxHxW -> HWxNxC: the sequence-first layout the encoder expects.
        tokens = src.flatten(2).permute(2, 0, 1)
        pos = pos_embed.flatten(2).permute(2, 0, 1)
        key_padding = mask.flatten(1) if mask is not None else None
        memory = self.encoder(tokens, src_key_padding_mask=key_padding, pos=pos)
        return memory.permute(1, 2, 0).view(bs, c, h, w)
@SEM_SEG_HEADS_REGISTRY.register()
class TransformerEncoderPixelDecoder(BasePixelDecoder):
    """FPN-style pixel decoder that refines the lowest-resolution feature map
    with a Transformer encoder before running the top-down pathway.

    forward_features() additionally returns that refined encoder feature so a
    downstream Transformer decoder can consume it.
    """

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        transformer_dropout: float,
        transformer_nheads: int,
        transformer_dim_feedforward: int,
        transformer_enc_layers: int,
        transformer_pre_norm: bool,
        conv_dim: int,
        mask_dim: int,
        norm: Optional[Union[str, Callable]] = None,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            transformer_dropout: dropout probability in transformer
            transformer_nheads: number of heads in transformer
            transformer_dim_feedforward: dimension of feedforward network
            transformer_enc_layers: number of transformer encoder layers
            transformer_pre_norm: whether to use pre-layernorm or not
            conv_dims: number of output channels for the intermediate conv layers.
            mask_dim: number of output channels for the final conv layer.
            norm (str or callable): normalization for all conv layers
        """
        # Base class builds the plain FPN lateral/output convs and mask_features.
        super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm)
        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]  # starting from "res2" to "res5"
        # NOTE(review): feature_strides is computed but unused here; only
        # feature_channels feeds in_channels below.
        feature_strides = [v.stride for k, v in input_shape]
        feature_channels = [v.channels for k, v in input_shape]
        # The transformer runs on the last (highest-stride) input feature.
        in_channels = feature_channels[len(self.in_features) - 1]
        self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1)
        weight_init.c2_xavier_fill(self.input_proj)
        self.transformer = TransformerEncoderOnly(
            d_model=conv_dim,
            dropout=transformer_dropout,
            nhead=transformer_nheads,
            dim_feedforward=transformer_dim_feedforward,
            num_encoder_layers=transformer_enc_layers,
            normalize_before=transformer_pre_norm,
        )
        # conv_dim // 2 steps for the sine positional embedding (presumably one
        # half per spatial axis — see PositionEmbeddingSine).
        N_steps = conv_dim // 2
        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
        # update layer
        # Replace the base class's top-level output conv with a fresh one that
        # will consume the transformer output instead of the raw backbone feature.
        use_bias = norm == ""
        output_norm = get_norm(norm, conv_dim)
        output_conv = Conv2d(
            conv_dim,
            conv_dim,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=use_bias,
            norm=output_norm,
            activation=F.relu,
        )
        weight_init.c2_xavier_fill(output_conv)
        # Re-register under the same name so checkpoints keep the same keys.
        delattr(self, "layer_{}".format(len(self.in_features)))
        self.add_module("layer_{}".format(len(self.in_features)), output_conv)
        self.output_convs[0] = output_conv

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        # Extend the base decoder's kwargs with the transformer hyper-parameters.
        ret = super().from_config(cfg, input_shape)
        ret["transformer_dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
        ret["transformer_nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
        ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
        ret[
            "transformer_enc_layers"
        ] = cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS  # a separate config
        ret["transformer_pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
        return ret

    def forward_features(self, features):
        """Top-down pass; returns (mask_features, transformer_encoder_features)."""
        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, f in enumerate(self.in_features[::-1]):
            x = features[f]
            lateral_conv = self.lateral_convs[idx]
            output_conv = self.output_convs[idx]
            if lateral_conv is None:
                # Lowest-resolution level: refine with the transformer encoder.
                transformer = self.input_proj(x)
                pos = self.pe_layer(x)
                transformer = self.transformer(transformer, None, pos)
                y = output_conv(transformer)
                # save intermediate feature as input to Transformer decoder
                transformer_encoder_features = transformer
            else:
                cur_fpn = lateral_conv(x)
                # Following FPN implementation, we use nearest upsampling here
                y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
                y = output_conv(y)
        return self.mask_features(y), transformer_encoder_features

    def forward(self, features, targets=None):
        # Discouraged entry point; use forward_features() directly.
        logger = logging.getLogger(__name__)
        logger.warning(
            "Calling forward() may cause unpredicted behavior of PixelDecoder module."
        )
        return self.forward_features(features)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/mask_former_head.py | mask2former/modeling/heads/mask_former_head.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer.transformer_predictor import TransformerPredictor
from .pixel_decoder import build_pixel_decoder
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
    """MaskFormer segmentation head: a pixel decoder that produces per-pixel
    mask features, plus a transformer predictor that produces per-query class
    logits and mask embeddings."""

    # Bumped when the checkpoint layout changed (pixel-decoder weights moved
    # under the "pixel_decoder." sub-prefix); see _load_from_state_dict.
    _version = 2

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """Migrate pre-v2 checkpoints in place before loading.

        Old checkpoints stored pixel-decoder weights directly under this head's
        prefix; every such key (anything not under "predictor") is moved below
        "pixel_decoder.".
        NOTE(review): super()._load_from_state_dict() is not chained. This
        module assigns only child modules and plain scalars in __init__, so no
        direct parameters are skipped — revisit if direct parameters are added.
        """
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # Do not warn if train from scratch
            scratch = True
            logger = logging.getLogger(__name__)
            for k in list(state_dict.keys()):
                newk = k
                if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
                    newk = k.replace(prefix, prefix + "pixel_decoder.")
                    # logger.debug(f"{k} ==> {newk}")
                if newk != k:
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]
                    scratch = False
            if not scratch:
                logger.warning(
                    f"Weight format of {self.__class__.__name__} have changed! "
                    "Please upgrade your models. Applying automatic conversion now ..."
                )

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        num_classes: int,
        pixel_decoder: nn.Module,
        loss_weight: float = 1.0,
        ignore_value: int = -1,
        # extra parameters
        transformer_predictor: nn.Module,
        transformer_in_feature: str,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            num_classes: number of classes to predict
            pixel_decoder: the pixel decoder module
            loss_weight: loss weight
            ignore_value: category id to be ignored during training.
            transformer_predictor: the transformer decoder that makes prediction
            transformer_in_feature: input feature name to the transformer_predictor
        """
        super().__init__()
        # Order features by stride; in_features runs from finest to coarsest.
        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]
        self.ignore_value = ignore_value
        self.common_stride = 4
        self.loss_weight = loss_weight
        self.pixel_decoder = pixel_decoder
        self.predictor = transformer_predictor
        self.transformer_in_feature = transformer_in_feature
        self.num_classes = num_classes

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        """Build constructor kwargs (including sub-modules) from a detectron2 config."""
        return {
            "input_shape": {
                k: v
                for k, v in input_shape.items()
                if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
            },
            "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
            "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
            "pixel_decoder": build_pixel_decoder(cfg, input_shape),
            "loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
            "transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
            "transformer_predictor": TransformerPredictor(
                cfg,
                # Predictor input width: either the pixel decoder's conv width
                # (when fed the transformer-encoder feature) or the chosen
                # backbone feature's channel count.
                cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
                if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder"
                else input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels,
                mask_classification=True,
            ),
        }

    def forward(self, features):
        """Run the full head; thin alias for layers()."""
        return self.layers(features)

    def layers(self, features):
        """Decode pixel features, then run the transformer predictor."""
        (
            mask_features,
            transformer_encoder_features,
        ) = self.pixel_decoder.forward_features(features)
        if self.transformer_in_feature == "transformer_encoder":
            # Only TransformerEncoderPixelDecoder returns a non-None encoder feature.
            assert (
                transformer_encoder_features is not None
            ), "Please use the TransformerEncoderPixelDecoder."
            predictions = self.predictor(transformer_encoder_features, mask_features)
        else:
            predictions = self.predictor(
                features[self.transformer_in_feature], mask_features
            )
        return predictions
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/ops/setup.py | mask2former/modeling/heads/ops/setup.py | # ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
import os
import glob
import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
from setuptools import find_packages
from setuptools import setup
# NOTE(review): defined but never passed to setup() below — presumably intended
# as install_requires; verify before relying on it.
requirements = ["torch", "torchvision"]


def get_extensions():
    """Collect the C++/CUDA sources and return the extension module spec.

    Raises NotImplementedError when no usable CUDA toolchain is found; a
    CPU-only build of this op is not supported.
    """
    root = os.path.dirname(os.path.abspath(__file__))
    src_dir = os.path.join(root, "src")

    main_sources = glob.glob(os.path.join(src_dir, "*.cpp"))
    cpu_sources = glob.glob(os.path.join(src_dir, "cpu", "*.cpp"))
    cuda_sources = glob.glob(os.path.join(src_dir, "cuda", "*.cu"))

    sources = main_sources + cpu_sources
    extension = CppExtension
    extra_compile_args = {"cxx": []}
    define_macros = []

    # Force cuda since torch ask for a device, not if cuda is in fact available.
    cuda_wanted = os.environ.get('FORCE_CUDA') or torch.cuda.is_available()
    if cuda_wanted and CUDA_HOME is not None:
        extension = CUDAExtension
        sources += cuda_sources
        define_macros.append(("WITH_CUDA", None))
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]
    elif CUDA_HOME is None:
        raise NotImplementedError('CUDA_HOME is None. Please set environment variable CUDA_HOME.')
    else:
        raise NotImplementedError('No CUDA runtime is found. Please set FORCE_CUDA=1 or test it by running torch.cuda.is_available().')

    sources = [os.path.join(src_dir, s) for s in sources]
    return [
        extension(
            "MultiScaleDeformableAttention",
            sources,
            include_dirs=[src_dir],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
# Build and register the extension module; BuildExtension drives the mixed
# C++/CUDA compilation with the flags assembled in get_extensions().
setup(
    name="MultiScaleDeformableAttention",
    version="1.0",
    author="Weijie Su",
    url="https://github.com/fundamentalvision/Deformable-DETR",
    description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
    packages=find_packages(exclude=("configs", "tests",)),
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/ops/test.py | mask2former/modeling/heads/ops/test.py | # ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch
# Toy problem sizes: batch N, attention heads M, channels-per-head D,
# query length Lq, number of feature levels L, sampling points per level P.
N, M, D = 1, 2, 2
Lq, L, P = 2, 2, 2
# Per-level spatial shapes (H, W) of the flattened multi-level value tensor.
shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
# Offset of each level inside the flattened length-S value dimension
# (exclusive prefix sum of H*W per level).
level_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
S = sum([(H*W).item() for H, W in shapes])
# Fixed seed so the forward/gradient checks below are reproducible.
torch.manual_seed(3)
@torch.no_grad()
def check_forward_equal_with_pytorch_double():
    """Compare the CUDA forward against the PyTorch reference in float64."""
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    # Normalize so the weights sum to 1 over the (levels x points) axes.
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    ref = ms_deform_attn_core_pytorch(
        value.double(), shapes, sampling_locations.double(), attention_weights.double()
    ).detach().cpu()
    out = MSDeformAttnFunction.apply(
        value.double(), shapes, level_start_index,
        sampling_locations.double(), attention_weights.double(), im2col_step,
    ).detach().cpu()

    fwdok = torch.allclose(out, ref)
    max_abs_err = (out - ref).abs().max()
    max_rel_err = ((out - ref).abs() / ref.abs()).max()
    print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
    """Compare the CUDA forward against the PyTorch reference in float32."""
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    # Normalize so the weights sum to 1 over the (levels x points) axes.
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    ref = ms_deform_attn_core_pytorch(
        value, shapes, sampling_locations, attention_weights
    ).detach().cpu()
    out = MSDeformAttnFunction.apply(
        value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step
    ).detach().cpu()

    # Looser tolerances than the float64 check, to absorb fp32 rounding.
    fwdok = torch.allclose(out, ref, rtol=1e-2, atol=1e-3)
    max_abs_err = (out - ref).abs().max()
    max_rel_err = ((out - ref).abs() / ref.abs()).max()
    print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
    """Run torch.autograd.gradcheck on the CUDA op for the given channel width."""
    value = torch.rand(N, S, M, channels).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    # Only the selected inputs participate in the gradient check.
    value.requires_grad = grad_value
    sampling_locations.requires_grad = grad_sampling_loc
    attention_weights.requires_grad = grad_attn_weight

    gradok = gradcheck(
        MSDeformAttnFunction.apply,
        (value.double(), shapes, level_start_index,
         sampling_locations.double(), attention_weights.double(), im2col_step),
    )
    print(f'* {gradok} check_gradient_numerical(D={channels})')
if __name__ == '__main__':
    # Smoke-test the CUDA kernel against the PyTorch reference, then gradcheck
    # a range of channel widths (deliberately including non-powers-of-two).
    check_forward_equal_with_pytorch_double()
    check_forward_equal_with_pytorch_float()
    for channels in [30, 32, 64, 71, 1025, 2048, 3096]:
        check_gradient_numerical(channels, True, True, True)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/ops/modules/ms_deform_attn.py | mask2former/modeling/heads/ops/modules/ms_deform_attn.py | # ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
from ..functions import MSDeformAttnFunction
from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
return (n & (n-1) == 0) and n != 0
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 128
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
try:
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
except:
# CPU
output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
# # For FLOPs calculation only
# output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/ops/modules/__init__.py | mask2former/modeling/heads/ops/modules/__init__.py | # ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from .ms_deform_attn import MSDeformAttn
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/ops/functions/ms_deform_attn_func.py | mask2former/modeling/heads/ops/functions/ms_deform_attn_func.py | # ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
try:
    import MultiScaleDeformableAttention as MSDA
except ModuleNotFoundError as e:
    info_string = (
        "\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n"
        "\t`cd mask2former/modeling/pixel_decoder/ops`\n"
        "\t`sh make.sh`\n"
    )
    # Chain the original exception so the real import failure stays visible
    # (the captured `e` was previously unused).
    raise ModuleNotFoundError(info_string) from e
class MSDeformAttnFunction(Function):
    """Autograd wrapper around the compiled MSDA CUDA kernels."""

    @staticmethod
    def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
        # im2col_step is stashed on ctx because the backward kernel takes it too.
        ctx.im2col_step = im2col_step
        output = MSDA.ms_deform_attn_forward(
            value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
        ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = \
            MSDA.ms_deform_attn_backward(
                value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)

        # None for the non-differentiable inputs: spatial shapes, level start
        # index, and im2col_step.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
    """Pure-PyTorch reference for multi-scale deformable attention.

    For debug and test only; use the CUDA version in production.

    Args:
        value: (N, S, M, D) flattened multi-level features, S = sum(H_l * W_l).
        value_spatial_shapes: per-level (H_l, W_l) pairs.
        sampling_locations: (N, Lq, M, L, P, 2) normalized coords in [0, 1].
        attention_weights: (N, Lq, M, L, P) per-point weights.

    Returns:
        (N, Lq, M*D) attended features.
    """
    bs, _, num_heads, head_dim = value.shape
    _, num_queries, _, num_levels, num_points, _ = sampling_locations.shape

    per_level = value.split([h * w for h, w in value_spatial_shapes], dim=1)
    # grid_sample expects coordinates in [-1, 1].
    grids = 2 * sampling_locations - 1

    sampled = []
    for level, (h, w) in enumerate(value_spatial_shapes):
        # (N, H*W, M, D) -> (N, H*W, M*D) -> (N, M*D, H*W) -> (N*M, D, H, W)
        feat = per_level[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, head_dim, h, w)
        # (N, Lq, M, P, 2) -> (N, M, Lq, P, 2) -> (N*M, Lq, P, 2)
        grid = grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
        # (N*M, D, Lq, P)
        sampled.append(
            F.grid_sample(feat, grid, mode='bilinear', padding_mode='zeros', align_corners=False)
        )

    # (N, Lq, M, L, P) -> (N, M, Lq, L, P) -> (N*M, 1, Lq, L*P)
    weights = attention_weights.transpose(1, 2).reshape(bs * num_heads, 1, num_queries, num_levels * num_points)
    out = (torch.stack(sampled, dim=-2).flatten(-2) * weights).sum(-1)
    out = out.view(bs, num_heads * head_dim, num_queries)
    return out.transpose(1, 2).contiguous()
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/heads/ops/functions/__init__.py | mask2former/modeling/heads/ops/functions/__init__.py | # ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from .ms_deform_attn_func import MSDeformAttnFunction
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/transformer/transformer_predictor.py | mask2former/modeling/transformer/transformer_predictor.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from .position_encoding import PositionEmbeddingSine
from .transformer import Transformer
class TransformerPredictor(nn.Module):
    @configurable
    def __init__(
        self,
        in_channels,
        mask_classification=True,
        *,
        num_classes: int,
        hidden_dim: int,
        num_queries: int,
        nheads: int,
        dropout: float,
        dim_feedforward: int,
        enc_layers: int,
        dec_layers: int,
        pre_norm: bool,
        deep_supervision: bool,
        mask_dim: int,
        enforce_input_project: bool,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            in_channels: channels of the input features
            mask_classification: whether to add mask classifier or not
            num_classes: number of classes
            hidden_dim: Transformer feature dimension
            num_queries: number of queries
            nheads: number of heads
            dropout: dropout in Transformer
            dim_feedforward: feature dimension in feedforward network
            enc_layers: number of Transformer encoder layers
            dec_layers: number of Transformer decoder layers
            pre_norm: whether to use pre-LayerNorm or not
            deep_supervision: whether to add supervision to every decoder layers
            mask_dim: mask feature dimension
            enforce_input_project: add input project 1x1 conv even if input
                channels and hidden dim is identical
        """
        super().__init__()
        self.mask_classification = mask_classification
        # positional encoding
        N_steps = hidden_dim // 2
        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
        transformer = Transformer(
            d_model=hidden_dim,
            dropout=dropout,
            nhead=nheads,
            dim_feedforward=dim_feedforward,
            num_encoder_layers=enc_layers,
            num_decoder_layers=dec_layers,
            normalize_before=pre_norm,
            # With deep supervision the decoder returns every layer's output.
            return_intermediate_dec=deep_supervision,
        )
        self.num_queries = num_queries
        self.transformer = transformer
        # Re-read the width from the transformer so downstream layers match it.
        hidden_dim = transformer.d_model
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        # Project input features to hidden_dim only when the widths differ
        # (or when the config forces it); otherwise pass through unchanged.
        if in_channels != hidden_dim or enforce_input_project:
            self.input_proj = Conv2d(in_channels, hidden_dim, kernel_size=1)
            weight_init.c2_xavier_fill(self.input_proj)
        else:
            self.input_proj = nn.Sequential()
        self.aux_loss = deep_supervision
        # output FFNs
        if self.mask_classification:
            # +1 output for the "no object" class.
            self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
        self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
@classmethod
def from_config(cls, cfg, in_channels, mask_classification):
ret = {}
ret["in_channels"] = in_channels
ret["mask_classification"] = mask_classification
ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
# Transformer parameters:
ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS
ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS
ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
return ret
def forward(self, x, mask_features):
pos = self.pe_layer(x)
src = x
mask = None
hs, memory = self.transformer(
self.input_proj(src), mask, self.query_embed.weight, pos
)
if self.mask_classification:
outputs_class = self.class_embed(hs)
out = {"pred_logits": outputs_class[-1]}
else:
out = {}
if self.aux_loss:
# [l, bs, queries, embed]
mask_embed = self.mask_embed(hs)
outputs_seg_masks = torch.einsum(
"lbqc,bchw->lbqhw", mask_embed, mask_features
)
out["pred_masks"] = outputs_seg_masks[-1]
out["aux_outputs"] = self._set_aux_loss(
outputs_class if self.mask_classification else None, outputs_seg_masks
)
else:
# FIXME h_boxes takes the last one computed, keep this in mind
# [bs, queries, embed]
mask_embed = self.mask_embed(hs[-1])
outputs_seg_masks = torch.einsum(
"bqc,bchw->bqhw", mask_embed, mask_features
)
out["pred_masks"] = outputs_seg_masks
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_seg_masks):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
if self.mask_classification:
return [
{"pred_logits": a, "pred_masks": b}
for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
]
else:
return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN).

    ``num_layers`` linear layers with ReLU between them (no activation after
    the last layer).
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # Per-layer dimensions: input -> hidden * (num_layers - 1) -> output.
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(d_in, d_out) for d_in, d_out in zip(dims[:-1], dims[1:])
        )

    def forward(self, x):
        last = self.num_layers - 1
        for idx, layer in enumerate(self.layers):
            x = layer(x)
            if idx < last:
                x = F.relu(x)
        return x
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/transformer/__init__.py | mask2former/modeling/transformer/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/transformer/position_encoding.py | mask2former/modeling/transformer/position_encoding.py | # Copyright (c) Facebook, Inc. and its affiliates.
# # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    Produces ``2 * num_pos_feats`` channels: sine/cosine features for the y
    axis followed by the same for the x axis.
    """

    def __init__(
        self, num_pos_feats=64, temperature=10000, normalize=False, scale=None
    ):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        # Default scale maps normalized coordinates onto [0, 2*pi].
        self.scale = 2 * math.pi if scale is None else scale

    def forward(self, x, mask=None):
        # ``mask`` marks padded pixels; with no mask, every pixel is valid.
        if mask is None:
            mask = torch.zeros(
                (x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool
            )
        valid = ~mask
        # Cumulative counts of valid pixels give 1-based coordinates.
        y_embed = valid.cumsum(1, dtype=torch.float32)
        x_embed = valid.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        # Geometric frequency schedule shared by both axes.
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (
            2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats
        )

        def interleave(embed):
            # Interleave sin on even channels and cos on odd channels.
            raw = embed[:, :, :, None] / dim_t
            return torch.stack(
                (raw[:, :, :, 0::2].sin(), raw[:, :, :, 1::2].cos()), dim=4
            ).flatten(3)

        pos = torch.cat((interleave(y_embed), interleave(x_embed)), dim=3)
        # NHWC -> NCHW
        return pos.permute(0, 3, 1, 2)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/transformer/mask2former_transformer_predictor.py | mask2former/modeling/transformer/mask2former_transformer_predictor.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d
from .position_encoding import PositionEmbeddingSine
class SelfAttentionLayer(nn.Module):
    """Residual self-attention block with selectable pre-/post-LayerNorm."""

    def __init__(self, d_model, nhead, dropout=0.0,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

        # Stored for interface parity with the FFN layer; not used here.
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for every weight matrix (biases are 1-D and skipped).
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys, never to values.
        return tensor + pos if pos is not None else tensor

    def forward_post(self, tgt,
                     tgt_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        qk = self.with_pos_embed(tgt, query_pos)
        attn_out = self.self_attn(qk, qk, value=tgt, attn_mask=tgt_mask,
                                  key_padding_mask=tgt_key_padding_mask)[0]
        # Post-norm: residual add, then LayerNorm.
        return self.norm(tgt + self.dropout(attn_out))

    def forward_pre(self, tgt,
                    tgt_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        # Pre-norm: LayerNorm first, residual add afterwards.
        normed = self.norm(tgt)
        qk = self.with_pos_embed(normed, query_pos)
        attn_out = self.self_attn(qk, qk, value=normed, attn_mask=tgt_mask,
                                  key_padding_mask=tgt_key_padding_mask)[0]
        return tgt + self.dropout(attn_out)

    def forward(self, tgt,
                tgt_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(tgt, tgt_mask, tgt_key_padding_mask, query_pos)
class CrossAttentionLayer(nn.Module):
    """Residual cross-attention block: queries attend to a memory tensor."""

    def __init__(self, d_model, nhead, dropout=0.0,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

        # Stored for interface parity with the FFN layer; not used here.
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for every weight matrix (biases are 1-D and skipped).
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys, never to values.
        return tensor + pos if pos is not None else tensor

    def forward_post(self, tgt, memory,
                     memory_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        attn_out = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                       key=self.with_pos_embed(memory, pos),
                                       value=memory, attn_mask=memory_mask,
                                       key_padding_mask=memory_key_padding_mask)[0]
        # Post-norm: residual add, then LayerNorm.
        return self.norm(tgt + self.dropout(attn_out))

    def forward_pre(self, tgt, memory,
                    memory_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        # Pre-norm: normalize the queries before attending.
        normed = self.norm(tgt)
        attn_out = self.multihead_attn(query=self.with_pos_embed(normed, query_pos),
                                       key=self.with_pos_embed(memory, pos),
                                       value=memory, attn_mask=memory_mask,
                                       key_padding_mask=memory_key_padding_mask)[0]
        return tgt + self.dropout(attn_out)

    def forward(self, tgt, memory,
                memory_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(tgt, memory, memory_mask,
                      memory_key_padding_mask, pos, query_pos)
class FFNLayer(nn.Module):
    """Residual two-layer feed-forward block with selectable norm placement."""

    def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
                 activation="relu", normalize_before=False):
        super().__init__()
        # Feed-forward: expand to dim_feedforward, activate, project back.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm = nn.LayerNorm(d_model)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier init for every weight matrix (biases are 1-D and skipped).
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Unused by this layer; kept for interface parity with the others.
        return tensor + pos if pos is not None else tensor

    def forward_post(self, tgt):
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        # Post-norm: residual add, then LayerNorm.
        return self.norm(tgt + self.dropout(ffn_out))

    def forward_pre(self, tgt):
        # Pre-norm: LayerNorm before the FFN, residual add afterwards.
        normed = self.norm(tgt)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return tgt + self.dropout(ffn_out)

    def forward(self, tgt):
        return self.forward_pre(tgt) if self.normalize_before else self.forward_post(tgt)
def _get_activation_fn(activation):
    """Return an activation function given a string.

    Args:
        activation: one of "relu", "gelu", or "glu".

    Raises:
        RuntimeError: if ``activation`` is not a supported name.
    """
    activations = {"relu": F.relu, "gelu": F.gelu, "glu": F.glu}
    try:
        return activations[activation]
    except KeyError:
        # Bug fix: the original message said "relu/gelu" even though "glu" is
        # also accepted above.
        raise RuntimeError(
            f"activation should be relu/gelu/glu, not {activation}."
        ) from None
class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN).

    Stacks ``num_layers`` linear layers; ReLU is applied between layers but
    not after the final one.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # Layer widths: input -> hidden repeated (num_layers - 1) -> output.
        widths = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(fan_in, fan_out)
            for fan_in, fan_out in zip(widths[:-1], widths[1:])
        )

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if i + 1 < self.num_layers:
                x = F.relu(x)
        return x
# @TRANSFORMER_DECODER_REGISTRY.register()
class MultiScaleMaskedTransformerDecoder(nn.Module):
    """Mask2Former-style transformer decoder over multi-scale features.

    Learnable queries cross-attend to one feature level per decoder layer
    (cycling through ``num_feature_levels`` scales) and are supervised after
    every layer via per-layer class/mask predictions.
    """

    # Checkpoint format version; pre-v2 checkpoints are converted in
    # _load_from_state_dict below.
    _version = 2

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # Convert pre-v2 checkpoints: "static_query" was renamed "query_feat".
        # NOTE(review): super()._load_from_state_dict is not called; this module
        # has no direct parameters of its own, and presumably the children are
        # loaded by load_state_dict's own recursion — confirm against torch.
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # Do not warn if train from scratch
            scratch = True
            logger = logging.getLogger(__name__)
            for k in list(state_dict.keys()):
                newk = k
                if "static_query" in k:
                    newk = k.replace("static_query", "query_feat")
                if newk != k:
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]
                    scratch = False

            if not scratch:
                logger.warning(
                    f"Weight format of {self.__class__.__name__} have changed! "
                    "Please upgrade your models. Applying automatic conversion now ..."
                )

    @configurable
    def __init__(
        self,
        in_channels,
        mask_classification=True,
        *,
        num_classes: int,
        hidden_dim: int,
        num_queries: int,
        nheads: int,
        dropout: float,
        dim_feedforward: int,
        enc_layers: int,
        dec_layers: int,
        pre_norm: bool,
        deep_supervision: bool,
        mask_dim: int,
        enforce_input_project: bool,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            in_channels: channels of the input features
            mask_classification: whether to add mask classifier or not
            num_classes: number of classes
            hidden_dim: Transformer feature dimension
            num_queries: number of queries
            nheads: number of heads
            dropout: accepted for interface compatibility; unused here (the
                attention/FFN layers below are built with dropout=0.0)
            dim_feedforward: feature dimension in feedforward network
            enc_layers: accepted for interface compatibility; unused here
            dec_layers: number of Transformer decoder layers
            pre_norm: whether to use pre-LayerNorm or not
            deep_supervision: accepted for interface compatibility; unused here
            mask_dim: mask feature dimension
            enforce_input_project: add input project 1x1 conv even if input
                channels and hidden dim is identical
        """
        super().__init__()
        # assert mask_classification, "Only support mask classification model"
        self.mask_classification = mask_classification

        # positional encoding
        N_steps = hidden_dim // 2
        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)

        # define Transformer decoder here
        self.num_heads = nheads
        self.num_layers = dec_layers
        self.transformer_self_attention_layers = nn.ModuleList()
        self.transformer_cross_attention_layers = nn.ModuleList()
        self.transformer_ffn_layers = nn.ModuleList()

        for _ in range(self.num_layers):
            self.transformer_self_attention_layers.append(
                SelfAttentionLayer(
                    d_model=hidden_dim,
                    nhead=nheads,
                    dropout=0.0,
                    normalize_before=pre_norm,
                )
            )

            self.transformer_cross_attention_layers.append(
                CrossAttentionLayer(
                    d_model=hidden_dim,
                    nhead=nheads,
                    dropout=0.0,
                    normalize_before=pre_norm,
                )
            )

            self.transformer_ffn_layers.append(
                FFNLayer(
                    d_model=hidden_dim,
                    dim_feedforward=dim_feedforward,
                    dropout=0.0,
                    normalize_before=pre_norm,
                )
            )

        self.decoder_norm = nn.LayerNorm(hidden_dim)

        self.num_queries = num_queries
        # learnable query features
        self.query_feat = nn.Embedding(num_queries, hidden_dim)
        # learnable query p.e.
        self.query_embed = nn.Embedding(num_queries, hidden_dim)

        # level embedding (we always use 3 scales)
        self.num_feature_levels = 3
        self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
        self.input_proj = nn.ModuleList()
        # One projection (or identity) per feature level.
        for _ in range(self.num_feature_levels):
            if in_channels != hidden_dim or enforce_input_project:
                self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
                weight_init.c2_xavier_fill(self.input_proj[-1])
            else:
                self.input_proj.append(nn.Sequential())

        # output FFNs
        if self.mask_classification:
            self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
        self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)

    @classmethod
    def from_config(cls, cfg, in_channels, mask_classification):
        # NOTE(review): __init__ also requires dropout, enc_layers and
        # deep_supervision as keyword-only arguments, which this dict does not
        # provide — instantiating this class directly through @configurable
        # would raise TypeError. OpenTransformerPredictor.from_config supplies
        # them; confirm whether this class is ever built from config directly.
        ret = {}
        ret["in_channels"] = in_channels
        ret["mask_classification"] = mask_classification

        ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
        ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
        # Transformer parameters:
        ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
        ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD

        # NOTE: because we add learnable query features which requires supervision,
        # we add minus 1 to decoder layers to be consistent with our loss
        # implementation: that is, number of auxiliary losses is always
        # equal to number of decoder layers. With learnable query features, the number of
        # auxiliary losses equals number of decoders plus 1.
        assert cfg.MODEL.MASK_FORMER.DEC_LAYERS >= 1
        ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS - 1
        ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
        ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ

        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM

        return ret

    def forward(self, x, mask_features, mask = None):
        """Decode queries against multi-scale features.

        Args:
            x: list of ``num_feature_levels`` feature maps (NxCxHxW each).
            mask_features: per-pixel embedding map used to produce masks.
            mask: unused (deleted below).

        Returns:
            dict with "pred_logits", "pred_masks" and per-layer "aux_outputs".
        """
        # x is a list of multi-scale feature
        assert len(x) == self.num_feature_levels
        src = []
        pos = []
        size_list = []

        # disable mask, it does not affect performance
        del mask

        for i in range(self.num_feature_levels):
            size_list.append(x[i].shape[-2:])
            pos.append(self.pe_layer(x[i], None).flatten(2))
            # Project each level and add its learnable level embedding.
            src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])

            # flatten NxCxHxW to HWxNxC
            pos[-1] = pos[-1].permute(2, 0, 1)
            src[-1] = src[-1].permute(2, 0, 1)

        _, bs, _ = src[0].shape

        # QxNxC
        query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
        output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)

        predictions_class = []
        predictions_mask = []

        # prediction heads on learnable query features
        outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0])
        predictions_class.append(outputs_class)
        predictions_mask.append(outputs_mask)

        for i in range(self.num_layers):
            # Cycle through the feature levels, one per decoder layer.
            level_index = i % self.num_feature_levels
            # A query whose mask would block every location gets fully
            # unmasked instead of attending to nothing.
            attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
            # attention: cross-attention first
            output = self.transformer_cross_attention_layers[i](
                output, src[level_index],
                memory_mask=attn_mask,
                memory_key_padding_mask=None, # here we do not apply masking on padded region
                pos=pos[level_index], query_pos=query_embed
            )

            output = self.transformer_self_attention_layers[i](
                output, tgt_mask=None,
                tgt_key_padding_mask=None,
                query_pos=query_embed
            )

            # FFN
            output = self.transformer_ffn_layers[i](
                output
            )

            # Supervise after every layer; next layer's attention mask comes
            # from the current mask prediction at the next level's resolution.
            outputs_class, outputs_mask, attn_mask = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels])
            predictions_class.append(outputs_class)
            predictions_mask.append(outputs_mask)

        assert len(predictions_class) == self.num_layers + 1

        out = {
            'pred_logits': predictions_class[-1],
            'pred_masks': predictions_mask[-1],
            'aux_outputs': self._set_aux_loss(
                predictions_class if self.mask_classification else None, predictions_mask
            )
        }
        return out

    def forward_prediction_heads(self, output, mask_features, attn_mask_target_size):
        """Produce class logits, mask logits, and the next attention mask.

        NOTE(review): self.class_embed is called unconditionally; it only
        exists when mask_classification is True (or when a subclass such as
        OpenTransformerPredictor assigns it) — confirm callers.
        """
        decoder_output = self.decoder_norm(output)
        decoder_output = decoder_output.transpose(0, 1)
        outputs_class = self.class_embed(decoder_output)
        mask_embed = self.mask_embed(decoder_output)
        outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)

        # NOTE: prediction is of higher-resolution
        # [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW]
        attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False)
        # must use bool type
        # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
        attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
        # Stop gradients from flowing through the attention mask.
        attn_mask = attn_mask.detach()
        return outputs_class, outputs_mask, attn_mask

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_seg_masks):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        if self.mask_classification:
            return [
                {"pred_logits": a, "pred_masks": b}
                for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])
            ]
        else:
            return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/transformer/transformer.py | mask2former/modeling/transformer/transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/transformer.py
"""
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
class Transformer(nn.Module):
    """DETR-style encoder/decoder transformer.

    Differences from torch.nn.Transformer (per the module docstring):
    positional encodings are added inside attention, the extra final encoder
    LayerNorm is dropped, and the decoder can return all layer activations.
    """

    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        encoder_layer = TransformerEncoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        # Pre-norm needs a final LayerNorm on the encoder output; post-norm
        # already normalizes inside each layer.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(
            encoder_layer, num_encoder_layers, encoder_norm
        )

        decoder_layer = TransformerDecoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(
            decoder_layer,
            num_decoder_layers,
            decoder_norm,
            return_intermediate=return_intermediate_dec,
        )

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier init for every weight matrix (biases are 1-D and skipped).
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed):
        """Encode ``src`` and decode ``query_embed`` against it.

        Returns (decoder states with batch dim second-to-last, encoder memory
        reshaped back to NxCxHxW).
        """
        # flatten NxCxHxW to HWxNxC
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        if mask is not None:
            mask = mask.flatten(1)

        # Decoder input starts at zero; the queries live in query_pos.
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(
            tgt,
            memory,
            memory_key_padding_mask=mask,
            pos=pos_embed,
            query_pos=query_embed,
        )
        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
    """Stack of N identical encoder layers with an optional final norm."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        src,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        out = src
        # Feed the running output through each layer in sequence, passing the
        # same masks and positional encoding every time.
        for layer in self.layers:
            out = layer(
                out,
                src_mask=mask,
                src_key_padding_mask=src_key_padding_mask,
                pos=pos,
            )
        return out if self.norm is None else self.norm(out)
class TransformerDecoder(nn.Module):
    """Stack of decoder layers; can return the output of every layer."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        output = tgt
        # Normalized per-layer outputs, kept only for deep supervision.
        intermediate = []

        for layer in self.layers:
            output = layer(
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos,
                query_pos=query_pos,
            )
            if self.return_intermediate:
                intermediate.append(self.norm(output))

        if self.norm is not None:
            output = self.norm(output)

        if self.return_intermediate:
            # The last entry was appended inside the loop; replace it with the
            # final output so both code paths agree on the last layer.
            intermediate[-1] = output
            return torch.stack(intermediate)
        # Keep a leading layer dimension even without intermediates.
        return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
    """Encoder layer: self-attention plus FFN, with pre- or post-LayerNorm."""

    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feed-forward sub-block: expand, activate, project back.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys, never to values.
        return tensor + pos if pos is not None else tensor

    def forward_post(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        qk = self.with_pos_embed(src, pos)
        attn_out = self.self_attn(
            qk, qk, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
        )[0]
        # Post-norm ordering: residual add, then LayerNorm, for each sub-block.
        src = self.norm1(src + self.dropout1(attn_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        return self.norm2(src + self.dropout2(ffn_out))

    def forward_pre(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        # Pre-norm ordering: LayerNorm before each sub-block.
        h = self.norm1(src)
        qk = self.with_pos_embed(h, pos)
        attn_out = self.self_attn(
            qk, qk, value=h, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
        )[0]
        src = src + self.dropout1(attn_out)
        h = self.norm2(src)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(h))))
        return src + self.dropout2(ffn_out)

    def forward(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        run = self.forward_pre if self.normalize_before else self.forward_post
        return run(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
    """Decoder layer: self-attention, cross-attention to memory, then FFN."""

    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feed-forward sub-block: expand, activate, project back.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys, never to values.
        return tensor + pos if pos is not None else tensor

    def forward_post(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Post-norm ordering: residual add then LayerNorm after each sub-block.
        qk = self.with_pos_embed(tgt, query_pos)
        sa_out = self.self_attn(
            qk, qk, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = self.norm1(tgt + self.dropout1(sa_out))
        ca_out = self.multihead_attn(
            query=self.with_pos_embed(tgt, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )[0]
        tgt = self.norm2(tgt + self.dropout2(ca_out))
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        return self.norm3(tgt + self.dropout3(ffn_out))

    def forward_pre(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        # Pre-norm ordering: LayerNorm before each sub-block.
        h = self.norm1(tgt)
        qk = self.with_pos_embed(h, query_pos)
        sa_out = self.self_attn(
            qk, qk, value=h, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.dropout1(sa_out)
        h = self.norm2(tgt)
        ca_out = self.multihead_attn(
            query=self.with_pos_embed(h, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )[0]
        tgt = tgt + self.dropout2(ca_out)
        h = self.norm3(tgt)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(h))))
        return tgt + self.dropout3(ffn_out)

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        run = self.forward_pre if self.normalize_before else self.forward_post
        return run(
            tgt,
            memory,
            tgt_mask,
            memory_mask,
            tgt_key_padding_mask,
            memory_key_padding_mask,
            pos,
            query_pos,
        )
def _get_clones(module, N):
    """Return a ModuleList of N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
def _get_activation_fn(activation):
    """Return an activation function given a string.

    Args:
        activation: one of "relu", "gelu", or "glu".

    Raises:
        RuntimeError: if ``activation`` is not a supported name.
    """
    activations = {"relu": F.relu, "gelu": F.gelu, "glu": F.glu}
    try:
        return activations[activation]
    except KeyError:
        # Bug fix: the original message said "relu/gelu" even though "glu" is
        # also accepted above.
        raise RuntimeError(
            f"activation should be relu/gelu/glu, not {activation}."
        ) from None
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/transformer/open_transformer_predictor.py | mask2former/modeling/transformer/open_transformer_predictor.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
from torch import nn
from detectron2.config import configurable
from .transformer_predictor import TransformerPredictor, MLP
from .mask2former_transformer_predictor import MultiScaleMaskedTransformerDecoder
class OpenTransformerPredictor(MultiScaleMaskedTransformerDecoder):
    """Open-vocabulary variant of the multi-scale masked decoder.

    Replaces the fixed (num_classes + 1)-way linear classifier with an MLP
    projecting queries into an embedding space of size ``embedding_dim``
    (presumably the CLIP text-embedding space used by clip_adapter — confirm).
    """

    @configurable
    def __init__(
        self,
        in_channels,
        mask_classification=True,
        *,
        embedding_dim: int,
        embed_hidden_dim: int,
        embed_layers: int,
        hidden_dim: int,
        num_queries: int,
        nheads: int,
        dropout: float,
        dim_feedforward: int,
        enc_layers: int,
        dec_layers: int,
        pre_norm: bool,
        deep_supervision: bool,
        mask_dim: int,
        enforce_input_project: bool,
    ):
        # Build the base decoder with mask_classification=False so it does not
        # create its nn.Linear class head; the MLP head is attached below.
        super().__init__(
            in_channels,
            False,
            num_classes=embedding_dim,
            hidden_dim=hidden_dim,
            num_queries=num_queries,
            nheads=nheads,
            dropout=dropout,
            dim_feedforward=dim_feedforward,
            enc_layers=enc_layers,
            dec_layers=dec_layers,
            pre_norm=pre_norm,
            deep_supervision=deep_supervision,
            mask_dim=mask_dim,
            enforce_input_project=enforce_input_project,
        )
        # Restore the caller-requested flag (the base saw False above).
        self.mask_classification = mask_classification
        # output FFNs
        if self.mask_classification:
            self.class_embed = MLP(
                hidden_dim, embed_hidden_dim, embedding_dim, embed_layers
            )

    def freeze_pretrained(self):
        """Freeze every child module except the class-embedding head."""
        for name, module in self.named_children():
            if name not in ["class_embed"]:
                for param in module.parameters():
                    param.requires_grad = False

    @classmethod
    def from_config(cls, cfg, in_channels, mask_classification):
        """Map a detectron2 config to the keyword arguments of __init__."""
        ret = {}
        ret["in_channels"] = in_channels
        ret["mask_classification"] = mask_classification

        # Embedding-head (open-vocabulary) parameters:
        ret["embedding_dim"] = cfg.MODEL.SEM_SEG_HEAD.EMBEDDING_DIM
        ret["embed_hidden_dim"] = cfg.MODEL.SEM_SEG_HEAD.EMBED_HIDDEN_DIM
        ret["embed_layers"] = cfg.MODEL.SEM_SEG_HEAD.EMBED_LAYERS
        ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
        ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
        # Transformer parameters:
        ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
        ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
        ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
        ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS
        ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS
        ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
        ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM

        return ret
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/clip_adapter/__init__.py | mask2former/modeling/clip_adapter/__init__.py | import logging
import torch
from detectron2.utils.logger import log_first_n
from .text_prompt import (
PredefinedPromptExtractor,
LearnablePromptExtractor,
)
from .adapter import ClipAdapter, MaskFormerClipAdapter, PerPixelClipAdapter
def build_prompt_learner(cfg, task_names):
    """Construct a text-prompt extractor from ``cfg.PROMPT_LEARNER``.

    ``"predefined"`` builds a fixed-template extractor; ``"learnable"`` builds a
    trainable one, optionally loading (and then freezing) prompt weights from
    ``cfg.PROMPT_CHECKPOINT``. Any other value raises NotImplementedError.
    """
    if cfg.PROMPT_LEARNER == "predefined":
        return PredefinedPromptExtractor(cfg.PREDEFINED_PROMPT_TEMPLATES)
    if cfg.PROMPT_LEARNER != "learnable":
        raise NotImplementedError(
            "Prompt learner {} is not supported".format(cfg.PROMPT_LEARNER)
        )
    prompt_learner = LearnablePromptExtractor(
        prompt_dim=cfg.PROMPT_DIM,
        prompt_shape=cfg.PROMPT_SHAPE,
        task_prompt_shape=cfg.TASK_PROMPT_SHAPE,
        task_names=task_names,
    )
    if cfg.PROMPT_CHECKPOINT != "":
        # Load pretrained prompt weights; keys are stored under a
        # "...prompt_learner..." prefix which is stripped here.
        state = torch.load(cfg.PROMPT_CHECKPOINT, map_location="cpu")["model"]
        filtered_state = {
            ".".join(key.split(".")[2:]): value
            for key, value in state.items()
            if "prompt_learner" in key
        }
        missing, unexpected = prompt_learner.load_state_dict(
            filtered_state,
            strict=False,
        )
        # A checkpoint-initialized prompt learner is kept frozen.
        for param in prompt_learner.parameters():
            param.requires_grad = False
        prompt_learner.with_trainable_params = False
        log_first_n(
            logging.INFO,
            "Load Prompt Learner from {}".format(cfg.PROMPT_CHECKPOINT),
            1,
        )
        log_first_n(logging.WARN, "Missing {}".format(missing), 1)
        log_first_n(logging.WARN, "Unexpected {}".format(unexpected), 1)
    else:
        trainable_params = [
            name
            for name, value in prompt_learner.named_parameters()
            if value.requires_grad
        ]
        log_first_n(
            logging.INFO,
            "Prompt Learner training params: {}".format(trainable_params),
            1,
        )
    return prompt_learner
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/clip_adapter/text_prompt.py | mask2former/modeling/clip_adapter/text_prompt.py | from typing import List, Tuple
import clip
import torch
from torch import nn
from .clip import CLIP
class PromptExtractor(nn.Module):
    """Abstract base for modules that turn class names into CLIP text features."""

    def __init__(self):
        super().__init__()
        # Token-embedding buffers are registered lazily by init_buffer().
        self._buffer_init = False
        # Subclasses with learnable prompt tokens set this to True.
        self.with_trainable_params = False

    def init_buffer(self, clip_model):
        """Mark the extractor initialized; subclasses register real buffers."""
        self._buffer_init = True

    def forward(self, noun_list: List[str], clip_model: nn.Module):
        raise NotImplementedError()
class PredefinedPromptExtractor(PromptExtractor):
    """Encode class names with a fixed list of prompt templates and ensemble them."""

    def __init__(self, templates: List[str]):
        super().__init__()
        self.templates = templates

    def forward(self, noun_list: List[str], clip_model: nn.Module):
        per_template_features = []
        for template in self.templates:
            tokenized = [clip.tokenize(template.format(noun)) for noun in noun_list]
            # text_projection is a Parameter in the reference CLIP model but may
            # be an nn.Linear elsewhere; resolve the target device accordingly.
            if torch.is_tensor(clip_model.text_projection):
                target_device = clip_model.text_projection.data.device
            else:
                target_device = clip_model.text_projection.weight.device
            text_inputs = torch.cat(tokenized).to(target_device)
            features = clip_model.encode_text(text_inputs)
            features /= features.norm(dim=-1, keepdim=True)
            per_template_features.append(features)
            del text_inputs
        # Ensemble over templates by averaging, then re-normalize.
        ensembled = torch.stack(per_template_features).mean(dim=0)
        return ensembled / ensembled.norm(dim=-1, keepdim=True)
class LearnablePromptExtractor(PromptExtractor):
    """Prompt extractor with learnable context tokens (CoOp-style).

    Each text prompt is assembled as
    ``[SOS] [task tokens] [prefix tokens] [class-name tokens] [suffix tokens] . [EOS] [pad ...]``
    and padded to CLIP's context length of 77 tokens. Prefix/suffix tokens are
    trainable parameters; the task tokens combine the embedded task name with
    an optional trainable task prompt.
    """

    def __init__(self, prompt_dim: int, prompt_shape: Tuple[int, int], task_prompt_shape: int, task_names: List[str]):
        super().__init__()
        assert len(prompt_shape) == 2, "prompt_shape must be a tuple of length 2"
        # prompt_shape = (n_prefix, n_suffix): learnable tokens on each side
        # of the class name.
        self.prompt_dim = prompt_dim
        self.prompt_shape = prompt_shape
        self.prefix_prompt = self._init_prompt(self.n_prefix)
        self.suffix_prompt = self._init_prompt(self.n_suffix)
        self.task_names = task_names
        # task name -> token-embedding tensor, filled by init_task_prompt().
        self.task_embeddings = {}
        if len(task_names) > 0:
            self.task_prompt = self._init_task_prompt(task_prompt_shape) # length of task prompt
        else:
            self.task_prompt = None
        self._buffer_init = False
        self.with_trainable_params = True

    def _init_prompt(self, length):
        # Trainable (length, prompt_dim) parameter, or None when no tokens
        # were requested.
        if length == 0:
            return None
        prompt_tensor = torch.empty(length, self.prompt_dim)
        nn.init.normal_(prompt_tensor, std=0.02)
        return nn.Parameter(prompt_tensor)

    def _init_task_prompt(self, length):
        # Like _init_prompt but with Kaiming initialization for the
        # task-specific tokens.
        if length == 0:
            return None
        prompt_tensor = torch.empty(length, self.prompt_dim)
        nn.init.kaiming_normal_(prompt_tensor, a=2)
        return nn.Parameter(prompt_tensor)

    def init_task_prompt(self, clip_model):
        """Embed each task name with CLIP's token embedding and cache it."""
        task_names = [task for task in self.task_names]
        with torch.no_grad():
            tokens, name_lengths = clip.tokenize(task_names, return_length=True)
            # Reported lengths include SOS/EOS; keep only the name tokens.
            name_lengths = [
                n - 2 for n in name_lengths
            ]
            text_embeddings = clip_model.token_embedding(
                tokens.to(self.device)
            ).type(clip_model.dtype)
            # Position 0 is the SOS token; slice out the actual name tokens.
            text_embeddings = [
                embedding[1 : 1 + length]
                for embedding, length in zip(text_embeddings, name_lengths)
            ]
            self.task_embeddings.update(
                {
                    name: embedding
                    for name, embedding in zip(self.task_names, text_embeddings)
                }
            )

    def init_buffer(self, clip_model):
        """Cache the SOS / '.' / EOS / pad token embeddings as buffers."""
        sentence = "X."
        # "X." tokenizes to [SOS, X, ., EOS, pad, ...]; the slices below pick
        # out the individual special-token embeddings.
        prompt = clip.tokenize(sentence)
        with torch.no_grad():
            embedding = clip_model.token_embedding(prompt).type(
                clip_model.dtype
            ) # 2,77,512
        self.register_buffer("start_signal", embedding[0, :1, :]) # 1,512
        self.register_buffer("dot_signal", embedding[0, 2:3, :]) # 1,512
        self.register_buffer("end_signal", embedding[0, 3:4, :]) # 1,512
        self.register_buffer("pad_signal", embedding[0, 4:5, :]) # 1,512
        # Cache of class-name token embeddings, filled lazily in forward().
        self.noun_bucket = {}
        self._buffer_init = True

    def forward(self, noun_list: List[str], clip_model: nn.Module, task_name: str):
        """Encode every name in ``noun_list`` into a normalized CLIP text feature.

        Returns:
            Tensor of shape (len(noun_list), feat_dim).

        Raises:
            RuntimeError: if init_buffer() has not been called yet.
        """
        if not self._buffer_init:
            raise RuntimeError(
                f"Buffer of {self.__class__.__name__} is not initialized"
            )
        self._update_noun_features(noun_list, clip_model)
        if task_name is not None:
            task_embed = self.task_embeddings[task_name].to(self.prefix_prompt.device)
            task_prompt = [task_embed]
            if self.task_prompt is not None:
                task_prompt.append(self.task_prompt)
            task_prompt = torch.cat(task_prompt)
        else:
            task_prompt = torch.Tensor([]).to(self.start_signal.device)
            # NOTE(review): this permanently discards the learnable task
            # prompt once forward() is called without a task name — confirm
            # this is intentional.
            self.task_prompt = None
        prefix = [self.start_signal]
        if self.prefix_prompt is not None:
            prefix.append(self.prefix_prompt)
        prefix = torch.cat(prefix)
        suffix = [self.dot_signal, self.end_signal]
        if self.suffix_prompt is not None:
            suffix.insert(0, self.suffix_prompt)
        suffix = torch.cat(suffix)
        # only process those which are not in bucket
        lengths = [
            len(task_prompt) + len(prefix) + len(suffix) + len(self.noun_bucket[noun])
            for noun in noun_list
        ]
        # Pad every assembled prompt to CLIP's context length of 77 tokens.
        embeddings = torch.stack(
            [
                torch.cat(
                    [task_prompt, prefix, self.noun_bucket[noun], suffix]
                    + [self.pad_signal.expand(77 - length, -1)]
                )
                for noun, length in zip(noun_list, lengths)
            ]
        ) # cls,77,512
        # Index of each prompt's EOS token (last non-pad position).
        indices = torch.Tensor(lengths).long().to(embeddings.device) - 1
        text_features = self.get_text_feature(embeddings, indices, clip_model)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        return text_features

    def _update_noun_features(self, noun_list, clip_model):
        """Embed and cache token embeddings for class names not seen before."""
        left_class_names = [noun for noun in noun_list if noun not in self.noun_bucket]
        if len(left_class_names) > 0:
            with torch.no_grad():
                tokens, name_lengths = clip.tokenize(
                    left_class_names, return_length=True
                )
                # Reported lengths include SOS/EOS; keep only the name tokens.
                name_lengths = [
                    n - 2 for n in name_lengths
                ]
                text_embeddings = clip_model.token_embedding(
                    tokens.to(self.device)
                ).type(clip_model.dtype)
                text_embeddings = [
                    embedding[1 : 1 + length]
                    for embedding, length in zip(text_embeddings, name_lengths)
                ]
            self.noun_bucket.update(
                {
                    name: embedding
                    for name, embedding in zip(left_class_names, text_embeddings)
                }
            )

    @staticmethod
    def get_text_feature(x, indices, clip_model):
        """Run assembled prompt embeddings through CLIP's text transformer."""
        x = x + clip_model.positional_embedding.type(clip_model.dtype)
        x = x.permute(1, 0, 2) # NLD -> LND
        x = clip_model.transformer(x)
        x = x.permute(1, 0, 2) # LND -> NLD
        x = clip_model.ln_final(x).type(clip_model.dtype)
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        if not torch.is_tensor(clip_model.text_projection):
            x = clip_model.text_projection(x[torch.arange(x.shape[0]), indices])
        else:
            x = x[torch.arange(x.shape[0]), indices] @ clip_model.text_projection
        return x

    @property
    def n_prefix(self):
        # Number of learnable tokens placed before the class name.
        return self.prompt_shape[0]

    @property
    def n_suffix(self):
        # Number of learnable tokens placed after the class name.
        return self.prompt_shape[1]

    @property
    def device(self):
        # Device of the registered buffers (valid after init_buffer()).
        return self.start_signal.device

    def extra_repr(self) -> str:
        r"""Set the extra representation of the module

        To print customized extra information, you should re-implement
        this method in your own modules. Both single-line and multi-line
        strings are acceptable.
        """
        repr = f"prefix_prompt:{self.n_prefix},suffix_prompt:{self.n_suffix},dimension:{self.prompt_dim}\n"
        repr = repr + "[Normal_Init(mu=0,std=0.02)]"
        return repr
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/clip_adapter/adapter.py | mask2former/modeling/clip_adapter/adapter.py | from typing import List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.structures import BitMasks
from .clip import build_clip_model, crop_with_mask, CLIP
from .text_prompt import PromptExtractor
class ClipAdapter(nn.Module):
    """Pairs a CLIP model with a prompt learner to score images against class names.

    ``forward`` returns similarity logits between the encoded image(s) and the
    text features produced for ``text`` (one feature per class name).
    """

    def __init__(self, clip_model_name: str, prompt_learner: PromptExtractor):
        super().__init__()
        self.clip_model = build_clip_model(clip_model_name)
        self.prompt_learner = prompt_learner
        self.prompt_learner.init_buffer(self.clip_model)
        # Cache: class name -> text feature; used when the prompt learner is frozen.
        self.text_feature_buffer = {}
        # NOTE(review): init_task_prompt is only defined on
        # LearnablePromptExtractor, not on the PromptExtractor base class —
        # confirm this adapter is never built with a predefined prompt learner.
        self.prompt_learner.init_task_prompt(self.clip_model)

    def forward(self, image: torch.Tensor, text: List[str], task_name: str, **kwargs):
        image = self._preprocess_image(image, **kwargs)
        text_feature = self.get_text_features(text, task_name)  # k,feat_dim
        image_features = self.get_image_features(image)
        return self.get_sim_logits(text_feature, image_features)

    def _preprocess_image(self, image: torch.Tensor):
        # Base adapter performs no preprocessing; subclasses override this.
        return image

    def _get_text_features(self, noun_list: List[str], task_name: str):
        """Encode class names, caching results when the prompt learner is frozen."""
        if not self.prompt_learner.with_trainable_params:
            # Frozen prompts: only encode names missing from the cache.
            left_noun_list = [
                noun for noun in noun_list if noun not in self.text_feature_buffer
            ]
            if len(left_noun_list) > 0:
                left_text_features = self.prompt_learner(
                    left_noun_list, self.clip_model, task_name
                )
                self.text_feature_buffer.update(
                    {
                        noun: text_feature
                        for noun, text_feature in zip(
                            left_noun_list, left_text_features
                        )
                    }
                )
            return torch.stack([self.text_feature_buffer[noun] for noun in noun_list])
        else:
            # Trainable prompts: always re-encode; keep a detached copy cached.
            text_features = self.prompt_learner(noun_list, self.clip_model, task_name)
            self.text_feature_buffer.update(
                {
                    noun: text_feature.detach()
                    for noun, text_feature in zip(noun_list, text_features)
                }
            )
            return text_features

    def get_text_features(self, noun_list: List[str], task_name: str):
        return self._get_text_features(noun_list, task_name)

    def get_image_features(self, image: torch.Tensor):
        # L2-normalized CLIP visual features.
        image_features = self.clip_model.visual(image)
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        return image_features

    def get_sim_logits(
        self,
        text_features: torch.Tensor,
        image_features: torch.Tensor,
        temperature: float = 100,
    ):
        # Cosine similarities scaled by a fixed temperature.
        return temperature * image_features.matmul(text_features.transpose(-1,-2))

    def normalize_feature(self, feat: torch.Tensor):
        return feat / feat.norm(dim=-1, keepdim=True)
class MaskFormerClipAdapter(ClipAdapter):
    """Adapter that classifies mask proposals: each masked region is cropped,
    background-filled, resized, and scored against class-name text features.
    """

    def __init__(
        self,
        clip_model_name: str,
        prompt_learner: PromptExtractor,
        mask_fill: str = "mean",
        mask_expand_ratio: float = 1.0,
        mask_thr: float = 0.5,
        mask_matting: bool = False,
        region_resized: bool = True,
    ):
        super().__init__(clip_model_name, prompt_learner)
        # text_projection is a Parameter in the reference CLIP model but may
        # be an nn.Linear in other implementations.
        if torch.is_tensor(self.clip_model.text_projection):
            text_embedding_shape = self.clip_model.text_projection.shape[-1]
        else:
            text_embedding_shape = self.clip_model.text_projection.weight.shape[0]
        # Learnable embedding representing the "no object" class.
        self.non_object_embedding = nn.Parameter(torch.empty(1, text_embedding_shape))
        nn.init.normal_(
            self.non_object_embedding.data,
            std=self.clip_model.transformer.width ** -0.5,
        )
        # NOTE(review): init_task_prompt was already called in
        # ClipAdapter.__init__; this second call recomputes the same cache.
        self.prompt_learner.init_task_prompt(self.clip_model)
        # for test
        self.mask_fill = mask_fill
        if self.mask_fill == "zero":
            self.mask_fill = (0.0, 0.0, 0.0)
        elif self.mask_fill == "mean":
            # Fill background with the CLIP pixel mean (in [0, 255] scale).
            self.mask_fill = [255.0 * c for c in CLIP.PIXEL_MEAN]
        else:
            raise NotImplementedError(
                "Unknown mask_fill method: {}".format(self.mask_fill)
            )
        self.mask_expand_ratio = mask_expand_ratio
        self.mask_thr = mask_thr
        self.mask_matting = mask_matting
        self.region_resized = region_resized
        # Normalization constants for [0, 255]-range images.
        self.register_buffer(
            "pixel_mean", torch.Tensor(CLIP.PIXEL_MEAN).reshape(1, 3, 1, 1) * 255.0
        )
        self.register_buffer(
            "pixel_std", torch.Tensor(CLIP.PIXEL_STD).reshape(1, 3, 1, 1) * 255.0
        )

    def forward(
        self,
        image: torch.Tensor,
        text: List[str],
        task_name: str,
        mask: torch.Tensor,
        normalize: bool = True,
    ):
        """Score each valid mask region against the class names.

        Returns:
            (logits, valid_flag): ``valid_flag`` marks masks with at least one
            pixel above ``mask_thr``; ``logits`` is None when no mask is valid.
        """
        image, valid_flag = self._preprocess_image(image, mask, normalize=normalize)
        if image is None:
            return None, valid_flag
        if isinstance(image, list):
            image_features = torch.cat(
                [self.get_image_features(image_i) for image_i in image], dim=0
            )
        else:
            image_features = self.get_image_features(image)
        text_feature = self.get_text_features(text, task_name)  # k,feat_dim
        return self.get_sim_logits(text_feature, image_features), valid_flag

    def _preprocess_image(
        self, image: torch.Tensor, mask: torch.Tensor, normalize: bool = True
    ):
        """crop, mask and normalize the image

        Args:
            image ([type]): [C,H,W]
            mask ([type]): [K,H,W]
            normalize (bool, optional): [description]. Defaults to True.
        """
        dtype = mask.dtype
        # Keep only masks with at least one pixel above the threshold.
        bin_mask = mask > self.mask_thr
        valid = bin_mask.sum(dim=(-1, -2)) > 0
        bin_mask = bin_mask[valid]
        mask = mask[valid]
        if not self.mask_matting:
            # Hard (binary) masking instead of soft matting.
            mask = bin_mask
        bin_mask = BitMasks(bin_mask)
        bboxes = bin_mask.get_bounding_boxes()
        # crop,mask
        regions = [
            crop_with_mask(
                image.type(dtype),
                single_mask.type(dtype),
                bbox,
                fill=self.mask_fill,
                expand_ratio=self.mask_expand_ratio,
            )[None, ...]
            for bbox, single_mask in zip(bboxes, mask)
        ]
        if len(regions) == 0:
            return None, valid
        if normalize:
            regions = [(r - self.pixel_mean) / self.pixel_std for r in regions]
        # resize
        if self.region_resized:
            # Crops only share a common size after resizing to CLIP's input
            # resolution, so concatenation happens inside this branch.
            regions = [
                F.interpolate(r, size=(224, 224), mode="bicubic", align_corners=True) for r in regions
            ]
            regions = torch.cat(regions)
        return regions, valid

    def get_text_features(self, noun_list: List[str], task_name: str):
        """Class-name text features plus the normalized no-object embedding."""
        object_text_features = self._get_text_features(noun_list, task_name)
        non_object_text_features = (
            self.non_object_embedding
            / self.non_object_embedding.norm(dim=-1, keepdim=True)
        )
        return torch.cat([object_text_features, non_object_text_features], dim=0)
class PerPixelClipAdapter(ClipAdapter):
    """Adapter variant that can expose dense (per-pixel) CLIP image features."""

    def __init__(self, *args, **kwargs):
        super(PerPixelClipAdapter, self).__init__(*args, **kwargs)
        # Normalization constants for [0, 255]-range images.
        self.register_buffer(
            "pixel_mean", torch.Tensor(CLIP.PIXEL_MEAN).reshape(1, 3, 1, 1) * 255.0
        )
        self.register_buffer(
            "pixel_std", torch.Tensor(CLIP.PIXEL_STD).reshape(1, 3, 1, 1) * 255.0
        )

    def _preprocess_image(self, image: torch.Tensor):
        # Normalize with CLIP statistics; inputs are expected in [0, 255]
        # since the buffers are scaled by 255.
        return (image.to(self.pixel_mean.device) - self.pixel_mean) / self.pixel_std

    def get_image_features(self, image: torch.Tensor, per_pixel: bool = False):
        if per_pixel:
            image_features = self.clip_model.visual(image, return_cls=False)  # b,h,w,c
        else:
            # Broadcast the global image feature onto a dummy 2x2 spatial grid.
            image_features = self.clip_model.visual(image)[:, None, None, :].expand(
                image.shape[0], 2, 2, -1
            )  # b,c
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        return image_features

    def forward(
        self, image: torch.Tensor, text: List[str], task_name: str, per_pixel: bool = True, **kwargs
    ):
        # NOTE(review): ``per_pixel`` is accepted but not forwarded to
        # get_image_features (which therefore uses its own default of False) —
        # confirm whether dense features were intended here.
        image = self._preprocess_image(image, **kwargs)
        text_feature = self.get_text_features(text, task_name)  # k,feat_dim
        image_features = self.get_image_features(image)
        return self.get_sim_logits(text_feature, image_features)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/clip_adapter/clip.py | mask2former/modeling/clip_adapter/clip.py | from typing import Tuple
import numpy as np
import torch
import timm
import clip
from detectron2.utils.comm import get_local_rank, synchronize
class CLIP:
    """Namespace of CLIP-related constants: normalization statistics and
    prompt-template collections (presumably the CLIP ImageNet prompt-ensemble
    set and the ViLD template set — names suggest so; verify against upstream).
    """

    # Per-channel RGB statistics used by the released CLIP models
    # (for inputs scaled to [0, 1]).
    PIXEL_MEAN = (0.48145466, 0.4578275, 0.40821073)
    PIXEL_STD = (0.26862954, 0.26130258, 0.27577711)
    # Prompt templates with a "{}" placeholder for the class name.
    IMAGENET_PROMPT = [
        "a bad photo of a {}.",
        "a photo of many {}.",
        "a sculpture of a {}.",
        "a photo of the hard to see {}.",
        "a low resolution photo of the {}.",
        "a rendering of a {}.",
        "graffiti of a {}.",
        "a bad photo of the {}.",
        "a cropped photo of the {}.",
        "a tattoo of a {}.",
        "the embroidered {}.",
        "a photo of a hard to see {}.",
        "a bright photo of a {}.",
        "a photo of a clean {}.",
        "a photo of a dirty {}.",
        "a dark photo of the {}.",
        "a drawing of a {}.",
        "a photo of my {}.",
        "the plastic {}.",
        "a photo of the cool {}.",
        "a close-up photo of a {}.",
        "a black and white photo of the {}.",
        "a painting of the {}.",
        "a painting of a {}.",
        "a pixelated photo of the {}.",
        "a sculpture of the {}.",
        "a bright photo of the {}.",
        "a cropped photo of a {}.",
        "a plastic {}.",
        "a photo of the dirty {}.",
        "a jpeg corrupted photo of a {}.",
        "a blurry photo of the {}.",
        "a photo of the {}.",
        "a good photo of the {}.",
        "a rendering of the {}.",
        "a {} in a video game.",
        "a photo of one {}.",
        "a doodle of a {}.",
        "a close-up photo of the {}.",
        "a photo of a {}.",
        "the origami {}.",
        "the {} in a video game.",
        "a sketch of a {}.",
        "a doodle of the {}.",
        "a origami {}.",
        "a low resolution photo of a {}.",
        "the toy {}.",
        "a rendition of the {}.",
        "a photo of the clean {}.",
        "a photo of a large {}.",
        "a rendition of a {}.",
        "a photo of a nice {}.",
        "a photo of a weird {}.",
        "a blurry photo of a {}.",
        "a cartoon {}.",
        "art of a {}.",
        "a sketch of the {}.",
        "a embroidered {}.",
        "a pixelated photo of a {}.",
        "itap of the {}.",
        "a jpeg corrupted photo of the {}.",
        "a good photo of a {}.",
        "a plushie {}.",
        "a photo of the nice {}.",
        "a photo of the small {}.",
        "a photo of the weird {}.",
        "the cartoon {}.",
        "art of the {}.",
        "a drawing of the {}.",
        "a photo of the large {}.",
        "a black and white photo of a {}.",
        "the plushie {}.",
        "a dark photo of a {}.",
        "itap of a {}.",
        "graffiti of the {}.",
        "a toy {}.",
        "itap of my {}.",
        "a photo of a cool {}.",
        "a photo of a small {}.",
        "a tattoo of the {}.",
    ]
    # Size-aware prompt templates.
    VILD_PROMPT = [
        "a photo of a {}.",
        "This is a photo of a {}",
        "There is a {} in the scene",
        "There is the {} in the scene",
        "a photo of a {} in the scene",
        "a photo of a small {}.",
        "a photo of a medium {}.",
        "a photo of a large {}.",
        "This is a photo of a small {}.",
        "This is a photo of a medium {}.",
        "This is a photo of a large {}.",
        "There is a small {} in the scene.",
        "There is a medium {} in the scene.",
        "There is a large {} in the scene.",
    ]
def expand_box(
    x1: float,
    y1: float,
    x2: float,
    y2: float,
    expand_ratio: float = 1.0,
    max_h: int = None,
    max_w: int = None,
):
    """Scale a box about its center by ``expand_ratio``.

    When ``max_h`` / ``max_w`` are given, the corresponding coordinates are
    clamped to ``[0, max-1]``. Returns ``[x1, y1, x2, y2]`` truncated to ints.
    """
    center_x = 0.5 * (x1 + x2)
    center_y = 0.5 * (y1 + y2)
    half_w = 0.5 * ((x2 - x1) * expand_ratio)
    half_h = 0.5 * ((y2 - y1) * expand_ratio)
    box = [center_x - half_w, center_y - half_h, center_x + half_w, center_y + half_h]
    if max_h is not None:
        box[1] = max(0, box[1])
        box[3] = min(max_h - 1, box[3])
    if max_w is not None:
        box[0] = max(0, box[0])
        box[2] = min(max_w - 1, box[2])
    return [int(coord) for coord in box]
def mask2box(mask: torch.Tensor):
    """Compute the tight bounding box of a binary mask.

    Args:
        mask: (H, W) tensor; non-zero entries mark foreground.

    Returns:
        (x1, y1, x2, y2) with exclusive right/bottom bounds, or ``None`` when
        the mask is empty.
    """
    # use naive way
    # Columns (x axis) that contain any foreground pixel.
    row = torch.nonzero(mask.sum(dim=0))[:, 0]
    if len(row) == 0:
        return None
    x1 = row.min()
    x2 = row.max()
    # Rows (y axis) that contain any foreground pixel.
    # BUGFIX: this previously used np.nonzero on a torch tensor, which returns
    # a tuple of arrays and crashes on the subsequent [:, 0] indexing.
    col = torch.nonzero(mask.sum(dim=1))[:, 0]
    y1 = col.min()
    y2 = col.max()
    return x1, y1, x2 + 1, y2 + 1
def crop_with_mask(
    image: torch.Tensor,
    mask: torch.Tensor,
    bbox: torch.Tensor,
    fill: Tuple[float, float, float] = (0, 0, 0),
    expand_ratio: float = 1.0,
):
    """Crop ``image`` to the (expanded) bbox, filling background with ``fill``.

    Foreground pixels (mask == 1) keep the image values; background pixels are
    replaced by the per-channel ``fill`` constants.
    """
    left, top, right, bottom = expand_box(*bbox, expand_ratio)
    _, image_h, image_w = image.shape
    # Clamp the expanded box to the image bounds.
    left = max(left, 0)
    top = max(top, 0)
    right = min(right, image_w)
    bottom = min(bottom, image_h)
    # One constant plane per channel, matching the crop size.
    background = torch.cat(
        [
            image.new_full((1, bottom - top, right - left), fill_value=channel_fill)
            for channel_fill in fill
        ]
    )
    crop = image[:, top:bottom, left:right]
    crop_mask = mask[None, top:bottom, left:right]
    return crop * crop_mask + (1 - crop_mask) * background
def build_clip_model(model_name: str, frozen: bool = True):
    """Load a CLIP model on CPU, downloading weights on local rank 0 first.

    Args:
        model_name: one of the supported CLIP checkpoint names.
        frozen: when True (default), disable gradients for every parameter.

    Returns:
        The loaded CLIP module.

    Raises:
        ValueError: if ``model_name`` is not a supported checkpoint name.
    """
    supported = ("clip", "ViT-B/16", "ViT-L/14")
    # BUGFIX: an unsupported name previously fell through the if-branch and
    # surfaced as an opaque UnboundLocalError on ``model``; fail fast with a
    # clear message instead.
    if model_name not in supported:
        raise ValueError(
            "Unsupported CLIP model name: {!r} (expected one of {})".format(
                model_name, supported
            )
        )
    rank = get_local_rank()
    if rank == 0:
        # download on rank 0 only; other ranks wait at the barrier below and
        # then load from the shared cache.
        model, _ = clip.load(model_name, device="cpu")
    synchronize()
    if rank != 0:
        model, _ = clip.load(model_name, device="cpu")
    synchronize()
    if frozen:
        for param in model.parameters():
            param.requires_grad = False
    return model
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/backbone/__init__.py | mask2former/modeling/backbone/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/backbone/clip_resnet.py | mask2former/modeling/backbone/clip_resnet.py | from collections import OrderedDict
from typing import Tuple, Union
from scipy import interpolate
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
class Bottleneck(nn.Module):
    """CLIP-style ResNet bottleneck block.

    All convolutions use stride 1; spatial downsampling is performed by an
    AvgPool2d after the second conv (anti-aliased striding), and the shortcut
    likewise uses AvgPool2d + 1x1 conv when the shape changes.
    """

    # Channel expansion factor of the final 1x1 conv.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, 3, padding=1 * dilation, bias=False, dilation=dilation
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        (
                            "0",
                            nn.Conv2d(
                                inplanes,
                                planes * self.expansion,
                                1,
                                stride=1,
                                bias=False,
                            ),
                        ),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )

    def forward(self, x: torch.Tensor):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        # Anti-aliased downsampling (identity when stride == 1).
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool

    NOTE(review): unlike the original CLIP ResNet, this variant defines no
    final pooling layer at all — forward() returns a dict of multi-scale
    feature maps ("res2".."res5") for use as a backbone.
    """

    def __init__(self, layers, width=64, strides=[2, 1, 2, 2, 2], multi_grid=[1, 1, 1]):
        super().__init__()
        # the 3-layer stem
        self.conv1 = nn.Conv2d(
            3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(
            width // 2, width // 2, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        # strides[0] controls the stem's trailing pool; strides[1:] map to layer1..4.
        self.avgpool = nn.AvgPool2d(strides[0]) if strides[0] > 1 else nn.Identity()
        self.relu = nn.ReLU(inplace=True)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0], stride=strides[1])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=strides[2])
        self.layer3 = self._make_layer(width * 4, layers[2], stride=strides[3])
        # multi_grid supplies per-block dilations for the last stage.
        self.layer4 = self._make_layer(
            width * 8, layers[3], stride=strides[4], dilations=multi_grid
        )
        # Output channel counts of res2..res5 (Bottleneck expansion = 4).
        self.num_features = [width * 4, width * 8, width * 16, width * 32]

    def _make_layer(self, planes, blocks, stride=1, dilations=None):
        # Build one residual stage; only the first block may downsample.
        if dilations is None:
            dilations = [1] * blocks
        layers = [Bottleneck(self._inplanes, planes, stride, dilation=dilations[0])]
        self._inplanes = planes * Bottleneck.expansion
        for i in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes, dilation=dilations[i]))
        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            # conv-bn-relu x3, then the stem pool.
            for conv, bn in [
                (self.conv1, self.bn1),
                (self.conv2, self.bn2),
                (self.conv3, self.bn3),
            ]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x

        output = {}
        # Match the parameter dtype (e.g. fp16 CLIP weights).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)  # 1/4,1/4
        x = self.layer1(x)
        output["res2"] = x
        x = self.layer2(x)  # 1/8,1/8
        output["res3"] = x
        x = self.layer3(x)  # 1/16,1/16
        output["res4"] = x
        x = self.layer4(x)  # 1/32,1/32
        output["res5"] = x
        return output
@BACKBONE_REGISTRY.register()
class D2ModifiedResNet(ModifiedResNet, Backbone):
    """detectron2 Backbone wrapper around ModifiedResNet, configured from cfg."""

    def __init__(self, cfg, input_shape):
        depth = cfg.MODEL.RESNETS.DEPTH
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        # Passed to ModifiedResNet as its stem ``width``.
        bottleneck_channels = num_groups * width_per_group
        num_blocks_per_stage = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }[depth]
        strides = [2, 1, 2, 2, 2]
        multi_grid = cfg.MODEL.RESNETS.RES5_MULTI_GRID
        if cfg.MODEL.RESNETS.STEM_TYPE == "deeplab":
            # DeepLab-style stem: no pooling after the stem convolutions.
            strides = [1, 1, 2, 2, 2]
        super().__init__(
            num_blocks_per_stage,
            bottleneck_channels,
            strides=strides,
            multi_grid=multi_grid,
        )
        self._out_features = cfg.MODEL.RESNETS.OUT_FEATURES
        self._out_feature_strides = {
            "res2": 4,
            "res3": 8,
            "res4": 16,
            "res5": 32,
        }
        self._out_feature_channels = {
            "res2": self.num_features[0],
            "res3": self.num_features[1],
            "res4": self.num_features[2],
            "res5": self.num_features[3],
        }

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.

        Returns:
            dict[str->Tensor]: names and the corresponding features
        """
        outputs = {}
        y = super().forward(x)
        # Keep only the stages requested in the config.
        for k in y.keys():
            if k in self._out_features:
                outputs[k] = y[k]
        return outputs

    def output_shape(self):
        # detectron2 Backbone contract: describe each output feature map.
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name],
            )
            for name in self._out_features
        }

    @property
    def size_divisibility(self):
        # Deepest stage stride; inputs must be divisible by this.
        return 32
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/modeling/backbone/swin.py | mask2former/modeling/backbone/swin.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu, Yutong Lin, Yixuan Wei
# --------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation/blob/main/mmseg/models/backbones/swin_transformer.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        # Default the hidden/output widths to the input width when unspecified.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: tensor of shape (B, H, W, C); H and W must be divisible by
            ``window_size``.
        window_size (int): side length of each window.

    Returns:
        Tensor of shape (num_windows*B, window_size, window_size, C).
    """
    batch, height, width, channels = x.shape
    tiled = x.view(
        batch,
        height // window_size,
        window_size,
        width // window_size,
        window_size,
        channels,
    )
    # Bring the two window-grid axes together, then fold them into the batch dim.
    return (
        tiled.permute(0, 1, 3, 2, 4, 5)
        .contiguous()
        .view(-1, window_size, window_size, channels)
    )
def window_reverse(windows, window_size, H, W):
    """Inverse of ``window_partition``: reassemble windows into a feature map.

    Args:
        windows: tensor of shape (num_windows*B, window_size, window_size, C).
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        Tensor of shape (B, H, W, C).
    """
    windows_per_image = (H // window_size) * (W // window_size)
    batch = int(windows.shape[0] / windows_per_image)
    grid = windows.view(
        batch, H // window_size, W // window_size, window_size, window_size, -1
    )
    # Interleave the window-grid axes back into spatial order.
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(batch, H, W, -1)
class WindowAttention(nn.Module):
    """Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = (
            coords_flatten[:, :, None] - coords_flatten[:, None, :]
        )  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(
            1, 2, 0
        ).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Fixed (non-trainable) lookup indices into the bias table.
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """Forward function.

        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # Project to q/k/v and split heads in one reshape.
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = (
            qkv[0],
            qkv[1],
            qkv[2],
        )  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1],
            self.window_size[0] * self.window_size[1],
            -1,
        )  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            # Apply the shifted-window attention mask per window.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
                1
            ).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SwinTransformerBlock(nn.Module):
    """Swin Transformer Block.

    One block = (shifted) window attention followed by an MLP, each wrapped in
    a pre-norm residual connection with stochastic depth.

    Args:
        dim (int): Number of input channels.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(
        self,
        dim,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        assert (
            0 <= self.shift_size < self.window_size
        ), "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )
        # Spatial resolution is injected by BasicLayer.forward before each call.
        self.H = None
        self.W = None
    def forward(self, x, mask_matrix):
        """Forward function.

        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
            mask_matrix: Attention mask for cyclic shift.
        """
        B, L, C = x.shape
        H, W = self.H, self.W
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # pad feature maps to multiples of window size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape
        # cyclic shift: SW-MSA rolls the feature map so that attention windows
        # straddle the original window boundaries; the mask blocks attention
        # between pixels that were not neighbors before the roll.
        if self.shift_size > 0:
            shifted_x = torch.roll(
                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
            )
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None
        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(
            x_windows, mask=attn_mask
        )  # nW*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(
                shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
            )
        else:
            x = shifted_x
        # strip the window-size padding added above
        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchMerging(nn.Module):
    """Patch Merging Layer.

    Merges every 2x2 neighborhood of tokens into a single token: channels are
    concatenated (C -> 4C), normalized, and linearly reduced to 2C.

    Args:
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)
    def forward(self, x, H, W):
        """Merge each 2x2 patch neighborhood into one token.

        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.

        Returns:
            Tensor of shape (B, ceil(H/2)*ceil(W/2), 2*C).
        """
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        feat = x.view(B, H, W, C)
        # Right/bottom-pad odd spatial sizes so the 2x2 gather is well defined.
        if (H % 2 == 1) or (W % 2 == 1):
            feat = F.pad(feat, (0, 0, 0, W % 2, 0, H % 2))
        # Gather the four interleaved sub-grids; concatenation order matches
        # the weight layout expected by self.reduction.
        quads = [
            feat[:, dy::2, dx::2, :] for dy, dx in ((0, 0), (1, 0), (0, 1), (1, 1))
        ]
        merged = torch.cat(quads, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.

    Args:
        dim (int): Number of feature channels
        depth (int): Depths of this stage.
        num_heads (int): Number of attention head.
        window_size (int): Local window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(
        self,
        dim,
        depth,
        num_heads,
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.window_size = window_size
        self.shift_size = window_size // 2
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even-indexed blocks use plain W-MSA (shift 0), odd ones
        # use shifted windows so information flows across window boundaries.
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i]
                    if isinstance(drop_path, list)
                    else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x, H, W):
        """Forward function.

        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.

        Returns:
            Tuple ``(x_out, H, W, x_down, Wh, Ww)``: this stage's output and
            resolution plus the (possibly downsampled) input to the next stage.
        """
        # calculate attention mask for SW-MSA on the padded resolution
        Hp = int(np.ceil(H / self.window_size)) * self.window_size
        Wp = int(np.ceil(W / self.window_size)) * self.window_size
        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1
        # Label the 3x3 grid of shift regions with distinct integer ids.
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(
            img_mask, self.window_size
        )  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        # Token pairs from different regions receive -100 (effectively -inf
        # after softmax); pairs from the same region receive 0.
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
            attn_mask == 0, float(0.0)
        )
        for blk in self.blocks:
            # Blocks read resolution from attributes rather than arguments.
            blk.H, blk.W = H, W
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, attn_mask)
            else:
                x = blk(x, attn_mask)
        if self.downsample is not None:
            x_down = self.downsample(x, H, W)
            Wh, Ww = (H + 1) // 2, (W + 1) // 2
            return x, H, W, x_down, Wh, Ww
        else:
            return x, H, W, x, H, W
class PatchEmbed(nn.Module):
    """Image to Patch Embedding.

    Splits an image into non-overlapping patches via a strided convolution and
    optionally layer-normalizes the resulting embeddings.

    Args:
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """
    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # kernel == stride: each output pixel embeds one non-overlapping patch.
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
        )
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None
    def forward(self, x):
        """Embed an image batch; pads H/W up to multiples of the patch size."""
        _, _, H, W = x.size()
        ph, pw = self.patch_size
        pad_w = (pw - W % pw) % pw
        pad_h = (ph - H % ph) % ph
        if pad_w:
            x = F.pad(x, (0, pad_w))
        if pad_h:
            x = F.pad(x, (0, 0, 0, pad_h))
        x = self.proj(x)  # B C Wh Ww
        if self.norm is not None:
            # LayerNorm operates on the channel-last token layout, so flatten
            # to (B, Wh*Ww, C), normalize, and restore the map layout.
            Wh, Ww = x.size(2), x.size(3)
            tokens = x.flatten(2).transpose(1, 2)
            tokens = self.norm(tokens)
            x = tokens.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
        return x
class SwinTransformer(nn.Module):
    """Swin Transformer backbone.
    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
    https://arxiv.org/pdf/2103.14030

    Args:
        pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute postion embedding. Default 224.
        patch_size (int | tuple(int)): Patch size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each Swin Transformer stage.
        num_heads (tuple[int]): Number of attention head of each stage.
        window_size (int): Window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
        drop_rate (float): Dropout rate.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
        out_indices (Sequence[int]): Output from which stages.
        norm_indices (Sequence[int] | None): Stages whose output is normalized
            before being returned; defaults to ``out_indices``.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        projection (bool): If True, add a linear projector head over the last
            stage and return it under the "fc" key.
        project_dim (int): Output dimension of the projector head. Default: 256.
    """
    def __init__(
        self,
        pretrain_img_size=224,
        patch_size=4,
        in_chans=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.2,
        norm_layer=nn.LayerNorm,
        ape=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        norm_indices=None,
        frozen_stages=-1,
        use_checkpoint=False,
        projection=False,
        project_dim=256,
    ):
        super().__init__()
        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.norm_indices = norm_indices if norm_indices is not None else out_indices
        self.frozen_stages = frozen_stages
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
        )
        # absolute position embedding
        if self.ape:
            pretrain_img_size = to_2tuple(pretrain_img_size)
            patch_size = to_2tuple(patch_size)
            patches_resolution = [
                pretrain_img_size[0] // patch_size[0],
                pretrain_img_size[1] // patch_size[1],
            ]
            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
            )
            trunc_normal_(self.absolute_pos_embed, std=0.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
        ]  # stochastic depth decay rule
        # build layers; stage i doubles the channels (embed_dim * 2**i) and the
        # last stage has no downsampling.
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                norm_layer=norm_layer,
                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                use_checkpoint=use_checkpoint,
            )
            self.layers.append(layer)
        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        self.num_features = num_features
        # add a norm layer for each output
        for i_layer in self.norm_indices:
            if i_layer >= len(self.num_features):
                continue
            layer = norm_layer(num_features[i_layer])
            layer_name = f"norm{i_layer}"
            self.add_module(layer_name, layer)
        # add projector head
        self.projection = projection
        if projection:
            self.project_dim = project_dim
            self.norm = norm_layer(self.num_features[-1])
            self.projector = nn.Linear(self.num_features[-1], project_dim, bias=False)
        self._freeze_stages()
    def _freeze_stages(self):
        # Freeze patch embedding, then (optionally) the position embedding and
        # the first ``frozen_stages - 1`` transformer stages.
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False
        if self.frozen_stages >= 1 and self.ape:
            self.absolute_pos_embed.requires_grad = False
        if self.frozen_stages >= 2:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages - 1):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        # NOTE(review): ``_init_weights`` is defined but never applied (no
        # ``self.apply(_init_weights)``) and ``pretrained`` is unused, so this
        # method is currently a no-op. Upstream Swin applies the closure and/or
        # loads the checkpoint here — confirm whether initialization is instead
        # handled by the detectron2 checkpointer before relying on this.
        def _init_weights(m):
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=0.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        """Forward function.

        Returns:
            dict[str, Tensor]: feature maps keyed "res2".."res5" (NCHW) for the
            selected ``out_indices``; plus "fc" when ``projection`` is enabled.
        """
        x = self.patch_embed(x)
        Wh, Ww = x.size(2), x.size(3)
        if self.ape:
            # interpolate the position embedding to the corresponding size
            absolute_pos_embed = F.interpolate(
                self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
            )
            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C
        else:
            x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)
        outs = {}
        for i in range(self.num_layers):
            layer = self.layers[i]
            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
            if i in self.out_indices:
                if i in self.norm_indices:
                    norm_layer = getattr(self, f"norm{i}")
                    x_out = norm_layer(x_out)
                out = (
                    x_out.view(-1, H, W, self.num_features[i])
                    .permute(0, 3, 1, 2)
                    .contiguous()
                )
                outs["res{}".format(i + 2)] = out
        # Project the final stage output (x_out/H/W from the last loop
        # iteration) into the contrastive/projection space.
        if self.projection:
            x_out = self.norm(x_out)
            x_out = x_out.view(-1, H, W, self.num_features[-1]).contiguous()
            outs["fc"] = self.projector(x_out).permute(0, 3, 1, 2)
        return outs
    def train(self, mode=True):
        """Convert the model into training mode while keep layers freezed."""
        super(SwinTransformer, self).train(mode)
        # Re-apply freezing: nn.Module.train() flips every submodule back to
        # train mode, including the ones we want kept in eval.
        self._freeze_stages()
@BACKBONE_REGISTRY.register()
class D2SwinTransformer(SwinTransformer, Backbone):
    """detectron2 ``Backbone`` wrapper around :class:`SwinTransformer`.

    Reads all hyper-parameters from ``cfg.MODEL.SWIN`` and restricts the
    returned feature dict to ``cfg.MODEL.SWIN.OUT_FEATURES``.
    """

    def __init__(self, cfg, input_shape):
        swin = cfg.MODEL.SWIN
        super().__init__(
            pretrain_img_size=swin.PRETRAIN_IMG_SIZE,
            patch_size=swin.PATCH_SIZE,
            in_chans=3,  # RGB input
            embed_dim=swin.EMBED_DIM,
            depths=swin.DEPTHS,
            num_heads=swin.NUM_HEADS,
            window_size=swin.WINDOW_SIZE,
            mlp_ratio=swin.MLP_RATIO,
            qkv_bias=swin.QKV_BIAS,
            qk_scale=swin.QK_SCALE,
            drop_rate=swin.DROP_RATE,
            attn_drop_rate=swin.ATTN_DROP_RATE,
            drop_path_rate=swin.DROP_PATH_RATE,
            norm_layer=nn.LayerNorm,
            ape=swin.APE,
            patch_norm=swin.PATCH_NORM,
            norm_indices=swin.NORM_INDICES,
            projection=swin.PROJECTION,
            project_dim=swin.PROJECT_DIM,
        )
        self._out_features = swin.OUT_FEATURES
        # Stage strides are fixed by the 4x patch embed + three 2x mergings;
        # "fc" shares the last stage's resolution and width.
        self._out_feature_strides = dict(
            zip(("res2", "res3", "res4", "res5", "fc"), (4, 8, 16, 32, 32))
        )
        self._out_feature_channels = {
            name: self.num_features[idx]
            for name, idx in (
                ("res2", 0),
                ("res3", 1),
                ("res4", 2),
                ("res5", 3),
                ("fc", 3),
            )
        }

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
        Returns:
            dict[str->Tensor]: names and the corresponding features
        """
        assert (
            x.dim() == 4
        ), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
        features = super().forward(x)
        # Keep only the features requested via cfg.MODEL.SWIN.OUT_FEATURES.
        return {
            name: feat for name, feat in features.items() if name in self._out_features
        }

    def output_shape(self):
        """Describe channel count and stride of every exported feature."""
        shapes = {}
        for name in self._out_features:
            shapes[name] = ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name],
            )
        return shapes

    @property
    def size_divisibility(self):
        return 32
# --- mask2former/data/build.py (bytedance/FreeSeg) ---
import itertools
import logging
import numpy as np
from collections import Counter
import torch.utils.data
from tabulate import tabulate
from termcolor import colored
from detectron2.utils.logger import _log_api_usage, log_first_n
from detectron2.data.catalog import DatasetCatalog, MetadataCatalog
import torch.utils.data
from detectron2.config import configurable
from detectron2.data.build import (
build_batch_data_loader,
trivial_batch_collator,
load_proposals_into_dataset,
filter_images_with_only_crowd_annotations,
filter_images_with_few_keypoints,
print_instances_class_histogram,
)
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.detection_utils import check_metadata_consistency
from detectron2.data.samplers import (
InferenceSampler,
RandomSubsetTrainingSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
"""
This file contains the default logic to build a dataloader for training or testing.
"""
__all__ = [
"build_detection_train_loader",
"build_detection_test_loader",
]
def print_classification_instances_class_histogram(dataset_dicts, class_names):
    """Log a table with the number of classification samples per class.

    Args:
        dataset_dicts (list[dict]): list of dataset dicts; each must carry a
            ``category_id`` in ``[0, len(class_names))``.
        class_names (list[str]): list of class names (zero-indexed).
    """
    num_classes = len(class_names)
    hist_bins = np.arange(num_classes + 1)
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # ``int`` dtype is the supported equivalent.
    histogram = np.zeros((num_classes,), dtype=int)
    for entry in dataset_dicts:
        classes = np.asarray([entry["category_id"]], dtype=int)
        if len(classes):
            assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
            assert (
                classes.max() < num_classes
            ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
        histogram += np.histogram(classes, bins=hist_bins)[0]
    N_COLS = min(6, len(class_names) * 2)

    def short_name(x):
        # make long class names shorter. useful for lvis
        if len(x) > 13:
            return x[:11] + ".."
        return x

    # Flatten to [name0, count0, name1, count1, ...] then pad so the data can
    # be reflowed into N_COLS columns.
    data = list(
        itertools.chain(
            *[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]
        )
    )
    total_num_instances = sum(data[1::2])
    # NOTE(review): when len(data) is already a multiple of N_COLS this adds a
    # full extra row of None padding; kept as-is to preserve the existing
    # table layout.
    data.extend([None] * (N_COLS - (len(data) % N_COLS)))
    if num_classes > 1:
        data.extend(["total", total_num_instances])
    data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
    table = tabulate(
        data,
        headers=["category", "#instances"] * (N_COLS // 2),
        tablefmt="pipe",
        numalign="left",
        stralign="center",
    )
    log_first_n(
        logging.INFO,
        "Distribution of instances among all {} categories:\n".format(num_classes)
        + colored(table, "cyan"),
        key="message",
    )
def wrap_metas(dataset_dict, **kwargs):
    """Attach a ``meta`` entry (holding *kwargs*) to every sample dict.

    Each sample is mutated in place; all samples share the same ``meta`` dict
    object. A sample that already has a ``meta`` key is rejected.
    """
    wrapped = []
    for sample in dataset_dict:
        assert (
            "meta" not in sample
        ), "Assigned attributes should not exist in the original sample."
        sample["meta"] = kwargs
        wrapped.append(sample)
    return wrapped
def get_detection_dataset_dicts(
    names, filter_empty=True, min_keypoints=0, proposal_files=None
):
    """
    Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.

    Each sample additionally receives a ``meta`` key (via :func:`wrap_metas`)
    recording which dataset it came from.

    Args:
        names (str or list[str]): a dataset name or a list of dataset names
        filter_empty (bool): whether to filter out images without instance annotations
        min_keypoints (int): filter out images with fewer keypoints than
            `min_keypoints`. Set to 0 to do nothing.
        proposal_files (list[str]): if given, a list of object proposal files
            that match each dataset in `names`.
    Returns:
        list[dict]: a list of dicts following the standard dataset dict format.
    """
    if isinstance(names, str):
        names = [names]
    assert len(names), names
    dataset_dicts = [
        wrap_metas(DatasetCatalog.get(dataset_name), dataset_name=dataset_name)
        for dataset_name in names
    ]
    for dataset_name, dicts in zip(names, dataset_dicts):
        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
    if proposal_files is not None:
        assert len(names) == len(proposal_files)
        # load precomputed proposals from proposal files
        dataset_dicts = [
            load_proposals_into_dataset(dataset_i_dicts, proposal_file)
            for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
        ]
    # Merge the per-dataset lists into one flat list of samples.
    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
    # Instance-style filtering only applies when samples carry "annotations".
    has_instances = "annotations" in dataset_dicts[0]
    if filter_empty and has_instances:
        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
    if min_keypoints > 0 and has_instances:
        dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
    if has_instances:
        try:
            class_names = MetadataCatalog.get(names[0]).thing_classes
            check_metadata_consistency("thing_classes", names)
            print_instances_class_histogram(dataset_dicts, class_names)
        except AttributeError:  # class names are not available for this dataset
            pass
    assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
    return dataset_dicts
def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
    """Translate a cfg into keyword arguments for ``build_detection_train_loader``."""
    if dataset is None:
        dataset = get_detection_dataset_dicts(
            cfg.DATASETS.TRAIN,
            filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
            min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
            if cfg.MODEL.KEYPOINT_ON
            else 0,
            proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
            if cfg.MODEL.LOAD_PROPOSALS
            else None,
        )
    _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
    if mapper is None:
        mapper = DatasetMapper(cfg, True)
    # NOTE(review): samples carrying a "task" field disable shuffling in the
    # TrainingSampler branch below — presumably to keep task-grouped ordering
    # for multi-task training; confirm against the dataset registration code.
    if "task" in dataset[0].keys():
        shuffle = False
    else:
        shuffle = True
    if sampler is None:
        sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
        logger = logging.getLogger(__name__)
        logger.info("Using training sampler {}".format(sampler_name))
        if sampler_name == "TrainingSampler":
            sampler = TrainingSampler(len(dataset), shuffle=shuffle)
        elif sampler_name == "RepeatFactorTrainingSampler":
            repeat_factors = (
                RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
                    dataset, cfg.DATALOADER.REPEAT_THRESHOLD
                )
            )
            sampler = RepeatFactorTrainingSampler(repeat_factors)
        elif sampler_name == "RandomSubsetTrainingSampler":
            sampler = RandomSubsetTrainingSampler(
                len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO
            )
        else:
            raise ValueError("Unknown training sampler: {}".format(sampler_name))
    return {
        "dataset": dataset,
        "sampler": sampler,
        "mapper": mapper,
        "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
        "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
        "num_workers": cfg.DATALOADER.NUM_WORKERS,
    }
# TODO can allow dataset as an iterable or IterableDataset to make this function more general
@configurable(from_config=_train_loader_from_config)
def build_detection_train_loader(
    dataset,
    *,
    mapper,
    sampler=None,
    total_batch_size,
    aspect_ratio_grouping=True,
    num_workers=0,
):
    """Build a batched dataloader for training an object detector.

    This interface is experimental.

    Args:
        dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
            or a map-style pytorch dataset, e.g. from :func:`DatasetCatalog.get`
            or :func:`get_detection_dataset_dicts`.
        mapper (callable): maps one sample (dict) from ``dataset`` into the
            format consumed by the model. When built from a cfg, the default
            is ``DatasetMapper(cfg, is_train=True)``.
        sampler (torch.utils.data.sampler.Sampler or None): produces indices
            into ``dataset``. Defaults to :class:`TrainingSampler`, an infinite
            random shuffle sequence coordinated across all workers.
        total_batch_size (int): total batch size across all workers; batching
            simply puts data into a list.
        aspect_ratio_grouping (bool): group images with similar aspect ratio
            for efficiency; requires dict samples with "width"/"height" keys.
        num_workers (int): number of parallel data loading workers.

    Returns:
        torch.utils.data.DataLoader: each output is a ``list[mapped_element]``
        of length ``total_batch_size / num_workers``, where ``mapped_element``
        is produced by ``mapper``.
    """
    # Normalize into a map-style torch Dataset, then apply the mapper lazily.
    if isinstance(dataset, list):
        dataset = DatasetFromList(dataset, copy=False)
    if mapper is not None:
        dataset = MapDataset(dataset, mapper)
    if sampler is None:
        sampler = TrainingSampler(len(dataset))
    assert isinstance(sampler, torch.utils.data.sampler.Sampler)
    return build_batch_data_loader(
        dataset,
        sampler,
        total_batch_size,
        aspect_ratio_grouping=aspect_ratio_grouping,
        num_workers=num_workers,
    )
def _test_loader_from_config(cfg, dataset_name, mapper=None):
    """Translate a cfg + dataset name into ``build_detection_test_loader`` args.

    Uses the given ``dataset_name`` argument (instead of the names in cfg),
    because the standard practice is to evaluate each test set individually.
    """
    names = [dataset_name] if isinstance(dataset_name, str) else dataset_name
    proposal_files = None
    if cfg.MODEL.LOAD_PROPOSALS:
        # Look up each test set's proposal file by its position in cfg.
        registered = list(cfg.DATASETS.TEST)
        proposal_files = [
            cfg.DATASETS.PROPOSAL_FILES_TEST[registered.index(name)] for name in names
        ]
    dataset = get_detection_dataset_dicts(
        names,
        filter_empty=False,
        proposal_files=proposal_files,
    )
    if mapper is None:
        mapper = DatasetMapper(cfg, False)
    return {
        "dataset": dataset,
        "mapper": mapper,
        "num_workers": cfg.DATALOADER.NUM_WORKERS,
        "samples_per_gpu": cfg.SOLVER.TEST_IMS_PER_BATCH,
    }
@configurable(from_config=_test_loader_from_config)
def build_detection_test_loader(
    dataset, *, mapper, sampler=None, num_workers=0, samples_per_gpu=1
):
    """Build a dataloader for evaluation.

    Similar to :func:`build_detection_train_loader`, but batches
    ``samples_per_gpu`` images (default 1) and uses :class:`InferenceSampler`,
    which coordinates all workers to produce the exact set of all samples.

    This interface is experimental.

    Args:
        dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
            or a map-style pytorch dataset, e.g. from :func:`DatasetCatalog.get`
            or :func:`get_detection_dataset_dicts`.
        mapper (callable): maps one sample (dict) from ``dataset`` into the
            format consumed by the model. When built from a cfg, the default
            is ``DatasetMapper(cfg, is_train=False)``.
        sampler (torch.utils.data.sampler.Sampler or None): produces indices
            into ``dataset``. Defaults to :class:`InferenceSampler`, which
            splits the dataset across all workers.
        num_workers (int): number of parallel data loading workers.
        samples_per_gpu (int): number of images per batch on each worker.

    Returns:
        DataLoader: a torch DataLoader that loads the given detection dataset
        with test-time transformation and batching.

    Examples:
    ::
        data_loader = build_detection_test_loader(
            DatasetRegistry.get("my_test"),
            mapper=DatasetMapper(...))

        # or, instantiate with a CfgNode:
        data_loader = build_detection_test_loader(cfg, "my_test")
    """
    if isinstance(dataset, list):
        dataset = DatasetFromList(dataset, copy=False)
    if mapper is not None:
        dataset = MapDataset(dataset, mapper)
    if sampler is None:
        sampler = InferenceSampler(len(dataset))
    batch_sampler = torch.utils.data.sampler.BatchSampler(
        sampler, samples_per_gpu, drop_last=False
    )
    return torch.utils.data.DataLoader(
        dataset,
        num_workers=num_workers,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
def dataset_sample_per_class(cfg):
    """Load the training set and optionally cap the number of samples per class.

    When ``cfg.DATASETS.SAMPLE_PER_CLASS > 0``, at most that many samples are
    kept per ``category_id`` (sampled with a seeded RNG); otherwise the full
    dataset is returned. A per-class histogram is logged either way.
    """
    dataset_dicts = get_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
        if cfg.MODEL.LOAD_PROPOSALS
        else None,
    )
    if cfg.DATASETS.SAMPLE_PER_CLASS > 0:
        category_list = [data["category_id"] for data in dataset_dicts]
        category_count = Counter(category_list)
        # Group samples by class, preserving dataset order within each group.
        category_group = {
            cat: [data for data in dataset_dicts if data["category_id"] == cat]
            for cat in category_count.keys()
        }
        rng = np.random.default_rng(cfg.DATASETS.SAMPLE_SEED)
        # NOTE(review): rng.choice samples WITH replacement, so a class with
        # exactly SAMPLE_PER_CLASS items may yield duplicates — confirm whether
        # replace=False was intended.
        selected = {
            cat: groups
            if len(groups) < cfg.DATASETS.SAMPLE_PER_CLASS
            else rng.choice(groups, size=cfg.DATASETS.SAMPLE_PER_CLASS).tolist()
            for cat, groups in category_group.items()
        }
        tmp = []
        for k, v in selected.items():
            tmp.extend(v)
        dataset_dicts = tmp
        logger = logging.getLogger(__name__)
        # logger.info(tmp)
    dataset = dataset_dicts
    print_classification_instances_class_histogram(
        dataset, MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).stuff_classes
    )
    return dataset
def dataset_sample_per_task_class(cfg):
    """Load the training set and optionally cap samples per class, per task.

    The original implementation repeated the identical per-class sampling
    block three times (for "sem_seg", "ins_seg" and "pan_seg"); that logic is
    factored into ``_sample_task`` below. Sampling behavior is preserved: a
    fresh generator seeded with ``cfg.DATASETS.SAMPLE_SEED`` is created for
    each task, classes are visited in first-occurrence order, and classes with
    fewer than ``SAMPLE_PER_CLASS`` samples are kept whole.
    """
    dataset_dicts = get_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
        if cfg.MODEL.LOAD_PROPOSALS
        else None,
    )
    max_per_class = cfg.DATASETS.SAMPLE_PER_CLASS
    if max_per_class > 0:

        def _sample_task(task):
            """Return up to ``max_per_class`` samples per class for one task."""
            # Re-seeding per task reproduces the original triplicated code,
            # which created a new default_rng before sampling each task.
            rng = np.random.default_rng(cfg.DATASETS.SAMPLE_SEED)
            category_list = [
                data["category_id"] for data in dataset_dicts if data["task"] == task
            ]
            category_count = Counter(category_list)
            category_group = {
                cat: [
                    data
                    for data in dataset_dicts
                    if data["category_id"] == cat and data["task"] == task
                ]
                for cat in category_count.keys()
            }
            sampled = []
            for groups in category_group.values():
                if len(groups) < max_per_class:
                    sampled.extend(groups)
                else:
                    # NOTE(review): rng.choice samples WITH replacement, so
                    # duplicates are possible; kept as-is to preserve the
                    # original sampling behavior.
                    sampled.extend(rng.choice(groups, size=max_per_class).tolist())
            return sampled

        tmp = []
        for task in ("sem_seg", "ins_seg", "pan_seg"):
            tmp.extend(_sample_task(task))
        dataset_dicts = tmp
        logger = logging.getLogger(__name__)
        logger.info(tmp)
    dataset = dataset_dicts
    print_classification_instances_class_histogram(
        dataset, MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).stuff_classes
    )
    return dataset
# --- mask2former/data/augmentations.py (bytedance/FreeSeg) ---
import math
import numbers
import numpy as np
from detectron2.data.transforms.augmentation import Augmentation
from detectron2.data.transforms.transform import (
CropTransform,
ResizeTransform,
TransformList,
)
from PIL import Image
from fvcore.transforms.transform import PadTransform
def mask2box(mask: np.ndarray):
    """Return the tight bounding box of a binary mask as (x, y, w, h).

    Returns None when the mask has no foreground pixel. Width/height are
    inclusive extents (max index - min index + 1).
    """
    cols = np.flatnonzero(mask.any(axis=0))
    if cols.size == 0:
        # Empty mask: no column is set, hence no row either.
        return None
    rows = np.flatnonzero(mask.any(axis=1))
    x1, x2 = cols[0], cols[-1]
    y1, y2 = rows[0], rows[-1]
    return x1, y1, x2 + 1 - x1, y2 + 1 - y1
def expand_box(x, y, w, h, expand_ratio=1.0, max_h=None, max_w=None):
    """Scale a (x, y, w, h) box about its center by ``expand_ratio``.

    When ``max_h``/``max_w`` are given, the expanded corners are clamped to
    [0, max - 1] along the corresponding axis. Returns [x, y, w, h] as ints
    (truncated toward zero).
    """
    cx = x + 0.5 * w
    cy = y + 0.5 * h
    half_w = 0.5 * w * expand_ratio
    half_h = 0.5 * h * expand_ratio
    x1, y1, x2, y2 = cx - half_w, cy - half_h, cx + half_w, cy + half_h
    if max_h is not None:
        y1 = max(0, y1)
        y2 = min(max_h - 1, y2)
    if max_w is not None:
        x1 = max(0, x1)
        x2 = min(max_w - 1, x2)
    return [int(x1), int(y1), int(x2 - x1), int(y2 - y1)]
class CropImageWithMask(Augmentation):
    """Crop around the binary mask of ``category_id`` in ``sem_seg``.

    ``expand_ratio`` is a single number (used for both bounds) or a pair;
    with ``mode="choice"`` one of the given values is sampled, with
    ``mode="range"`` a ratio is drawn uniformly from the interval.
    """

    def __init__(self, expand_ratio=1.0, mode="choice"):
        if isinstance(expand_ratio, numbers.Number):
            expand_ratio = (expand_ratio, expand_ratio)
        self.mode = mode
        self.expand_ratio = expand_ratio
        if self.mode == "range":
            assert len(expand_ratio) == 2 and expand_ratio[0] < expand_ratio[1]

    def _sample_ratio(self):
        # "choice": pick one of the candidate ratios; otherwise draw uniformly.
        if self.mode == "choice":
            return np.random.choice(self.expand_ratio)
        return np.random.uniform(self.expand_ratio[0], self.expand_ratio[1])

    def get_transform(self, image, sem_seg, category_id):
        img_h, img_w = image.shape[:2]
        # NOTE(review): mask2box returns None for an empty mask, which would
        # fail the unpack below — assumes category_id is present in sem_seg.
        box = mask2box(sem_seg == category_id)
        x, y, w, h = box
        x, y, w, h = expand_box(x, y, w, h, self._sample_ratio(), img_h, img_w)
        # Never emit a degenerate (zero-sized) crop.
        return CropTransform(x, y, max(w, 1), max(h, 1), img_w, img_h)
class CropImageWithBox(Augmentation):
    """Crop around the first box in ``boxes``, randomly expanded.

    ``expand_ratio`` is a single number (used for both bounds) or a pair;
    with ``mode="choice"`` one of the given values is sampled, with
    ``mode="range"`` a ratio is drawn uniformly from the interval.
    """

    def __init__(self, expand_ratio=1.0, mode="choice"):
        if isinstance(expand_ratio, numbers.Number):
            expand_ratio = (expand_ratio, expand_ratio)
        self.mode = mode
        self.expand_ratio = expand_ratio
        if self.mode == "range":
            assert len(expand_ratio) == 2 and expand_ratio[0] < expand_ratio[1]

    def _sample_ratio(self):
        # "choice": pick one of the candidate ratios; otherwise draw uniformly.
        if self.mode == "choice":
            return np.random.choice(self.expand_ratio)
        return np.random.uniform(self.expand_ratio[0], self.expand_ratio[1])

    def get_transform(self, image, boxes):
        img_h, img_w = image.shape[:2]
        x1, y1, x2, y2 = boxes[0]
        # Corners are inclusive pixel coordinates, hence the +1 extents.
        w0 = x2 - x1 + 1
        h0 = y2 - y1 + 1
        x, y, w, h = expand_box(x1, y1, w0, h0, self._sample_ratio(), img_h, img_w)
        # Never emit a degenerate (zero-sized) crop.
        return CropTransform(x, y, max(w, 1), max(h, 1), img_w, img_h)
class RandomResizedCrop(Augmentation):
    """Random area/aspect-ratio crop followed by a resize to ``size``.

    Samples a target area (fraction of the image, from ``scale``) and an
    aspect ratio (log-uniform over ``ratio``), retries up to 10 times, then
    falls back to a deterministic central crop.
    """

    def __init__(
        self,
        size,
        scale=(0.08, 1.0),
        ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation=Image.BILINEAR,
    ):
        # An int size means a square output.
        if isinstance(size, int):
            size = (size, size)
        else:
            assert isinstance(size, (tuple, list)) and len(size) == 2
        self.size = size
        self.scale = scale
        self.ratio = ratio
        self.interpolation = interpolation

    def get_transform(self, image):
        height, width = image.shape[:2]
        area = height * width
        # Log-space bounds so that reciprocal ratios (e.g. 3/4 and 4/3) are
        # sampled symmetrically.
        log_ratio = np.log(np.array(self.ratio))
        is_success = False
        for _ in range(10):
            target_area = area * np.random.uniform(self.scale[0], self.scale[1])
            aspect_ratio = np.exp(np.random.uniform(log_ratio[0], log_ratio[1]))
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            if 0 < w <= width and 0 < h <= height:
                # Proposed crop fits: pick a random top-left corner.
                i = np.random.randint(0, width - w + 1)
                j = np.random.randint(0, height - h + 1)
                is_success = True
                break
        if not is_success:
            # Fallback to central crop
            in_ratio = float(width) / float(height)
            if in_ratio < min(self.ratio):
                w = width
                h = int(round(w / min(self.ratio)))
            elif in_ratio > max(self.ratio):
                h = height
                w = int(round(h * max(self.ratio)))
            else: # whole image
                w = width
                h = height
            i = (width - w) // 2
            j = (height - h) // 2
        # NOTE(review): ResizeTransform gets new_h=self.size[1] and
        # new_w=self.size[0], i.e. size is treated as (width, height).
        # Harmless for square sizes — confirm intent for rectangular ones.
        return TransformList(
            [
                CropTransform(i, j, w, h, width, height),
                ResizeTransform(
                    h, w, self.size[1], self.size[0], interp=self.interpolation
                ),
            ]
        )
class CenterCrop(Augmentation):
    """Deterministic center crop of ``size``, padding first when the image
    is smaller than the crop along either axis.

    Padding uses ``seg_ignore_label`` as the fill value for segmentation
    maps, so padded pixels are ignored by the loss/evaluator.
    """

    def __init__(self, size, seg_ignore_label):
        # Accept an int, a 1-element sequence, or a (h, w) pair.
        if isinstance(size, numbers.Number):
            size = (int(size), int(size))
        elif isinstance(size, (tuple, list)) and len(size) == 1:
            size = (size[0], size[0])
        self.size = size
        self.seg_ignore_label = seg_ignore_label

    def get_transform(self, image):
        img_h, img_w = image.shape[:2]
        crop_h, crop_w = self.size
        tfms = []
        need_pad_w = crop_w > img_w
        need_pad_h = crop_h > img_h
        if need_pad_w or need_pad_h:
            # Split the deficit evenly; the extra pixel (odd deficit) goes
            # to the right/bottom side.
            left = (crop_w - img_w) // 2 if need_pad_w else 0
            top = (crop_h - img_h) // 2 if need_pad_h else 0
            right = (crop_w - img_w + 1) // 2 if need_pad_w else 0
            bottom = (crop_h - img_h + 1) // 2 if need_pad_h else 0
            tfms.append(
                PadTransform(
                    left,
                    top,
                    right,
                    bottom,
                    orig_w=img_w,
                    orig_h=img_h,
                    seg_pad_value=self.seg_ignore_label,
                )
            )
            # Subsequent crop coordinates are relative to the padded image.
            img_w += left + right
            img_h += top + bottom
        crop_top = int(round((img_h - crop_h) / 2.0))
        crop_left = int(round((img_w - crop_w) / 2.0))
        tfms.append(
            CropTransform(crop_left, crop_top, crop_w, crop_h, img_w, img_h)
        )
        return TransformList(tfms)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/__init__.py | mask2former/data/__init__.py | from .dataset_mappers import *
from . import datasets
from .build import (
build_detection_train_loader,
build_detection_test_loader,
dataset_sample_per_class,
dataset_sample_per_task_class,
)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/datasets/register_coco_stuff.py | mask2former/data/datasets/register_coco_stuff.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
from .utils import load_binary_mask
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"id": 92, "name": "banner", "supercategory": "textile"},
{"id": 93, "name": "blanket", "supercategory": "textile"},
{"id": 94, "name": "branch", "supercategory": "plant"},
{"id": 95, "name": "bridge", "supercategory": "building"},
{"id": 96, "name": "building-other", "supercategory": "building"},
{"id": 97, "name": "bush", "supercategory": "plant"},
{"id": 98, "name": "cabinet", "supercategory": "furniture-stuff"},
{"id": 99, "name": "cage", "supercategory": "structural"},
{"id": 100, "name": "cardboard", "supercategory": "raw-material"},
{"id": 101, "name": "carpet", "supercategory": "floor"},
{"id": 102, "name": "ceiling-other", "supercategory": "ceiling"},
{"id": 103, "name": "ceiling-tile", "supercategory": "ceiling"},
{"id": 104, "name": "cloth", "supercategory": "textile"},
{"id": 105, "name": "clothes", "supercategory": "textile"},
{"id": 106, "name": "clouds", "supercategory": "sky"},
{"id": 107, "name": "counter", "supercategory": "furniture-stuff"},
{"id": 108, "name": "cupboard", "supercategory": "furniture-stuff"},
{"id": 109, "name": "curtain", "supercategory": "textile"},
{"id": 110, "name": "desk-stuff", "supercategory": "furniture-stuff"},
{"id": 111, "name": "dirt", "supercategory": "ground"},
{"id": 112, "name": "door-stuff", "supercategory": "furniture-stuff"},
{"id": 113, "name": "fence", "supercategory": "structural"},
{"id": 114, "name": "floor-marble", "supercategory": "floor"},
{"id": 115, "name": "floor-other", "supercategory": "floor"},
{"id": 116, "name": "floor-stone", "supercategory": "floor"},
{"id": 117, "name": "floor-tile", "supercategory": "floor"},
{"id": 118, "name": "floor-wood", "supercategory": "floor"},
{"id": 119, "name": "flower", "supercategory": "plant"},
{"id": 120, "name": "fog", "supercategory": "water"},
{"id": 121, "name": "food-other", "supercategory": "food-stuff"},
{"id": 122, "name": "fruit", "supercategory": "food-stuff"},
{"id": 123, "name": "furniture-other", "supercategory": "furniture-stuff"},
{"id": 124, "name": "grass", "supercategory": "plant"},
{"id": 125, "name": "gravel", "supercategory": "ground"},
{"id": 126, "name": "ground-other", "supercategory": "ground"},
{"id": 127, "name": "hill", "supercategory": "solid"},
{"id": 128, "name": "house", "supercategory": "building"},
{"id": 129, "name": "leaves", "supercategory": "plant"},
{"id": 130, "name": "light", "supercategory": "furniture-stuff"},
{"id": 131, "name": "mat", "supercategory": "textile"},
{"id": 132, "name": "metal", "supercategory": "raw-material"},
{"id": 133, "name": "mirror-stuff", "supercategory": "furniture-stuff"},
{"id": 134, "name": "moss", "supercategory": "plant"},
{"id": 135, "name": "mountain", "supercategory": "solid"},
{"id": 136, "name": "mud", "supercategory": "ground"},
{"id": 137, "name": "napkin", "supercategory": "textile"},
{"id": 138, "name": "net", "supercategory": "structural"},
{"id": 139, "name": "paper", "supercategory": "raw-material"},
{"id": 140, "name": "pavement", "supercategory": "ground"},
{"id": 141, "name": "pillow", "supercategory": "textile"},
{"id": 142, "name": "plant-other", "supercategory": "plant"},
{"id": 143, "name": "plastic", "supercategory": "raw-material"},
{"id": 144, "name": "platform", "supercategory": "ground"},
{"id": 145, "name": "playingfield", "supercategory": "ground"},
{"id": 146, "name": "railing", "supercategory": "structural"},
{"id": 147, "name": "railroad", "supercategory": "ground"},
{"id": 148, "name": "river", "supercategory": "water"},
{"id": 149, "name": "road", "supercategory": "ground"},
{"id": 150, "name": "rock", "supercategory": "solid"},
{"id": 151, "name": "roof", "supercategory": "building"},
{"id": 152, "name": "rug", "supercategory": "textile"},
{"id": 153, "name": "salad", "supercategory": "food-stuff"},
{"id": 154, "name": "sand", "supercategory": "ground"},
{"id": 155, "name": "sea", "supercategory": "water"},
{"id": 156, "name": "shelf", "supercategory": "furniture-stuff"},
{"id": 157, "name": "sky-other", "supercategory": "sky"},
{"id": 158, "name": "skyscraper", "supercategory": "building"},
{"id": 159, "name": "snow", "supercategory": "ground"},
{"id": 160, "name": "solid-other", "supercategory": "solid"},
{"id": 161, "name": "stairs", "supercategory": "furniture-stuff"},
{"id": 162, "name": "stone", "supercategory": "solid"},
{"id": 163, "name": "straw", "supercategory": "plant"},
{"id": 164, "name": "structural-other", "supercategory": "structural"},
{"id": 165, "name": "table", "supercategory": "furniture-stuff"},
{"id": 166, "name": "tent", "supercategory": "building"},
{"id": 167, "name": "textile-other", "supercategory": "textile"},
{"id": 168, "name": "towel", "supercategory": "textile"},
{"id": 169, "name": "tree", "supercategory": "plant"},
{"id": 170, "name": "vegetable", "supercategory": "food-stuff"},
{"id": 171, "name": "wall-brick", "supercategory": "wall"},
{"id": 172, "name": "wall-concrete", "supercategory": "wall"},
{"id": 173, "name": "wall-other", "supercategory": "wall"},
{"id": 174, "name": "wall-panel", "supercategory": "wall"},
{"id": 175, "name": "wall-stone", "supercategory": "wall"},
{"id": 176, "name": "wall-tile", "supercategory": "wall"},
{"id": 177, "name": "wall-wood", "supercategory": "wall"},
{"id": 178, "name": "water-other", "supercategory": "water"},
{"id": 179, "name": "waterdrops", "supercategory": "water"},
{"id": 180, "name": "window-blind", "supercategory": "window"},
{"id": 181, "name": "window-other", "supercategory": "window"},
{"id": 182, "name": "wood", "supercategory": "solid"},
]
# Base (seen-during-training) categories: everything except the 15 held-out
# novel ids. Membership is tested on the contiguous index (dataset id - 1);
# a set literal makes each test O(1) and the unused enumerate index is gone.
COCO_BASE_CATEGORIES = [
    c
    for c in COCO_CATEGORIES
    if c["id"] - 1
    not in {20, 24, 32, 33, 40, 56, 86, 99, 105, 123, 144, 147, 148, 168, 171}
]
# Novel (held-out) categories: the 15 ids excluded from COCO_BASE_CATEGORIES,
# selected by contiguous index (dataset id - 1). Set literal for O(1) tests;
# the unused enumerate index is removed.
COCO_NOVEL_CATEGORIES = [
    c
    for c in COCO_CATEGORIES
    if c["id"] - 1
    in {20, 24, 32, 33, 40, 56, 86, 99, 105, 123, 144, 147, 148, 168, 171}
]
def _get_coco_stuff_meta(cat_list):
# Id 0 is reserved for ignore_label, we change ignore_label for 0
# to 255 in our pre-processing.
stuff_ids = [k["id"] for k in cat_list]
# For semantic segmentation, this mapping maps from contiguous stuff id
# (in [0, 91], used in models) to ids in the dataset (used for processing results)
stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
stuff_classes = [k["name"] for k in cat_list]
ret = {
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
}
return ret
def register_all_coco_stuff_10k(root):
    """Register the train/test splits of COCO-Stuff-10K as sem-seg datasets.

    Datasets are named ``coco_2017_{split}_stuff_10k_sem_seg`` and loaded
    via ``load_sem_seg`` (png labels, jpg images) from
    ``{root}/coco/coco_stuff_10k``.
    """
    root = os.path.join(root, "coco", "coco_stuff_10k")
    meta = _get_coco_stuff_meta(COCO_CATEGORIES)
    splits = [
        ("train", "images_detectron2/train", "annotations_detectron2/train"),
        ("test", "images_detectron2/test", "annotations_detectron2/test"),
    ]
    for split, image_dirname, sem_seg_dirname in splits:
        image_dir = os.path.join(root, image_dirname)
        gt_dir = os.path.join(root, sem_seg_dirname)
        dataset_name = f"coco_2017_{split}_stuff_10k_sem_seg"
        # Default-argument binding freezes this iteration's dirs in the lambda.
        DatasetCatalog.register(
            dataset_name,
            lambda x=image_dir, y=gt_dir: load_sem_seg(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        MetadataCatalog.get(dataset_name).set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=255,
            **meta,
        )
def register_all_coco_stuff_164k(root):
    """Register COCO-Stuff-164K under several dataset views.

    For each split ("train" -> train2017, "test" -> val2017) this registers:
      * the full sem-seg dataset over all categories,
      * a ``*_classification`` variant backed by ``load_binary_mask``,
      * a base-only dataset (labels under ``*_base``) and its
        ``*_classification`` variant,
      * a novel-only dataset (labels under ``*_novel``).

    Metadata carries ``evaluation_set`` (contiguous class indices) and
    ``trainable_flag`` (1 for base classes, 0 otherwise) where applicable.
    """
    root = os.path.join(root, "coco")
    meta = _get_coco_stuff_meta(COCO_CATEGORIES)
    base_meta = _get_coco_stuff_meta(COCO_BASE_CATEGORIES)
    novel_meta = _get_coco_stuff_meta(COCO_NOVEL_CATEGORIES)
    for name, image_dirname, sem_seg_dirname in [
        ("train", "train2017", "stuffthingmaps_detectron2/train2017"),
        ("test", "val2017", "stuffthingmaps_detectron2/val2017"),
    ]:
        image_dir = os.path.join(root, image_dirname)
        gt_dir = os.path.join(root, sem_seg_dirname)
        all_name = f"coco_2017_{name}_stuff_sem_seg"
        # Default-argument binding freezes this iteration's dirs in the lambda.
        DatasetCatalog.register(
            all_name,
            lambda x=image_dir, y=gt_dir: load_sem_seg(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        MetadataCatalog.get(all_name).set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=255,
            # Contiguous indices (positions in meta["stuff_classes"]) of the
            # base classes, and of the novel classes split by thing/stuff.
            evaluation_set={
                "base": [
                    meta["stuff_classes"].index(n) for n in base_meta["stuff_classes"]
                ],
                "novel_thing": [
                    meta["stuff_classes"].index(n)
                    for i, n in enumerate(novel_meta["stuff_classes"])
                    if COCO_NOVEL_CATEGORIES[i].get("isthing", 0) == 1
                ],
                "novel_stuff": [
                    meta["stuff_classes"].index(n)
                    for i, n in enumerate(novel_meta["stuff_classes"])
                    if COCO_NOVEL_CATEGORIES[i].get("isthing", 0) == 0
                ],
            },
            trainable_flag=[
                1 if n in base_meta["stuff_classes"] else 0
                for n in meta["stuff_classes"]
            ],
            **meta,
        )
        # classification
        DatasetCatalog.register(
            all_name + "_classification",
            lambda x=image_dir, y=gt_dir: load_binary_mask(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        MetadataCatalog.get(all_name + "_classification").set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="classification",
            ignore_label=255,
            evaluation_set={
                "base": [
                    meta["stuff_classes"].index(n) for n in base_meta["stuff_classes"]
                ],
            },
            trainable_flag=[
                1 if n in base_meta["stuff_classes"] else 0
                for n in meta["stuff_classes"]
            ],
            **meta,
        )
        # zero shot: base-category-only ground truth lives in "<dir>_base".
        image_dir = os.path.join(root, image_dirname)
        gt_dir = os.path.join(root, sem_seg_dirname + "_base")
        base_name = f"coco_2017_{name}_stuff_base_sem_seg"
        DatasetCatalog.register(
            base_name,
            lambda x=image_dir, y=gt_dir: load_sem_seg(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        MetadataCatalog.get(base_name).set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=255,
            **base_meta,
        )
        # classification
        DatasetCatalog.register(
            base_name + "_classification",
            lambda x=image_dir, y=gt_dir: load_binary_mask(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        MetadataCatalog.get(base_name + "_classification").set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="classification",
            ignore_label=255,
            **base_meta,
        )
        # zero shot: novel-category-only ground truth lives in "<dir>_novel".
        image_dir = os.path.join(root, image_dirname)
        gt_dir = os.path.join(root, sem_seg_dirname + "_novel")
        novel_name = f"coco_2017_{name}_stuff_novel_sem_seg"
        DatasetCatalog.register(
            novel_name,
            lambda x=image_dir, y=gt_dir: load_sem_seg(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        MetadataCatalog.get(novel_name).set(
            image_root=image_dir,
            sem_seg_root=gt_dir,
            evaluator_type="sem_seg",
            ignore_label=255,
            **novel_meta,
        )
def register_all_coco_stuff_164k_pseudo(root, pseudo_sem_dir):
    """Register the 164K train split with pseudo-label semantic maps.

    Images come from the regular ``train2017`` folder while labels are read
    from ``pseudo_sem_dir``. Metadata mirrors the regular registration:
    ``evaluation_set`` lists contiguous indices for base and novel classes,
    and ``trainable_flag`` marks base classes with 1, novel ones with 0.
    """
    root = os.path.join(root, "coco")
    meta = _get_coco_stuff_meta(COCO_CATEGORIES)
    base_meta = _get_coco_stuff_meta(COCO_BASE_CATEGORIES)
    novel_meta = _get_coco_stuff_meta(COCO_NOVEL_CATEGORIES)
    all_classes = meta["stuff_classes"]
    base_classes = base_meta["stuff_classes"]
    novel_classes = novel_meta["stuff_classes"]
    for split, image_dirname, _ in [
        ("train", "train2017", "stuffthingmaps_detectron2/train2017"),
    ]:
        image_dir = os.path.join(root, image_dirname)
        dataset_name = f"coco_2017_{split}_stuff_sem_seg_pseudo"
        # Default-argument binding freezes this iteration's paths in the lambda.
        DatasetCatalog.register(
            dataset_name,
            lambda x=image_dir, y=pseudo_sem_dir: load_sem_seg(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        MetadataCatalog.get(dataset_name).set(
            image_root=image_dir,
            sem_seg_root=pseudo_sem_dir,
            evaluator_type="sem_seg",
            ignore_label=255,
            evaluation_set={
                "base": [all_classes.index(n) for n in base_classes],
                "novel": [all_classes.index(n) for n in novel_classes],
            },
            trainable_flag=[1 if n in base_classes else 0 for n in all_classes],
            **meta,
        )
# Registration happens at import time; the datasets root defaults to
# "datasets" unless DETECTRON2_DATASETS is set.
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_stuff_10k(_root)
register_all_coco_stuff_164k(_root)
# Directory holding pseudo-label semantic maps (e.g. from a prior inference run).
_pseudo_dir = os.getenv("DETECTRON2_SEM_PSEUDO", "output/inference")
register_all_coco_stuff_164k_pseudo(_root, _pseudo_dir)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/datasets/register_coco_full.py | mask2former/data/datasets/register_coco_full.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import json
import logging
from PIL import Image
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
from detectron2.utils.file_io import PathManager
from .utils import load_binary_mask
logger = logging.getLogger(__name__)
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"id": 92, "name": "banner", "supercategory": "textile"},
{"id": 93, "name": "blanket", "supercategory": "textile"},
{"id": 94, "name": "branch", "supercategory": "plant"},
{"id": 95, "name": "bridge", "supercategory": "building"},
{"id": 96, "name": "building-other", "supercategory": "building"},
{"id": 97, "name": "bush", "supercategory": "plant"},
{"id": 98, "name": "cabinet", "supercategory": "furniture-stuff"},
{"id": 99, "name": "cage", "supercategory": "structural"},
{"id": 100, "name": "cardboard", "supercategory": "raw-material"},
{"id": 101, "name": "carpet", "supercategory": "floor"},
{"id": 102, "name": "ceiling-other", "supercategory": "ceiling"},
{"id": 103, "name": "ceiling-tile", "supercategory": "ceiling"},
{"id": 104, "name": "cloth", "supercategory": "textile"},
{"id": 105, "name": "clothes", "supercategory": "textile"},
{"id": 106, "name": "clouds", "supercategory": "sky"},
{"id": 107, "name": "counter", "supercategory": "furniture-stuff"},
{"id": 108, "name": "cupboard", "supercategory": "furniture-stuff"},
{"id": 109, "name": "curtain", "supercategory": "textile"},
{"id": 110, "name": "desk-stuff", "supercategory": "furniture-stuff"},
{"id": 111, "name": "dirt", "supercategory": "ground"},
{"id": 112, "name": "door-stuff", "supercategory": "furniture-stuff"},
{"id": 113, "name": "fence", "supercategory": "structural"},
{"id": 114, "name": "floor-marble", "supercategory": "floor"},
{"id": 115, "name": "floor-other", "supercategory": "floor"},
{"id": 116, "name": "floor-stone", "supercategory": "floor"},
{"id": 117, "name": "floor-tile", "supercategory": "floor"},
{"id": 118, "name": "floor-wood", "supercategory": "floor"},
{"id": 119, "name": "flower", "supercategory": "plant"},
{"id": 120, "name": "fog", "supercategory": "water"},
{"id": 121, "name": "food-other", "supercategory": "food-stuff"},
{"id": 122, "name": "fruit", "supercategory": "food-stuff"},
{"id": 123, "name": "furniture-other", "supercategory": "furniture-stuff"},
{"id": 124, "name": "grass", "supercategory": "plant"},
{"id": 125, "name": "gravel", "supercategory": "ground"},
{"id": 126, "name": "ground-other", "supercategory": "ground"},
{"id": 127, "name": "hill", "supercategory": "solid"},
{"id": 128, "name": "house", "supercategory": "building"},
{"id": 129, "name": "leaves", "supercategory": "plant"},
{"id": 130, "name": "light", "supercategory": "furniture-stuff"},
{"id": 131, "name": "mat", "supercategory": "textile"},
{"id": 132, "name": "metal", "supercategory": "raw-material"},
{"id": 133, "name": "mirror-stuff", "supercategory": "furniture-stuff"},
{"id": 134, "name": "moss", "supercategory": "plant"},
{"id": 135, "name": "mountain", "supercategory": "solid"},
{"id": 136, "name": "mud", "supercategory": "ground"},
{"id": 137, "name": "napkin", "supercategory": "textile"},
{"id": 138, "name": "net", "supercategory": "structural"},
{"id": 139, "name": "paper", "supercategory": "raw-material"},
{"id": 140, "name": "pavement", "supercategory": "ground"},
{"id": 141, "name": "pillow", "supercategory": "textile"},
{"id": 142, "name": "plant-other", "supercategory": "plant"},
{"id": 143, "name": "plastic", "supercategory": "raw-material"},
{"id": 144, "name": "platform", "supercategory": "ground"},
{"id": 145, "name": "playingfield", "supercategory": "ground"},
{"id": 146, "name": "railing", "supercategory": "structural"},
{"id": 147, "name": "railroad", "supercategory": "ground"},
{"id": 148, "name": "river", "supercategory": "water"},
{"id": 149, "name": "road", "supercategory": "ground"},
{"id": 150, "name": "rock", "supercategory": "solid"},
{"id": 151, "name": "roof", "supercategory": "building"},
{"id": 152, "name": "rug", "supercategory": "textile"},
{"id": 153, "name": "salad", "supercategory": "food-stuff"},
{"id": 154, "name": "sand", "supercategory": "ground"},
{"id": 155, "name": "sea", "supercategory": "water"},
{"id": 156, "name": "shelf", "supercategory": "furniture-stuff"},
{"id": 157, "name": "sky-other", "supercategory": "sky"},
{"id": 158, "name": "skyscraper", "supercategory": "building"},
{"id": 159, "name": "snow", "supercategory": "ground"},
{"id": 160, "name": "solid-other", "supercategory": "solid"},
{"id": 161, "name": "stairs", "supercategory": "furniture-stuff"},
{"id": 162, "name": "stone", "supercategory": "solid"},
{"id": 163, "name": "straw", "supercategory": "plant"},
{"id": 164, "name": "structural-other", "supercategory": "structural"},
{"id": 165, "name": "table", "supercategory": "furniture-stuff"},
{"id": 166, "name": "tent", "supercategory": "building"},
{"id": 167, "name": "textile-other", "supercategory": "textile"},
{"id": 168, "name": "towel", "supercategory": "textile"},
{"id": 169, "name": "tree", "supercategory": "plant"},
{"id": 170, "name": "vegetable", "supercategory": "food-stuff"},
{"id": 171, "name": "wall-brick", "supercategory": "wall"},
{"id": 172, "name": "wall-concrete", "supercategory": "wall"},
{"id": 173, "name": "wall-other", "supercategory": "wall"},
{"id": 174, "name": "wall-panel", "supercategory": "wall"},
{"id": 175, "name": "wall-stone", "supercategory": "wall"},
{"id": 176, "name": "wall-tile", "supercategory": "wall"},
{"id": 177, "name": "wall-wood", "supercategory": "wall"},
{"id": 178, "name": "water-other", "supercategory": "water"},
{"id": 179, "name": "waterdrops", "supercategory": "water"},
{"id": 180, "name": "window-blind", "supercategory": "window"},
{"id": 181, "name": "window-other", "supercategory": "window"},
{"id": 182, "name": "wood", "supercategory": "solid"},
]
# Base (seen-during-training) categories: everything except the 15 held-out
# novel ids. Membership is tested on the contiguous index (dataset id - 1);
# a set literal makes each test O(1) and the unused enumerate index is gone.
COCO_BASE_CATEGORIES = [
    c
    for c in COCO_CATEGORIES
    if c["id"] - 1
    not in {20, 24, 32, 33, 40, 56, 86, 99, 105, 123, 144, 147, 148, 168, 171}
]
# Novel (held-out) categories: the 15 ids excluded from COCO_BASE_CATEGORIES,
# selected by contiguous index (dataset id - 1). Set literal for O(1) tests;
# the unused enumerate index is removed.
COCO_NOVEL_CATEGORIES = [
    c
    for c in COCO_CATEGORIES
    if c["id"] - 1
    in {20, 24, 32, 33, 40, 56, 86, 99, 105, 123, 144, 147, 148, 168, 171}
]
def _get_coco_stuff_meta(cat_list):
# Id 0 is reserved for ignore_label, we change ignore_label for 0
# to 255 in our pre-processing.
thing_ids = [k["id"] for k in cat_list if "isthing" in k]
stuff_ids = [k["id"] for k in cat_list]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
thing_classes = [k["name"] for k in cat_list if "isthing" in k]
stuff_classes = [k["name"] for k in cat_list]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
"stuff_classes": stuff_classes,
}
return ret
def load_full_binary_mask(json_file, pan_dir, gt_root, image_root, meta, gt_ext="png", image_ext="jpg"):
"""
`label_count_file` contains a dictionary like:
```
"""
label_count_file = gt_root + "_label_count.json"
with open(label_count_file) as f:
label_count_dict = json.load(f)
data_dicts = load_sem_seg(gt_root, image_root, gt_ext, image_ext)
flattened_data_dicts = []
for data in data_dicts:
category_per_image = label_count_dict[
os.path.basename(data["sem_seg_file_name"])
]
data["task"] = "sem_seg"
flattened_data = [
dict(**{"category_id": cat}, **data) for cat in category_per_image
]
flattened_data_dicts.extend(flattened_data)
pan_data_dicts = load_coco_full_json(json_file, image_root, pan_dir, gt_root, meta)
# instance
for data in pan_data_dicts:
for seg_info in data["segments_info"]:
if seg_info["iscrowd"] == 0 and seg_info["isthing"] == True:
flattened_data = dict(**seg_info, **{
"task": "ins_seg",
"file_name": data["file_name"],
"pan_seg_file_name": data["pan_seg_file_name"],
"sem_seg_file_name": data["sem_seg_file_name"]
})
flattened_data_dicts.append(flattened_data)
# panoptic
for data in pan_data_dicts:
for seg_info in data["segments_info"]:
if seg_info["iscrowd"] == 0:
flattened_data = dict(**seg_info, **{
"task": "pan_seg",
"file_name": data["file_name"],
"pan_seg_file_name": data["pan_seg_file_name"],
"sem_seg_file_name": data["sem_seg_file_name"]
})
flattened_data_dicts.append(flattened_data)
logger.info(
"Loaded {} images with flattened semantic segmentation from {}".format(
len(flattened_data_dicts), image_root
)
)
return flattened_data_dicts
def load_coco_full_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
def file2id(folder_path, file_path):
# extract relative path starting from `folder_path`
image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
# remove file extension
image_id = os.path.splitext(image_id)[0]
return image_id
with PathManager.open(json_file) as f:
json_info = json.load(f)
input_files = sorted(
(os.path.join(image_dir, f) for f in PathManager.ls(image_dir) if f.endswith("jpg")),
key=lambda file_path: file2id(image_dir, file_path),
)
gt_files = sorted(
(os.path.join(gt_dir, f) for f in PathManager.ls(gt_dir) if f.endswith("png")),
key=lambda file_path: file2id(gt_dir, file_path),
)
semseg_files = sorted(
(os.path.join(semseg_dir, f) for f in PathManager.ls(semseg_dir) if f.endswith("png")),
key=lambda file_path: file2id(semseg_dir, file_path),
)
assert len(gt_files) > 0, "No annotations found in {}.".format(gt_dir)
# Use the intersection
if len(input_files) != len(gt_files) or len(input_files) != len(semseg_files):
logger.warn(
"Directory {}, {}, and {} has {}, {}, and {} files, respectively.".format(
image_dir, gt_dir, semseg_dir, len(input_files), len(gt_files), len(semseg_files)
)
)
input_basenames = [os.path.basename(f)[: -len("jpg")] for f in input_files]
gt_basenames = [os.path.basename(f)[: -len("png")] for f in gt_files]
semseg_basenames = [os.path.basename(f)[: -len("png")] for f in semseg_files]
intersect = list(set(input_basenames) & set(gt_basenames) & set(semseg_basenames))
# sort, otherwise each worker may obtain a list[dict] in different order
intersect = sorted(intersect)
logger.warn("Will use their intersection of {} files.".format(len(intersect)))
input_files = [os.path.join(image_dir, f + "jpg") for f in intersect]
gt_files = [os.path.join(gt_dir, f + "png") for f in intersect]
semseg_files = [os.path.join(semseg_dir, f + "png") for f in intersect]
logger.info(
"Loaded {} images with semantic segmentation from {}".format(len(input_files), image_dir)
)
annotations_dicts = {anno["file_name"]: anno for anno in json_info["annotations"]}
dataset_dicts = []
for (img_path, gt_path, semseg_path) in zip(input_files, gt_files, semseg_files):
record = {}
record["file_name"] = img_path
record["pan_seg_file_name"] = gt_path
record["sem_seg_file_name"] = semseg_path
basename = os.path.basename(gt_path)
annotation = annotations_dicts[basename]
image_id = int(annotation["image_id"])
record["image_id"] = image_id
segments_info = [_convert_category_id(x, meta) for x in annotation["segments_info"]
if x["category_id"] in meta["stuff_dataset_id_to_contiguous_id"]]
record["segments_info"] = segments_info
dataset_dicts.append(record)
assert len(dataset_dicts), f"No images found in {image_dir}!"
assert PathManager.isfile(dataset_dicts[0]["file_name"]), dataset_dicts[0]["file_name"]
assert PathManager.isfile(dataset_dicts[0]["pan_seg_file_name"]), dataset_dicts[0]["pan_seg_file_name"]
assert PathManager.isfile(dataset_dicts[0]["sem_seg_file_name"]), dataset_dicts[0]["sem_seg_file_name"]
return dataset_dicts
def register_all_coco_full_164k(root):
root = os.path.join(root, "coco")
meta = _get_coco_stuff_meta(COCO_CATEGORIES)
base_meta = _get_coco_stuff_meta(COCO_BASE_CATEGORIES)
novel_meta = _get_coco_stuff_meta(COCO_NOVEL_CATEGORIES)
for name, image_dirname, sem_seg_dirname in [
("train", "train2017", "stuffthingmaps_panoptic_detectron2/train2017"),
("test", "val2017", "stuffthingmaps_panoptic_detectron2/val2017"),
]:
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname)
all_name = f"coco_2017_{name}_full_task"
if name == "test":
prefix_instances = "coco_2017_" + "val"
panoptic_json = os.path.join(root, "panoptic_detectron2/panoptic_val2017.json")
panoptic_root = os.path.join(root, "panoptic_detectron2/val2017")
else:
prefix_instances = "coco_2017_" + name
panoptic_json = os.path.join(root, "panoptic_detectron2/panoptic_train2017.json")
panoptic_root = os.path.join(root, "panoptic_detectron2/train2017")
instances_meta = MetadataCatalog.get(prefix_instances)
instances_json = instances_meta.json_file
DatasetCatalog.register(
all_name,
lambda panoptic_json=panoptic_json,
image_dir=image_dir,
panoptic_root=panoptic_root,
gt_dir=gt_dir: load_coco_full_json(
panoptic_json,
image_dir,
panoptic_root,
gt_dir,
meta
),
)
MetadataCatalog.get(all_name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
json_file=instances_json,
panoptic_json=panoptic_json,
panoptic_root=panoptic_root,
evaluator_type="coco_panoptic_seg",
ignore_label=255,
evaluation_set={
"base": [
meta["stuff_classes"].index(n) for n in base_meta["stuff_classes"]
],
"novel_thing": [
meta["stuff_classes"].index(n)
for i, n in enumerate(novel_meta["stuff_classes"])
if COCO_NOVEL_CATEGORIES[i].get("isthing", 0) == 1
],
"novel_stuff": [
meta["stuff_classes"].index(n)
for i, n in enumerate(novel_meta["stuff_classes"])
if COCO_NOVEL_CATEGORIES[i].get("isthing", 0) == 0
],
},
trainable_flag=[
1 if n in base_meta["stuff_classes"] else 0
for n in meta["stuff_classes"]
],
**meta,
)
# classification
DatasetCatalog.register(
all_name + "_classification",
lambda x=image_dir, y=gt_dir: load_binary_mask(
y, x, gt_ext="png", image_ext="jpg"
),
)
MetadataCatalog.get(all_name + "_classification").set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="classification",
ignore_label=255,
evaluation_set={
"base": [
meta["stuff_classes"].index(n) for n in base_meta["stuff_classes"]
],
},
trainable_flag=[
1 if n in base_meta["stuff_classes"] else 0
for n in meta["stuff_classes"]
],
**meta,
)
# zero shot
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname + "_base")
base_name = f"coco_2017_{name}_full_task_base"
panoptic_json_base = panoptic_json[:-5] + "_base.json"
panoptic_root_base = panoptic_root + "_base"
DatasetCatalog.register(
base_name,
lambda panoptic_json=panoptic_json_base,
image_dir=image_dir,
panoptic_root=panoptic_root_base,
gt_dir=gt_dir: load_coco_full_json(
panoptic_json,
image_dir,
panoptic_root,
gt_dir,
base_meta
),
)
MetadataCatalog.get(base_name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
json_file=instances_json,
panoptic_json=panoptic_json_base,
panoptic_root=panoptic_root_base,
evaluator_type="coco_panoptic_seg",
ignore_label=255,
**base_meta,
)
# classification
DatasetCatalog.register(
base_name + "_classification",
lambda panoptic_json=panoptic_json_base,
image_dir=image_dir,
panoptic_root=panoptic_root_base,
gt_dir=gt_dir: load_full_binary_mask(
panoptic_json,
panoptic_root,
gt_dir,
image_dir,
base_meta,
gt_ext="png",
image_ext="jpg"
),
)
MetadataCatalog.get(base_name + "_classification").set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="classification",
ignore_label=255,
**base_meta,
)
# zero shot
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname + "_novel")
novel_name = f"coco_2017_{name}_full_task_novel"
panoptic_json_novel = panoptic_json[:-5] + "_novel.json"
panoptic_root_novel = panoptic_root + "_novel"
DatasetCatalog.register(
novel_name,
lambda panoptic_json=panoptic_json_novel,
image_dir=image_dir,
panoptic_root=panoptic_root_novel,
gt_dir=gt_dir: load_coco_full_json(
panoptic_json,
image_dir,
panoptic_root,
gt_dir,
novel_meta
),
)
MetadataCatalog.get(novel_name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
json_file=instances_json,
panoptic_json=panoptic_json_novel,
panoptic_root=panoptic_root_novel,
evaluator_type="coco_panoptic_seg",
ignore_label=255,
**novel_meta,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_full_164k(_root)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/datasets/register_coco_panoptic.py | mask2former/data/datasets/register_coco_panoptic.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from PIL import Image
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
from detectron2.utils.file_io import PathManager
# All coco categories, together with their nice-looking visualization colors
# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
COCO_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
{"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
{"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
{"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
{"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
{"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
{"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
{"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
{"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
{"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
{"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
{"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
{"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
{"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
{"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
{"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
{"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
{"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
{"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
{"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
{"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
{"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
{"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
{"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
{"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
{"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
{"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
{"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
{"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
{"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
{"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
{"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
{"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
{"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
{"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
{"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
{"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
{"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
{"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
{"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
{"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
{"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
{"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
{"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
{"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
{"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
{"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
{"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
{"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
{"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
{"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
{"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
{"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
{"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
{"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
{"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
{"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
{"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
{"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
{"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
{"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
{"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
{"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
{"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
{"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
{"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
{"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
{"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
{"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
{"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
{"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
{"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
{"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
{"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
{"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
{"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
{"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
{"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
{"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
{"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
{"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
{"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
{"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
{"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
{"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
{"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
{"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
{"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
{"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
{"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
{"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
{"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
{"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
{"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
{"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
{"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
{"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
{"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
{"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
{"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
{"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
{"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
{"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
{"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
{"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
{"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
{"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
{"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
{"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
{"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
{"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
{"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
{"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
{"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
{"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
{"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
{"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
{"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
{"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
{"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
{"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
{"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
]
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
"coco_2017_train_panoptic": (
# This is the original panoptic annotation directory
"coco/panoptic_train2017",
"coco/annotations/panoptic_train2017.json",
# This directory contains semantic annotations that are
# converted from panoptic annotations.
# It is used by PanopticFPN.
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
# to create these directories.
"coco/panoptic_semseg_train2017",
),
"coco_2017_val_panoptic": (
"coco/panoptic_val2017",
"coco/annotations/panoptic_val2017.json",
"coco/panoptic_semseg_val2017",
),
}
COCO_BASE_CATEGORIES = [
c
for i, c in enumerate(COCO_CATEGORIES)
if c["id"] - 1
not in [20, 24, 32, 33, 40, 56, 86, 123, 148]
]
base_stuff_classes = [k["name"] for k in COCO_BASE_CATEGORIES]
def get_metadata():
meta = {}
# The following metadata maps contiguous id from [0, #thing categories +
# #stuff categories) to their names and colors. We have to replica of the
# same name and color under "thing_*" and "stuff_*" because the current
# visualization function in D2 handles thing and class classes differently
# due to some heuristic used in Panoptic FPN. We keep the same naming to
# enable reusing existing visualization functions.
thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
stuff_colors = [k["color"] for k in COCO_CATEGORIES]
meta["thing_classes"] = thing_classes
meta["thing_colors"] = thing_colors
meta["stuff_classes"] = stuff_classes
meta["stuff_colors"] = stuff_colors
thing_dataset_id_to_contiguous_id = {}
stuff_dataset_id_to_contiguous_id = {}
for i, cat in enumerate(COCO_CATEGORIES):
if cat["isthing"]:
thing_dataset_id_to_contiguous_id[cat["id"]] = i
# in order to use sem_seg evaluator
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
# thing_dataset_id_to_contiguous_id[255] = 0
# stuff_dataset_id_to_contiguous_id[255] = 0
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
return meta
def load_coco_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
def _convert_category_id(segment_info, meta):
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = True
else:
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
segment_info["category_id"]
]
segment_info["isthing"] = False
return segment_info
with PathManager.open(json_file) as f:
json_info = json.load(f)
ret = []
for ann in json_info["annotations"]:
image_id = int(ann["image_id"])
# TODO: currently we assume image and label has the same filename but
# different extension, and images have extension ".jpg" for COCO. Need
# to make image extension a user-provided argument if we extend this
# function to support other COCO-like datasets.
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
label_file = os.path.join(gt_dir, ann["file_name"])
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
ret.append(
{
"file_name": image_file,
"image_id": image_id,
"pan_seg_file_name": label_file,
"sem_seg_file_name": sem_label_file,
"segments_info": segments_info,
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
return ret
def register_coco_panoptic_annos_sem_seg(
name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
):
panoptic_name = name
delattr(MetadataCatalog.get(panoptic_name), "thing_classes")
delattr(MetadataCatalog.get(panoptic_name), "thing_colors")
MetadataCatalog.get(panoptic_name).set(
thing_classes=metadata["thing_classes"],
thing_colors=metadata["thing_colors"],
# thing_dataset_id_to_contiguous_id=metadata["thing_dataset_id_to_contiguous_id"],
)
semantic_name = name + "_with_sem_seg"
DatasetCatalog.register(
semantic_name,
lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, sem_seg_root, metadata),
)
MetadataCatalog.get(semantic_name).set(
sem_seg_root=sem_seg_root,
panoptic_root=panoptic_root,
image_root=image_root,
panoptic_json=panoptic_json,
json_file=instances_json,
evaluator_type="coco_panoptic_seg",
ignore_label=255,
label_divisor=1000,
trainable_flag=[
1 if n in base_stuff_classes else 0
for n in metadata["stuff_classes"]
],
**metadata,
)
def register_all_coco_panoptic_annos_sem_seg(root):
for (
prefix,
(panoptic_root, panoptic_json, semantic_root),
) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
prefix_instances = prefix[: -len("_panoptic")]
instances_meta = MetadataCatalog.get(prefix_instances)
image_root, instances_json = instances_meta.image_root, instances_meta.json_file
register_coco_panoptic_annos_sem_seg(
prefix,
get_metadata(),
image_root,
os.path.join(root, panoptic_root),
os.path.join(root, panoptic_json),
os.path.join(root, semantic_root),
instances_json,
)
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_coco_panoptic_annos_sem_seg(_root)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/datasets/utils.py | mask2former/data/datasets/utils.py | import json
import logging
import os
from detectron2.data.datasets.coco import load_sem_seg
logger = logging.getLogger(__name__)
def load_binary_mask(gt_root, image_root, gt_ext="png", image_ext="jpg"):
"""
Flatten the results of `load_sem_seg` to annotations for binary mask.
`label_count_file` contains a dictionary like:
```
{
"xxx.png":[0,3,5],
"xxxx.png":[3,4,7],
}
```
"""
label_count_file = gt_root + "_label_count.json"
with open(label_count_file) as f:
label_count_dict = json.load(f)
data_dicts = load_sem_seg(gt_root, image_root, gt_ext, image_ext)
flattened_data_dicts = []
for data in data_dicts:
category_per_image = label_count_dict[
os.path.basename(data["sem_seg_file_name"])
]
flattened_data = [
dict(**{"category_id": cat}, **data) for cat in category_per_image
]
flattened_data_dicts.extend(flattened_data)
logger.info(
"Loaded {} images with flattened semantic segmentation from {}".format(
len(flattened_data_dicts), image_root
)
)
return flattened_data_dicts
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/datasets/register_voc_seg.py | mask2former/data/datasets/register_voc_seg.py | # Copyright (c) Facebook, Inc. and its affiliates.
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import load_sem_seg
from .utils import load_binary_mask
CLASS_NAMES = (
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
)
BASE_CLASS_NAMES = [
c for i, c in enumerate(CLASS_NAMES) if i not in [15, 16, 17, 18, 19]
]
NOVEL_CLASS_NAMES = [c for i, c in enumerate(CLASS_NAMES) if i in [15, 16, 17, 18, 19]]
def _get_voc_meta(cat_list):
ret = {
"thing_classes": cat_list,
"stuff_classes": cat_list,
}
return ret
def register_all_voc_11k(root):
root = os.path.join(root, "VOC2012")
meta = _get_voc_meta(CLASS_NAMES)
base_meta = _get_voc_meta(BASE_CLASS_NAMES)
novel_meta = _get_voc_meta(NOVEL_CLASS_NAMES)
for name, image_dirname, sem_seg_dirname in [
("train", "JPEGImages", "annotations_detectron2/train"),
("test", "JPEGImages", "annotations_detectron2/val"),
]:
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname)
all_name = f"voc_sem_seg_{name}"
DatasetCatalog.register(
all_name,
lambda x=image_dir, y=gt_dir: load_sem_seg(
y, x, gt_ext="png", image_ext="jpg"
),
)
MetadataCatalog.get(all_name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=255,
**meta,
)
MetadataCatalog.get(all_name).set(
evaluation_set={
"base": [
meta["stuff_classes"].index(n) for n in base_meta["stuff_classes"]
],
},
trainable_flag=[
1 if n in base_meta["stuff_classes"] else 0
for n in meta["stuff_classes"]
],
)
# classification
DatasetCatalog.register(
all_name + "_classification",
lambda x=image_dir, y=gt_dir: load_binary_mask(
y, x, gt_ext="png", image_ext="jpg"
),
)
MetadataCatalog.get(all_name + "_classification").set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="classification",
ignore_label=255,
evaluation_set={
"base": [
meta["stuff_classes"].index(n) for n in base_meta["stuff_classes"]
],
},
trainable_flag=[
1 if n in base_meta["stuff_classes"] else 0
for n in meta["stuff_classes"]
],
**meta,
)
# zero shot
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname + "_base")
base_name = f"voc_base_sem_seg_{name}"
DatasetCatalog.register(
base_name,
lambda x=image_dir, y=gt_dir: load_sem_seg(
y, x, gt_ext="png", image_ext="jpg"
),
)
MetadataCatalog.get(base_name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=255,
**base_meta,
)
# classification
DatasetCatalog.register(
base_name + "_classification",
lambda x=image_dir, y=gt_dir: load_binary_mask(
y, x, gt_ext="png", image_ext="jpg"
),
)
MetadataCatalog.get(base_name + "_classification").set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="classification",
ignore_label=255,
**base_meta,
)
# zero shot
image_dir = os.path.join(root, image_dirname)
gt_dir = os.path.join(root, sem_seg_dirname + "_novel")
novel_name = f"voc_novel_sem_seg_{name}"
DatasetCatalog.register(
novel_name,
lambda x=image_dir, y=gt_dir: load_sem_seg(
y, x, gt_ext="png", image_ext="jpg"
),
)
MetadataCatalog.get(novel_name).set(
image_root=image_dir,
sem_seg_root=gt_dir,
evaluator_type="sem_seg",
ignore_label=255,
**novel_meta,
)
def register_all_voc_pseudo(root, pseudo_sem_dir):
    """Register the VOC train split backed by pseudo-label semantic masks.

    Images are read from the regular VOC2012 ``JPEGImages`` folder, while the
    semantic ground truth comes from *pseudo_sem_dir* instead of the official
    annotations. The registered name is ``voc_sem_seg_train_pseudo``.
    """
    voc_root = os.path.join(root, "VOC2012")
    meta = _get_voc_meta(CLASS_NAMES)
    base_meta = _get_voc_meta(BASE_CLASS_NAMES)
    novel_meta = _get_voc_meta(NOVEL_CLASS_NAMES)  # kept for parity with the other registrars

    splits = [("train", "JPEGImages", "annotations_detectron2/train")]
    for split, image_dirname, _unused_gt_dirname in splits:
        image_dir = os.path.join(voc_root, image_dirname)
        dataset_name = f"voc_sem_seg_{split}_pseudo"
        DatasetCatalog.register(
            dataset_name,
            # Bind the paths as defaults so the lambda does not capture loop vars.
            lambda x=image_dir, y=pseudo_sem_dir: load_sem_seg(
                y, x, gt_ext="png", image_ext="jpg"
            ),
        )
        base_names = base_meta["stuff_classes"]
        MetadataCatalog.get(dataset_name).set(
            image_root=image_dir,
            sem_seg_root=pseudo_sem_dir,
            evaluator_type="sem_seg",
            ignore_label=255,
            evaluation_set={
                "base": [meta["stuff_classes"].index(n) for n in base_names],
            },
            trainable_flag=[
                1 if n in base_names else 0 for n in meta["stuff_classes"]
            ],
            **meta,
        )
# Register the VOC datasets at import time. The dataset root comes from the
# DETECTRON2_DATASETS environment variable (default: "datasets"); the
# pseudo-label directory from DETECTRON2_SEM_PSEUDO (default: "output/inference").
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_voc_11k(_root)
_pseudo_dir = os.getenv("DETECTRON2_SEM_PSEUDO", "output/inference")
register_all_voc_pseudo(_root, _pseudo_dir)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/datasets/__init__.py | mask2former/data/datasets/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
from . import register_coco_stuff, register_voc_seg, register_coco_full, register_coco_panoptic | python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/datasets/register_ade20k_all_data.py | mask2former/data/datasets/register_ade20k_all_data.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.file_io import PathManager
from .utils import load_binary_mask
# The 150 ADE20K semantic categories. Each entry carries the official dataset
# "id", an RGB visualization "color", the class "name", and an "isthing" flag
# (1 = countable instance/thing, 0 = stuff/background region).
ADE20K_150_CATEGORIES = [
    {"color": [120, 120, 120], "id": 0, "isthing": 0, "name": "wall"},
    {"color": [180, 120, 120], "id": 1, "isthing": 0, "name": "building"},
    {"color": [6, 230, 230], "id": 2, "isthing": 0, "name": "sky"},
    {"color": [80, 50, 50], "id": 3, "isthing": 0, "name": "floor"},
    {"color": [4, 200, 3], "id": 4, "isthing": 0, "name": "tree"},
    {"color": [120, 120, 80], "id": 5, "isthing": 0, "name": "ceiling"},
    {"color": [140, 140, 140], "id": 6, "isthing": 0, "name": "road, route"},
    {"color": [204, 5, 255], "id": 7, "isthing": 1, "name": "bed"},
    {"color": [230, 230, 230], "id": 8, "isthing": 1, "name": "window "},
    {"color": [4, 250, 7], "id": 9, "isthing": 0, "name": "grass"},
    {"color": [224, 5, 255], "id": 10, "isthing": 1, "name": "cabinet"},
    {"color": [235, 255, 7], "id": 11, "isthing": 0, "name": "sidewalk, pavement"},
    {"color": [150, 5, 61], "id": 12, "isthing": 1, "name": "person"},
    {"color": [120, 120, 70], "id": 13, "isthing": 0, "name": "earth, ground"},
    {"color": [8, 255, 51], "id": 14, "isthing": 1, "name": "door"},
    {"color": [255, 6, 82], "id": 15, "isthing": 1, "name": "table"},
    {"color": [143, 255, 140], "id": 16, "isthing": 0, "name": "mountain, mount"},
    {"color": [204, 255, 4], "id": 17, "isthing": 0, "name": "plant"},
    {"color": [255, 51, 7], "id": 18, "isthing": 1, "name": "curtain"},
    {"color": [204, 70, 3], "id": 19, "isthing": 1, "name": "chair"},
    {"color": [0, 102, 200], "id": 20, "isthing": 1, "name": "car"},
    {"color": [61, 230, 250], "id": 21, "isthing": 0, "name": "water"},
    {"color": [255, 6, 51], "id": 22, "isthing": 1, "name": "painting, picture"},
    {"color": [11, 102, 255], "id": 23, "isthing": 1, "name": "sofa"},
    {"color": [255, 7, 71], "id": 24, "isthing": 1, "name": "shelf"},
    {"color": [255, 9, 224], "id": 25, "isthing": 0, "name": "house"},
    {"color": [9, 7, 230], "id": 26, "isthing": 0, "name": "sea"},
    {"color": [220, 220, 220], "id": 27, "isthing": 1, "name": "mirror"},
    {"color": [255, 9, 92], "id": 28, "isthing": 0, "name": "rug"},
    {"color": [112, 9, 255], "id": 29, "isthing": 0, "name": "field"},
    {"color": [8, 255, 214], "id": 30, "isthing": 1, "name": "armchair"},
    {"color": [7, 255, 224], "id": 31, "isthing": 1, "name": "seat"},
    {"color": [255, 184, 6], "id": 32, "isthing": 1, "name": "fence"},
    {"color": [10, 255, 71], "id": 33, "isthing": 1, "name": "desk"},
    {"color": [255, 41, 10], "id": 34, "isthing": 0, "name": "rock, stone"},
    {"color": [7, 255, 255], "id": 35, "isthing": 1, "name": "wardrobe, closet, press"},
    {"color": [224, 255, 8], "id": 36, "isthing": 1, "name": "lamp"},
    {"color": [102, 8, 255], "id": 37, "isthing": 1, "name": "tub"},
    {"color": [255, 61, 6], "id": 38, "isthing": 1, "name": "rail"},
    {"color": [255, 194, 7], "id": 39, "isthing": 1, "name": "cushion"},
    {"color": [255, 122, 8], "id": 40, "isthing": 0, "name": "base, pedestal, stand"},
    {"color": [0, 255, 20], "id": 41, "isthing": 1, "name": "box"},
    {"color": [255, 8, 41], "id": 42, "isthing": 1, "name": "column, pillar"},
    {"color": [255, 5, 153], "id": 43, "isthing": 1, "name": "signboard, sign"},
    {
        "color": [6, 51, 255],
        "id": 44,
        "isthing": 1,
        "name": "chest of drawers, chest, bureau, dresser",
    },
    {"color": [235, 12, 255], "id": 45, "isthing": 1, "name": "counter"},
    {"color": [160, 150, 20], "id": 46, "isthing": 0, "name": "sand"},
    {"color": [0, 163, 255], "id": 47, "isthing": 1, "name": "sink"},
    {"color": [140, 140, 140], "id": 48, "isthing": 0, "name": "skyscraper"},
    {"color": [250, 10, 15], "id": 49, "isthing": 1, "name": "fireplace"},
    {"color": [20, 255, 0], "id": 50, "isthing": 1, "name": "refrigerator, icebox"},
    {"color": [31, 255, 0], "id": 51, "isthing": 0, "name": "grandstand, covered stand"},
    {"color": [255, 31, 0], "id": 52, "isthing": 0, "name": "path"},
    {"color": [255, 224, 0], "id": 53, "isthing": 1, "name": "stairs"},
    {"color": [153, 255, 0], "id": 54, "isthing": 0, "name": "runway"},
    {"color": [0, 0, 255], "id": 55, "isthing": 1, "name": "case, display case, showcase, vitrine"},
    {
        "color": [255, 71, 0],
        "id": 56,
        "isthing": 1,
        "name": "pool table, billiard table, snooker table",
    },
    {"color": [0, 235, 255], "id": 57, "isthing": 1, "name": "pillow"},
    {"color": [0, 173, 255], "id": 58, "isthing": 1, "name": "screen door, screen"},
    {"color": [31, 0, 255], "id": 59, "isthing": 0, "name": "stairway, staircase"},
    {"color": [11, 200, 200], "id": 60, "isthing": 0, "name": "river"},
    {"color": [255, 82, 0], "id": 61, "isthing": 0, "name": "bridge, span"},
    {"color": [0, 255, 245], "id": 62, "isthing": 1, "name": "bookcase"},
    {"color": [0, 61, 255], "id": 63, "isthing": 0, "name": "blind, screen"},
    {"color": [0, 255, 112], "id": 64, "isthing": 1, "name": "coffee table"},
    {
        "color": [0, 255, 133],
        "id": 65,
        "isthing": 1,
        "name": "toilet, can, commode, crapper, pot, potty, stool, throne",
    },
    {"color": [255, 0, 0], "id": 66, "isthing": 1, "name": "flower"},
    {"color": [255, 163, 0], "id": 67, "isthing": 1, "name": "book"},
    {"color": [255, 102, 0], "id": 68, "isthing": 0, "name": "hill"},
    {"color": [194, 255, 0], "id": 69, "isthing": 1, "name": "bench"},
    {"color": [0, 143, 255], "id": 70, "isthing": 1, "name": "countertop"},
    {"color": [51, 255, 0], "id": 71, "isthing": 1, "name": "stove"},
    {"color": [0, 82, 255], "id": 72, "isthing": 1, "name": "palm, palm tree"},
    {"color": [0, 255, 41], "id": 73, "isthing": 1, "name": "kitchen island"},
    {"color": [0, 255, 173], "id": 74, "isthing": 1, "name": "computer"},
    {"color": [10, 0, 255], "id": 75, "isthing": 1, "name": "swivel chair"},
    {"color": [173, 255, 0], "id": 76, "isthing": 1, "name": "boat"},
    {"color": [0, 255, 153], "id": 77, "isthing": 0, "name": "bar"},
    {"color": [255, 92, 0], "id": 78, "isthing": 1, "name": "arcade machine"},
    {"color": [255, 0, 255], "id": 79, "isthing": 0, "name": "hovel, hut, hutch, shack, shanty"},
    {"color": [255, 0, 245], "id": 80, "isthing": 1, "name": "bus"},
    {"color": [255, 0, 102], "id": 81, "isthing": 1, "name": "towel"},
    {"color": [255, 173, 0], "id": 82, "isthing": 1, "name": "light"},
    {"color": [255, 0, 20], "id": 83, "isthing": 1, "name": "truck"},
    {"color": [255, 184, 184], "id": 84, "isthing": 0, "name": "tower"},
    {"color": [0, 31, 255], "id": 85, "isthing": 1, "name": "chandelier"},
    {"color": [0, 255, 61], "id": 86, "isthing": 1, "name": "awning, sunshade, sunblind"},
    {"color": [0, 71, 255], "id": 87, "isthing": 1, "name": "street lamp"},
    {"color": [255, 0, 204], "id": 88, "isthing": 1, "name": "booth"},
    {"color": [0, 255, 194], "id": 89, "isthing": 1, "name": "tv"},
    {"color": [0, 255, 82], "id": 90, "isthing": 1, "name": "plane"},
    {"color": [0, 10, 255], "id": 91, "isthing": 0, "name": "dirt track"},
    {"color": [0, 112, 255], "id": 92, "isthing": 1, "name": "clothes"},
    {"color": [51, 0, 255], "id": 93, "isthing": 1, "name": "pole"},
    {"color": [0, 194, 255], "id": 94, "isthing": 0, "name": "land, ground, soil"},
    {
        "color": [0, 122, 255],
        "id": 95,
        "isthing": 1,
        "name": "bannister, banister, balustrade, balusters, handrail",
    },
    {
        "color": [0, 255, 163],
        "id": 96,
        "isthing": 0,
        "name": "escalator, moving staircase, moving stairway",
    },
    {
        "color": [255, 153, 0],
        "id": 97,
        "isthing": 1,
        "name": "ottoman, pouf, pouffe, puff, hassock",
    },
    {"color": [0, 255, 10], "id": 98, "isthing": 1, "name": "bottle"},
    {"color": [255, 112, 0], "id": 99, "isthing": 0, "name": "buffet, counter, sideboard"},
    {
        "color": [143, 255, 0],
        "id": 100,
        "isthing": 0,
        "name": "poster, posting, placard, notice, bill, card",
    },
    {"color": [82, 0, 255], "id": 101, "isthing": 0, "name": "stage"},
    {"color": [163, 255, 0], "id": 102, "isthing": 1, "name": "van"},
    {"color": [255, 235, 0], "id": 103, "isthing": 1, "name": "ship"},
    {"color": [8, 184, 170], "id": 104, "isthing": 1, "name": "fountain"},
    {
        "color": [133, 0, 255],
        "id": 105,
        "isthing": 0,
        "name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
    },
    {"color": [0, 255, 92], "id": 106, "isthing": 0, "name": "canopy"},
    {
        "color": [184, 0, 255],
        "id": 107,
        "isthing": 1,
        "name": "washer, automatic washer, washing machine",
    },
    {"color": [255, 0, 31], "id": 108, "isthing": 1, "name": "plaything, toy"},
    {"color": [0, 184, 255], "id": 109, "isthing": 0, "name": "pool"},
    {"color": [0, 214, 255], "id": 110, "isthing": 1, "name": "stool"},
    {"color": [255, 0, 112], "id": 111, "isthing": 1, "name": "barrel, cask"},
    {"color": [92, 255, 0], "id": 112, "isthing": 1, "name": "basket, handbasket"},
    {"color": [0, 224, 255], "id": 113, "isthing": 0, "name": "falls"},
    {"color": [112, 224, 255], "id": 114, "isthing": 0, "name": "tent"},
    {"color": [70, 184, 160], "id": 115, "isthing": 1, "name": "bag"},
    {"color": [163, 0, 255], "id": 116, "isthing": 1, "name": "minibike, motorbike"},
    {"color": [153, 0, 255], "id": 117, "isthing": 0, "name": "cradle"},
    {"color": [71, 255, 0], "id": 118, "isthing": 1, "name": "oven"},
    {"color": [255, 0, 163], "id": 119, "isthing": 1, "name": "ball"},
    {"color": [255, 204, 0], "id": 120, "isthing": 1, "name": "food, solid food"},
    {"color": [255, 0, 143], "id": 121, "isthing": 1, "name": "step, stair"},
    {"color": [0, 255, 235], "id": 122, "isthing": 0, "name": "tank, storage tank"},
    {"color": [133, 255, 0], "id": 123, "isthing": 1, "name": "trade name"},
    {"color": [255, 0, 235], "id": 124, "isthing": 1, "name": "microwave"},
    {"color": [245, 0, 255], "id": 125, "isthing": 1, "name": "pot"},
    {"color": [255, 0, 122], "id": 126, "isthing": 1, "name": "animal"},
    {"color": [255, 245, 0], "id": 127, "isthing": 1, "name": "bicycle"},
    {"color": [10, 190, 212], "id": 128, "isthing": 0, "name": "lake"},
    {"color": [214, 255, 0], "id": 129, "isthing": 1, "name": "dishwasher"},
    {"color": [0, 204, 255], "id": 130, "isthing": 1, "name": "screen"},
    {"color": [20, 0, 255], "id": 131, "isthing": 0, "name": "blanket, cover"},
    {"color": [255, 255, 0], "id": 132, "isthing": 1, "name": "sculpture"},
    {"color": [0, 153, 255], "id": 133, "isthing": 1, "name": "hood, exhaust hood"},
    {"color": [0, 41, 255], "id": 134, "isthing": 1, "name": "sconce"},
    {"color": [0, 255, 204], "id": 135, "isthing": 1, "name": "vase"},
    {"color": [41, 0, 255], "id": 136, "isthing": 1, "name": "traffic light"},
    {"color": [41, 255, 0], "id": 137, "isthing": 1, "name": "tray"},
    {"color": [173, 0, 255], "id": 138, "isthing": 1, "name": "trash can"},
    {"color": [0, 245, 255], "id": 139, "isthing": 1, "name": "fan"},
    {"color": [71, 0, 255], "id": 140, "isthing": 0, "name": "pier"},
    {"color": [122, 0, 255], "id": 141, "isthing": 0, "name": "crt screen"},
    {"color": [0, 255, 184], "id": 142, "isthing": 1, "name": "plate"},
    {"color": [0, 92, 255], "id": 143, "isthing": 1, "name": "monitor"},
    {"color": [184, 255, 0], "id": 144, "isthing": 1, "name": "bulletin board"},
    {"color": [0, 133, 255], "id": 145, "isthing": 0, "name": "shower"},
    {"color": [255, 214, 0], "id": 146, "isthing": 1, "name": "radiator"},
    {"color": [25, 194, 194], "id": 147, "isthing": 1, "name": "glass, drinking glass"},
    {"color": [102, 255, 0], "id": 148, "isthing": 1, "name": "clock"},
    {"color": [92, 0, 255], "id": 149, "isthing": 1, "name": "flag"},
]
# Visualization palette in category order; attach it to the already-registered
# semantic-segmentation splits.
ADE20k_COLORS = [k["color"] for k in ADE20K_150_CATEGORIES]
MetadataCatalog.get("ade20k_sem_seg_train").set(
    stuff_colors=ADE20k_COLORS[:],
)
MetadataCatalog.get("ade20k_sem_seg_val").set(
    stuff_colors=ADE20k_COLORS[:],
)
# Open-vocabulary split: the 15 category ids held out as "novel"; the
# remaining 135 categories form the "base" (seen) set.
NOVEL_ID = [9, 15, 30, 37, 49, 60, 74, 81, 89, 99, 112, 128, 136, 143, 149]
BASE_ADE20K_CATEGORIES = [cat for cat in ADE20K_150_CATEGORIES if cat["id"] not in NOVEL_ID]
NOVEL_ADE20K_CATEGORIES = [cat for cat in ADE20K_150_CATEGORIES if cat["id"] in NOVEL_ID]
def load_ade20k_json(json_file, image_dir, gt_dir, semseg_dir, meta):
    """Load COCO-style panoptic ADE20k annotations into Detectron2 format.

    Args:
        json_file (str): path to the panoptic json annotation file.
        image_dir (str): directory containing the raw ``.jpg`` images.
        gt_dir (str): directory containing the panoptic label PNGs.
        semseg_dir (str): directory containing the semantic label PNGs.
        meta (dict): metadata providing the ``*_dataset_id_to_contiguous_id``
            lookup tables.

    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    """

    def _remap(segment_info):
        # Translate the original dataset category id into a contiguous id and
        # tag the segment as thing or stuff depending on which table matches.
        cat_id = segment_info["category_id"]
        thing_map = meta["thing_dataset_id_to_contiguous_id"]
        if cat_id in thing_map:
            segment_info["category_id"] = thing_map[cat_id]
            segment_info["isthing"] = True
        else:
            segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
                cat_id
            ]
            segment_info["isthing"] = False
        return segment_info

    with PathManager.open(json_file) as f:
        json_info = json.load(f)

    dataset_dicts = []
    for anno in json_info["annotations"]:
        stem = os.path.splitext(anno["file_name"])[0]
        dataset_dicts.append(
            {
                "file_name": os.path.join(image_dir, stem + ".jpg"),
                "image_id": anno["image_id"],
                "pan_seg_file_name": os.path.join(gt_dir, anno["file_name"]),
                "sem_seg_file_name": os.path.join(semseg_dir, anno["file_name"]),
                "segments_info": [_remap(s) for s in anno["segments_info"]],
            }
        )

    # Sanity-check that the dataset is non-empty and the first record's files
    # actually exist on disk.
    assert len(dataset_dicts), f"No images found in {image_dir}!"
    first = dataset_dicts[0]
    assert PathManager.isfile(first["file_name"]), first["file_name"]
    assert PathManager.isfile(first["pan_seg_file_name"]), first["pan_seg_file_name"]
    assert PathManager.isfile(first["sem_seg_file_name"]), first["sem_seg_file_name"]
    return dataset_dicts
def register_ade20k_all(
    name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None
):
    """
    Register a "standard" version of ADE20k segmentation dataset named `name`,
    plus its companions: `name + "_classification"`, and the zero-shot splits
    `name + "_base"` / `name + "_novel"` (each with a `_classification` twin).
    The dictionaries in this registered dataset follow detectron2's standard
    format, hence "standard".
    Args:
        name (str): the name that identifies a dataset,
        metadata (dict): extra metadata associated with this dataset.
        image_root (str): directory which contains all the images
        panoptic_root (str): directory which contains panoptic annotation images in COCO format
        semantic_root (str): directory which contains semantic annotation images
        panoptic_json (str): path to the json panoptic annotation file in COCO format
        instances_json (str): path to the json instance annotation file
    """
    all_meta = metadata
    base_meta = get_metadata(BASE_ADE20K_CATEGORIES)
    novel_meta = get_metadata(NOVEL_ADE20K_CATEGORIES)

    # ---- full dataset (all 150 classes) ----
    DatasetCatalog.register(
        name,
        lambda: load_ade20k_json(
            panoptic_json, image_root, panoptic_root, semantic_root, all_meta
        ),
    )
    MetadataCatalog.get(name).set(
        panoptic_root=panoptic_root,
        image_root=image_root,
        panoptic_json=panoptic_json,
        json_file=instances_json,
        evaluator_type="ade20k_panoptic_seg",
        ignore_label=255,
        label_divisor=1000,
        evaluation_set={
            "base": [
                all_meta["stuff_classes"].index(n) for n in base_meta["stuff_classes"]
            ],
            # BUGFIX: the thing/stuff flag must be looked up in the novel
            # category list. Previously ADE20K_150_CATEGORIES[i] was indexed
            # with i running over the 15-entry novel list, so the flags came
            # from the first 15 full-dataset categories instead.
            "novel_thing": [
                all_meta["stuff_classes"].index(n)
                for i, n in enumerate(novel_meta["stuff_classes"])
                if NOVEL_ADE20K_CATEGORIES[i].get("isthing", 0) == 1
            ],
            "novel_stuff": [
                all_meta["stuff_classes"].index(n)
                for i, n in enumerate(novel_meta["stuff_classes"])
                if NOVEL_ADE20K_CATEGORIES[i].get("isthing", 0) == 0
            ],
        },
        # 1 for classes seen during training (base), 0 for held-out (novel).
        trainable_flag=[
            1 if n in base_meta["stuff_classes"] else 0
            for n in all_meta["stuff_classes"]
        ],
        **all_meta,
    )
    # classification
    DatasetCatalog.register(
        name + "_classification",
        lambda x=image_root, y=semantic_root: load_binary_mask(
            y, x, gt_ext="png", image_ext="jpg"
        ),
    )
    MetadataCatalog.get(name + "_classification").set(
        panoptic_root=panoptic_root,
        image_root=image_root,
        panoptic_json=panoptic_json,
        json_file=instances_json,
        evaluator_type="classification",
        ignore_label=255,
        label_divisor=1000,
        trainable_flag=[
            1 if n in base_meta["stuff_classes"] else 0
            for n in all_meta["stuff_classes"]
        ],
        **all_meta,
    )

    # ---- zero shot: base (seen) classes only ----
    semantic_root_base = semantic_root + "_base"
    panoptic_json_base = panoptic_json[:-5] + "_base.json"
    panoptic_root_base = panoptic_root + "_base"
    # BUGFIX: was `instances_json_base = instances_json = "_base"`, which set
    # BOTH names to the literal string "_base" and clobbered instances_json
    # for the novel section below. Derive the path like panoptic_json instead.
    instances_json_base = (
        instances_json[:-5] + "_base.json" if instances_json else instances_json
    )
    DatasetCatalog.register(
        name + "_base",
        lambda: load_ade20k_json(
            panoptic_json_base, image_root, panoptic_root_base, semantic_root_base, base_meta
        ),
    )
    MetadataCatalog.get(name + "_base").set(
        panoptic_root=panoptic_root_base,
        image_root=image_root,
        panoptic_json=panoptic_json_base,
        json_file=instances_json_base,
        evaluator_type="ade20k_panoptic_seg",
        ignore_label=255,
        label_divisor=1000,
        **base_meta,
    )
    # cls
    DatasetCatalog.register(
        name + "_base_classification",
        lambda x=image_root, y=semantic_root_base: load_binary_mask(
            y, x, gt_ext="png", image_ext="jpg"
        ),
    )
    MetadataCatalog.get(name + "_base_classification").set(
        panoptic_root=panoptic_root_base,
        image_root=image_root,
        panoptic_json=panoptic_json_base,
        json_file=instances_json_base,
        evaluator_type="classification",
        ignore_label=255,
        label_divisor=1000,
        **base_meta,
    )

    # ---- zero shot: novel (unseen) classes only ----
    semantic_root_novel = semantic_root + "_novel"
    panoptic_json_novel = panoptic_json[:-5] + "_novel.json"
    panoptic_root_novel = panoptic_root + "_novel"
    # BUGFIX: same chained-assignment typo as the base split (see above).
    instances_json_novel = (
        instances_json[:-5] + "_novel.json" if instances_json else instances_json
    )
    DatasetCatalog.register(
        name + "_novel",
        lambda: load_ade20k_json(
            panoptic_json_novel, image_root, panoptic_root_novel, semantic_root_novel, novel_meta
        ),
    )
    MetadataCatalog.get(name + "_novel").set(
        panoptic_root=panoptic_root_novel,
        image_root=image_root,
        panoptic_json=panoptic_json_novel,
        json_file=instances_json_novel,
        evaluator_type="ade20k_panoptic_seg",
        ignore_label=255,
        label_divisor=1000,
        **novel_meta,
    )
    # cls
    DatasetCatalog.register(
        name + "_novel_classification",
        lambda x=image_root, y=semantic_root_novel: load_binary_mask(
            y, x, gt_ext="png", image_ext="jpg"
        ),
    )
    MetadataCatalog.get(name + "_novel_classification").set(
        panoptic_root=panoptic_root_novel,
        image_root=image_root,
        panoptic_json=panoptic_json_novel,
        json_file=instances_json_novel,
        # BUGFIX: the base classification twin uses "classification"; this one
        # said "ade20k_panoptic_seg", which routes a binary-mask dataset to
        # the wrong evaluator.
        evaluator_type="classification",
        ignore_label=255,
        label_divisor=1000,
        **novel_meta,
    )
# Predefined ADE20k splits: dataset name ->
#   (image_root, panoptic_root, panoptic_json, semantic_root, instance_json),
# all paths relative to the detectron2 dataset root. The tuple order must
# match the unpacking in register_all_ade20k below.
_PREDEFINED_SPLITS_ADE20K = {
    "ade20k_all_train": (
        "ADEChallengeData2016/images/training",
        "ADEChallengeData2016/ade20k_panoptic_train",
        "ADEChallengeData2016/ade20k_panoptic_train.json",
        "ADEChallengeData2016/annotations_detectron2/training",
        "ADEChallengeData2016/ade20k_instance_train.json",
    ),
    "ade20k_all_val": (
        "ADEChallengeData2016/images/validation",
        "ADEChallengeData2016/ade20k_panoptic_val",
        "ADEChallengeData2016/ade20k_panoptic_val.json",
        "ADEChallengeData2016/annotations_detectron2/validation",
        "ADEChallengeData2016/ade20k_instance_val.json",
    ),
}
def get_metadata(cat_list):
    """Build Detectron2-style metadata (names, colors, id maps) for *cat_list*.

    The same name/color appears under both "thing_*" and "stuff_*" keys
    because D2's visualizers handle thing and stuff classes differently
    (a heuristic inherited from Panoptic FPN); keeping both lets existing
    visualization code be reused unchanged.

    Contiguous ids are simply positions in *cat_list* ([0, #classes)), used
    to train the linear classifier; the original (possibly non-contiguous)
    dataset ids are kept as dictionary keys for evaluation.
    """
    things = [cat for cat in cat_list if cat["isthing"] == 1]
    meta = {
        "thing_classes": [cat["name"] for cat in things],
        "thing_colors": [cat["color"] for cat in things],
        "stuff_classes": [cat["name"] for cat in cat_list],
        "stuff_colors": [cat["color"] for cat in cat_list],
    }
    meta["thing_dataset_id_to_contiguous_id"] = {
        cat["id"]: idx for idx, cat in enumerate(cat_list) if cat["isthing"]
    }
    meta["stuff_dataset_id_to_contiguous_id"] = {
        cat["id"]: idx for idx, cat in enumerate(cat_list)
    }
    return meta
def register_all_ade20k(root):
    """Register every predefined ADE20k split, resolving paths under *root*."""
    metadata = get_metadata(ADE20K_150_CATEGORIES)
    for prefix, paths in _PREDEFINED_SPLITS_ADE20K.items():
        image_root, panoptic_root, panoptic_json, semantic_root, instance_json = paths
        register_ade20k_all(
            prefix,
            metadata,
            os.path.join(root, image_root),
            os.path.join(root, panoptic_root),
            os.path.join(root, semantic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, instance_json),
        )
# Register the ADE20k splits at import time; the dataset root defaults to
# "datasets" unless the DETECTRON2_DATASETS environment variable is set.
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k(_root)
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/coco_full_task_new_baseline_dataset_mapper.py | mask2former/data/dataset_mappers/coco_full_task_new_baseline_dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/d2/detr/dataset_mapper.py
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Boxes, Instances
__all__ = ["COCOFullTaskNewBaselineDatasetMapper"]
def build_transform_gen(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.
    Returns:
        list[Augmentation]: optional random flip, then a random rescale into
        [MIN_SCALE, MAX_SCALE] of IMAGE_SIZE, then a fixed-size crop/pad.
    """
    assert is_train, "Only support training augmentation"
    image_size = cfg.INPUT.IMAGE_SIZE
    flip_mode = cfg.INPUT.RANDOM_FLIP

    augmentation = []
    if flip_mode != "none":
        augmentation.append(
            T.RandomFlip(
                horizontal=flip_mode == "horizontal",
                vertical=flip_mode == "vertical",
            )
        )
    # Large-scale-jitter style resize followed by a square crop/pad.
    augmentation.append(
        T.ResizeScale(
            min_scale=cfg.INPUT.MIN_SCALE,
            max_scale=cfg.INPUT.MAX_SCALE,
            target_height=image_size,
            target_width=image_size,
        )
    )
    augmentation.append(T.FixedSizeCrop(crop_size=(image_size, image_size)))
    return augmentation
# This is specifically designed for the COCO dataset.
class COCOFullTaskNewBaselineDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into the format used by MaskFormer for joint panoptic,
    instance, and semantic segmentation training.
    The callable currently does the following:
    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        tfm_gens,
        image_format,
        ignore_label,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            is_train: for training or inference
            tfm_gens: data augmentation (list of transform generators)
            image_format: an image format supported by :func:`detection_utils.read_image`.
            ignore_label: label value excluded from the semantic ground truth
            size_divisibility: pad image/sem-seg to this size when > 0
        """
        self.tfm_gens = tfm_gens
        # NOTE(review): the log tag says "COCOPanopticSemNewBaselineDatasetMapper"
        # — looks copy-pasted from a sibling mapper; the class is
        # COCOFullTaskNewBaselineDatasetMapper.
        logging.getLogger(__name__).info(
            "[COCOPanopticSemNewBaselineDatasetMapper] Full TransformGens used in training: {}".format(
                str(self.tfm_gens)
            )
        )
        self.img_format = image_format
        self.is_train = is_train
        self.ignore_label = ignore_label
        self.size_divisibility = size_divisibility

    @classmethod
    def from_config(cls, cfg, is_train=True):
        """Build the mapper's constructor kwargs from a detectron2 config."""
        # Build augmentation
        # NOTE(review): tfm_gens is computed but never used below — the "augs"
        # list built next is what actually ends up in the returned dict.
        tfm_gens = build_transform_gen(cfg, is_train)
        # Build augmentation
        if is_train:
            augs = [
                T.ResizeShortestEdge(
                    cfg.INPUT.MIN_SIZE_TRAIN,
                    cfg.INPUT.MAX_SIZE_TRAIN,
                    cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
                )
            ]
            if cfg.INPUT.CROP.ENABLED:
                augs.append(
                    T.RandomCrop_CategoryAreaConstraint(
                        cfg.INPUT.CROP.TYPE,
                        cfg.INPUT.CROP.SIZE,
                        cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
                        cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    )
                )
            if cfg.INPUT.COLOR_AUG_SSD:
                augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
            augs.append(T.RandomFlip())
            # Assume always applies to the training set.
            dataset_names = cfg.DATASETS.TRAIN
        else:
            min_size = cfg.INPUT.MIN_SIZE_TEST
            max_size = cfg.INPUT.MAX_SIZE_TEST
            sample_style = "choice"
            augs = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
            dataset_names = cfg.DATASETS.TEST
        meta = MetadataCatalog.get(dataset_names[0])
        ignore_label = meta.ignore_label
        ret = {
            "is_train": is_train,
            "tfm_gens": augs,
            "image_format": cfg.INPUT.FORMAT,
            "ignore_label": ignore_label,
            # Padding is only applied during training.
            "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY if is_train else -1,
        }
        return ret

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)
        if "sem_seg_file_name" in dataset_dict:
            # PyTorch transformation not implemented for uint16, so converting it to double first
            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype(
                "double"
            )
        else:
            sem_seg_gt = None
        # Apply the same random transforms to both image and semantic GT.
        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        sem_seg_gt = aug_input.sem_seg
        image_shape = image.shape[:2]  # h, w
        if not self.is_train:
            # USER: Modify this if you want to keep them for some reason.
            dataset_dict.pop("annotations", None)
            return dataset_dict
        if "pan_seg_file_name" in dataset_dict:
            pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
            segments_info = dataset_dict["segments_info"]
            # apply the same transformation to panoptic segmentation
            pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)
            from panopticapi.utils import rgb2id

            # Decode the RGB-encoded panoptic PNG into per-pixel segment ids.
            pan_seg_gt = rgb2id(pan_seg_gt)
            # ---- panoptic instances: every non-crowd segment (thing or stuff) ----
            instances = Instances(image_shape)
            classes = []
            masks = []
            for segment_info in segments_info:
                class_id = segment_info["category_id"]
                if not segment_info["iscrowd"]:
                    classes.append(class_id)
                    masks.append(pan_seg_gt == segment_info["id"])
            classes = np.array(classes)
            instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
            if len(masks) == 0:
                # Some image does not have annotation (all ignored)
                instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
                instances.gt_boxes = Boxes(torch.zeros((0, 4)))
            else:
                masks = BitMasks(
                    torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
                )
                instances.gt_masks = masks.tensor
                instances.gt_boxes = masks.get_bounding_boxes()
            dataset_dict["pan_instances"] = instances
            ###### for instance segmentation
            # Same as above but restricted to non-crowd "thing" segments.
            ins_instances = Instances(image_shape)
            classes = []
            masks = []
            for segment_info in segments_info:
                class_id = segment_info["category_id"]
                if not segment_info["iscrowd"] and segment_info["isthing"]:
                    classes.append(class_id)
                    masks.append(pan_seg_gt == segment_info["id"])
            classes = np.array(classes)
            ins_instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
            if len(masks) == 0:
                # Some image does not have annotation (all ignored)
                ins_instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
                ins_instances.gt_boxes = Boxes(torch.zeros((0, 4)))
            else:
                masks = BitMasks(
                    torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
                )
                ins_instances.gt_masks = masks.tensor
                ins_instances.gt_boxes = masks.get_bounding_boxes()
            dataset_dict["ins_instances"] = ins_instances
            # semantic segmentation
            # NOTE(review): dataset_dict["image"] is only populated inside this
            # branch — a record without a sem_seg file would leave "image"
            # unset; presumably the registered datasets always provide it.
            if sem_seg_gt is not None:
                image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
                sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
                if self.size_divisibility > 0:
                    image_size = (image.shape[-2], image.shape[-1])
                    # Pad right/bottom up to size_divisibility.
                    # NOTE(review): assumes each side is <= size_divisibility,
                    # otherwise the computed pad is negative — TODO confirm.
                    padding_size = [
                        0,
                        self.size_divisibility - image_size[1],
                        0,
                        self.size_divisibility - image_size[0],
                    ]
                    image = F.pad(image, padding_size, value=128).contiguous()
                    sem_seg_gt = F.pad(
                        sem_seg_gt, padding_size, value=self.ignore_label
                    ).contiguous()
                image_shape = (image.shape[-2], image.shape[-1])  # h, w
                dataset_dict["image"] = image
                dataset_dict["sem_seg"] = sem_seg_gt.long()
                # Prepare per-category binary masks
                sem_seg_gt = sem_seg_gt.numpy()
                sem_seg_instances = Instances(image_shape)
                sem_classes = np.unique(sem_seg_gt)
                # remove ignored region
                sem_classes = sem_classes[sem_classes != self.ignore_label]
                sem_seg_instances.gt_classes = torch.tensor(sem_classes, dtype=torch.int64)
                sem_masks = []
                for class_id in sem_classes:
                    sem_masks.append(sem_seg_gt == class_id)
                if len(sem_masks) == 0:
                    # Some image does not have annotation (all ignored)
                    sem_seg_instances.gt_masks = torch.zeros(
                        (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])
                    )
                else:
                    sem_masks = BitMasks(
                        torch.stack(
                            [
                                torch.from_numpy(np.ascontiguousarray(x.copy()))
                                for x in sem_masks
                            ]
                        )
                    )
                    sem_seg_instances.gt_masks = sem_masks.tensor
                dataset_dict["sem_instances"] = sem_seg_instances
        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/mask_former_binary_semantic_dataset_mapper.py | mask2former/data/dataset_mappers/mask_former_binary_semantic_dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances
from ..augmentations import CropImageWithMask, RandomResizedCrop, CenterCrop
__all__ = ["MaskFormerBinarySemanticDatasetMapper"]
class MaskFormerBinarySemanticDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by MaskFormer for binary semantic
    segmentation: each sample targets a single category id
    (``dataset_dict["category_id"]``) and yields exactly one binary mask.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        augmentations,
        image_format,
        ignore_label,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            is_train: for training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            ignore_label: the label that is ignored to evaluation
            size_divisibility: pad image size to be divisible by this value
        """
        self.is_train = is_train
        self.tfm_gens = augmentations
        self.img_format = image_format
        self.ignore_label = ignore_label
        self.size_divisibility = size_divisibility

        logger = logging.getLogger(__name__)
        mode = "training" if is_train else "inference"
        logger.info(
            f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}"
        )

    @classmethod
    def from_config(cls, cfg, is_train=True):
        """Build the mapper's constructor arguments from a detectron2 config."""
        # Before any other augmentation, crop the image around the selected
        # mask with an expand ratio so the binary target stays in view.
        augs = [CropImageWithMask(cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO)]
        if is_train:
            augs.append(RandomResizedCrop(cfg.INPUT.MIN_SIZE_TRAIN))
            augs.append(T.RandomFlip())
            # Assume always applies to the training set.
            dataset_names = cfg.DATASETS.TRAIN
        else:
            min_size = cfg.INPUT.MIN_SIZE_TEST
            dataset_names = cfg.DATASETS.TEST
        # Look up the ignore label once from the first dataset's metadata.
        # (The original queried MetadataCatalog twice in the test branch.)
        meta = MetadataCatalog.get(dataset_names[0])
        ignore_label = meta.ignore_label
        if not is_train:
            augs.append(CenterCrop(min_size, seg_ignore_label=ignore_label))
        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "ignore_label": ignore_label,
            "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY,
        }
        return ret

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept, or ``None``
            when the selected category has no pixels left after augmentation
            (the sample is dropped).
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)
        if "sem_seg_file_name" in dataset_dict:
            # PyTorch transformation not implemented for uint16, so converting it to double first
            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype(
                "double"
            )
        else:
            sem_seg_gt = None
        if sem_seg_gt is None:
            raise ValueError(
                "Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.".format(
                    dataset_dict["file_name"]
                )
            )

        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        # CropImageWithMask reads the target category off the AugInput.
        aug_input.category_id = dataset_dict["category_id"]
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        sem_seg_gt = aug_input.sem_seg

        # Pad image and segmentation label here!
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if sem_seg_gt is not None:
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
        if self.size_divisibility > 0:
            image_size = (image.shape[-2], image.shape[-1])
            # F.pad order is (left, right, top, bottom): pad right/bottom only.
            padding_size = [
                0,
                self.size_divisibility - image_size[1],
                0,
                self.size_divisibility - image_size[0],
            ]
            image = F.pad(image, padding_size, value=128).contiguous()
            if sem_seg_gt is not None:
                sem_seg_gt = F.pad(
                    sem_seg_gt, padding_size, value=self.ignore_label
                ).contiguous()

        image_shape = (image.shape[-2], image.shape[-1])  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = image
        if sem_seg_gt is not None:
            dataset_dict["sem_seg"] = sem_seg_gt.long()

        if "annotations" in dataset_dict:
            raise ValueError(
                "Semantic segmentation dataset should not have 'annotations'."
            )

        # Prepare the single per-category binary mask.
        if sem_seg_gt is not None:
            sem_seg_gt = sem_seg_gt.numpy()
            instances = Instances(image_shape)
            instances.gt_classes = torch.tensor(
                [dataset_dict["category_id"]], dtype=torch.int64
            )
            mask = sem_seg_gt == dataset_dict["category_id"]
            if mask.sum() == 0:
                # Target category vanished (e.g. cropped away): drop sample.
                return None
            # NOTE: the original also carried an unreachable len(masks) == 0
            # branch; exactly one mask is ever built here, so it was removed.
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(mask.copy()))])
            )
            instances.gt_masks = masks.tensor
            dataset_dict["instances"] = instances
        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py | mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances, polygons_to_bitmask
__all__ = ["MaskFormerInstanceDatasetMapper"]
class MaskFormerInstanceDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by MaskFormer for instance segmentation.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        augmentations,
        image_format,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            is_train: for training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            size_divisibility: pad image size to be divisible by this value
        """
        self.is_train = is_train
        self.tfm_gens = augmentations
        self.img_format = image_format
        self.size_divisibility = size_divisibility

        logger = logging.getLogger(__name__)
        mode = "training" if is_train else "inference"
        logger.info(f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}")

    @classmethod
    def from_config(cls, cfg, is_train=True):
        """Build the mapper's constructor arguments from a detectron2 config."""
        # Build augmentation
        augs = [
            T.ResizeShortestEdge(
                cfg.INPUT.MIN_SIZE_TRAIN,
                cfg.INPUT.MAX_SIZE_TRAIN,
                cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
            )
        ]
        if cfg.INPUT.CROP.ENABLED:
            augs.append(
                T.RandomCrop(
                    cfg.INPUT.CROP.TYPE,
                    cfg.INPUT.CROP.SIZE,
                )
            )
        if cfg.INPUT.COLOR_AUG_SSD:
            augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
        augs.append(T.RandomFlip())

        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY,
        }
        return ret

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        # BUGFIX: the assertion message previously named
        # "MaskFormerPanopticDatasetMapper", making failures misleading.
        assert self.is_train, "MaskFormerInstanceDatasetMapper should only be used for training!"

        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        aug_input = T.AugInput(image)
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image

        # transform instance masks
        assert "annotations" in dataset_dict
        for anno in dataset_dict["annotations"]:
            anno.pop("keypoints", None)
        # Crowd regions are excluded from training targets.
        annos = [
            utils.transform_instance_annotations(obj, transforms, image.shape[:2])
            for obj in dataset_dict.pop("annotations")
            if obj.get("iscrowd", 0) == 0
        ]

        if len(annos):
            assert "segmentation" in annos[0]
        segms = [obj["segmentation"] for obj in annos]
        masks = []
        for segm in segms:
            if isinstance(segm, list):
                # polygon
                masks.append(polygons_to_bitmask(segm, *image.shape[:2]))
            elif isinstance(segm, dict):
                # COCO RLE
                masks.append(mask_util.decode(segm))
            elif isinstance(segm, np.ndarray):
                assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                    segm.ndim
                )
                # mask array
                masks.append(segm)
            else:
                raise ValueError(
                    "Cannot convert segmentation of type '{}' to BitMasks!"
                    "Supported types are: polygons as list[list[float] or ndarray],"
                    " COCO-style RLE as a dict, or a binary segmentation mask "
                    " in a 2D numpy array of shape HxW.".format(type(segm))
                )

        # Pad image and segmentation label here!
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]

        classes = [int(obj["category_id"]) for obj in annos]
        classes = torch.tensor(classes, dtype=torch.int64)

        if self.size_divisibility > 0:
            image_size = (image.shape[-2], image.shape[-1])
            # F.pad order is (left, right, top, bottom): pad right/bottom only.
            padding_size = [
                0,
                self.size_divisibility - image_size[1],
                0,
                self.size_divisibility - image_size[0],
            ]
            # pad image
            image = F.pad(image, padding_size, value=128).contiguous()
            # pad mask
            masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]

        image_shape = (image.shape[-2], image.shape[-1])  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = image

        # Prepare per-instance binary masks
        instances = Instances(image_shape)
        instances.gt_classes = classes
        if len(masks) == 0:
            # Some image does not have annotation (all ignored)
            instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))
        else:
            masks = BitMasks(torch.stack(masks))
            instances.gt_masks = masks.tensor
        # NOTE: stored under "ins_instances" (not "instances") — downstream
        # multi-task code distinguishes instance targets by this key.
        dataset_dict["ins_instances"] = instances

        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py | mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances
__all__ = ["MaskFormerSemanticDatasetMapper"]
class MaskFormerSemanticDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by MaskFormer for semantic segmentation.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        augmentations,
        image_format,
        ignore_label,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            is_train: for training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            ignore_label: the label that is ignored to evaluation
            size_divisibility: pad image size to be divisible by this value
        """
        self.is_train = is_train
        self.tfm_gens = augmentations
        self.img_format = image_format
        self.ignore_label = ignore_label
        self.size_divisibility = size_divisibility

        logger = logging.getLogger(__name__)
        mode = "training" if is_train else "inference"
        logger.info(
            f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}"
        )

    @classmethod
    def from_config(cls, cfg, is_train=True):
        """Build the mapper's constructor arguments from a detectron2 config."""
        # Build augmentation
        if is_train:
            augs = [
                T.ResizeShortestEdge(
                    cfg.INPUT.MIN_SIZE_TRAIN,
                    cfg.INPUT.MAX_SIZE_TRAIN,
                    cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
                )
            ]
            if cfg.INPUT.CROP.ENABLED:
                # Crop with a single-category area constraint so one class
                # (or the ignore region) cannot dominate the whole crop.
                augs.append(
                    T.RandomCrop_CategoryAreaConstraint(
                        cfg.INPUT.CROP.TYPE,
                        cfg.INPUT.CROP.SIZE,
                        cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
                        cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    )
                )
            if cfg.INPUT.COLOR_AUG_SSD:
                augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
            augs.append(T.RandomFlip())
            # Assume always applies to the training set.
            dataset_names = cfg.DATASETS.TRAIN
        else:
            # Inference path: deterministic shortest-edge resize only.
            min_size = cfg.INPUT.MIN_SIZE_TEST
            max_size = cfg.INPUT.MAX_SIZE_TEST
            sample_style = "choice"
            augs = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
            dataset_names = cfg.DATASETS.TEST
        # Ignore label comes from the first dataset's metadata.
        meta = MetadataCatalog.get(dataset_names[0])
        ignore_label = meta.ignore_label

        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "ignore_label": ignore_label,
            # -1 disables size-divisibility padding at inference time.
            "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY if is_train else -1,
        }
        return ret

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        # assert self.is_train, "MaskFormerSemanticDatasetMapper should only be used for training!"

        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)
        if "sem_seg_file_name" in dataset_dict:
            # PyTorch transformation not implemented for uint16, so converting it to double first
            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype(
                "double"
            )
        else:
            sem_seg_gt = None
        if sem_seg_gt is None:
            raise ValueError(
                "Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.".format(
                    dataset_dict["file_name"]
                )
            )

        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        sem_seg_gt = aug_input.sem_seg

        # Pad image and segmentation label here!
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if sem_seg_gt is not None:
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
        if self.size_divisibility > 0:
            image_size = (image.shape[-2], image.shape[-1])
            # F.pad order is (left, right, top, bottom): pad right/bottom only.
            padding_size = [
                0,
                self.size_divisibility - image_size[1],
                0,
                self.size_divisibility - image_size[0],
            ]
            image = F.pad(image, padding_size, value=128).contiguous()
            if sem_seg_gt is not None:
                # Padded label pixels are marked as ignore.
                sem_seg_gt = F.pad(
                    sem_seg_gt, padding_size, value=self.ignore_label
                ).contiguous()

        image_shape = (image.shape[-2], image.shape[-1])  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = image
        if sem_seg_gt is not None:
            dataset_dict["sem_seg"] = sem_seg_gt.long()

        if "annotations" in dataset_dict:
            raise ValueError(
                "Semantic segmentation dataset should not have 'annotations'."
            )

        # Prepare per-category binary masks: one Instances entry per class
        # id present in the (augmented, padded) ground truth.
        if sem_seg_gt is not None:
            sem_seg_gt = sem_seg_gt.numpy()
            instances = Instances(image_shape)
            classes = np.unique(sem_seg_gt)
            # remove ignored region
            classes = classes[classes != self.ignore_label]
            instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
            masks = []
            for class_id in classes:
                masks.append(sem_seg_gt == class_id)
            if len(masks) == 0:
                # Some image does not have annotation (all ignored)
                instances.gt_masks = torch.zeros(
                    (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])
                )
            else:
                masks = BitMasks(
                    torch.stack(
                        [
                            torch.from_numpy(np.ascontiguousarray(x.copy()))
                            for x in masks
                        ]
                    )
                )
                instances.gt_masks = masks.tensor
            # NOTE: stored under "sem_instances" — downstream multi-task code
            # distinguishes semantic targets by this key.
            dataset_dict["sem_instances"] = instances
        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/mask_former_binary_full_dataset_mapper.py | mask2former/data/dataset_mappers/mask_former_binary_full_dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.projects.point_rend import ColorAugSSDTransform
from detectron2.structures import BitMasks, Instances
from ..augmentations import CropImageWithMask, RandomResizedCrop, CenterCrop
__all__ = ["MaskFormerBinaryFullDatasetMapper"]
class MaskFormerBinaryFullDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by MaskFormer, producing a single binary
    target per sample for one of three tasks selected by
    ``dataset_dict["task"]``: "sem_seg", "pan_seg" or "ins_seg".

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        augmentations,
        image_format,
        ignore_label,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            is_train: for training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            ignore_label: the label that is ignored to evaluation
            size_divisibility: pad image size to be divisible by this value
        """
        self.is_train = is_train
        self.tfm_gens = augmentations
        self.img_format = image_format
        self.ignore_label = ignore_label
        self.size_divisibility = size_divisibility

        logger = logging.getLogger(__name__)
        mode = "training" if is_train else "inference"
        logger.info(
            f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}"
        )

    @classmethod
    def from_config(cls, cfg, is_train=True):
        """Build the mapper's constructor arguments from a detectron2 config."""
        # Before any other augmentation, crop the image around the selected
        # mask with an expand ratio so the binary target stays in view.
        augs = [CropImageWithMask(cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO)]
        if is_train:
            augs.append(RandomResizedCrop(cfg.INPUT.MIN_SIZE_TRAIN))
            augs.append(T.RandomFlip())
            # Assume always applies to the training set.
            dataset_names = cfg.DATASETS.TRAIN
        else:
            min_size = cfg.INPUT.MIN_SIZE_TEST
            dataset_names = cfg.DATASETS.TEST
        # Look up the ignore label once from the first dataset's metadata.
        # (The original queried MetadataCatalog twice in the test branch.)
        meta = MetadataCatalog.get(dataset_names[0])
        ignore_label = meta.ignore_label
        if not is_train:
            augs.append(CenterCrop(min_size, seg_ignore_label=ignore_label))
        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "ignore_label": ignore_label,
            "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY,
        }
        return ret

    def _pad_image_and_gt(self, image, gt):
        """Pad image (value 128) and label map (ignore_label) on the
        right/bottom up to ``self.size_divisibility``; no-op when disabled."""
        if self.size_divisibility > 0:
            image_size = (image.shape[-2], image.shape[-1])
            # F.pad order is (left, right, top, bottom).
            padding_size = [
                0,
                self.size_divisibility - image_size[1],
                0,
                self.size_divisibility - image_size[0],
            ]
            image = F.pad(image, padding_size, value=128).contiguous()
            if gt is not None:
                gt = F.pad(gt, padding_size, value=self.ignore_label).contiguous()
        return image, gt

    def _semantic_sample(self, dataset_dict, image, sem_seg_gt):
        """Handle task "sem_seg": one binary mask for
        ``dataset_dict["category_id"]``.  Returns ``None`` when the category
        has no pixels left after augmentation (sample is dropped)."""
        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        # CropImageWithMask reads the target category off the AugInput.
        aug_input.category_id = dataset_dict["category_id"]
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        sem_seg_gt = aug_input.sem_seg

        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if sem_seg_gt is not None:
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
        image, sem_seg_gt = self._pad_image_and_gt(image, sem_seg_gt)

        image_shape = (image.shape[-2], image.shape[-1])  # h, w
        dataset_dict["image"] = image
        if sem_seg_gt is not None:
            dataset_dict["sem_seg"] = sem_seg_gt.long()

        sem_seg_gt = sem_seg_gt.numpy()
        instances = Instances(image_shape)
        instances.gt_classes = torch.tensor(
            [dataset_dict["category_id"]], dtype=torch.int64
        )
        mask = sem_seg_gt == dataset_dict["category_id"]
        if mask.sum() == 0:
            # Target category vanished (e.g. cropped away): drop sample.
            return None
        # NOTE: the original also carried an unreachable len(masks) == 0
        # branch; exactly one mask is ever built here, so it was removed.
        masks = BitMasks(
            torch.stack([torch.from_numpy(np.ascontiguousarray(mask.copy()))])
        )
        instances.gt_masks = masks.tensor
        dataset_dict["instances"] = instances
        return dataset_dict

    def _panoptic_sample(self, dataset_dict, image, require_thing):
        """Shared implementation of tasks "pan_seg" and "ins_seg": one binary
        mask for the segment ``dataset_dict["id"]``.  When ``require_thing``
        is True (instance segmentation), only "thing" segments are kept."""
        # Local imports match this file's existing style for panopticapi.
        # BUGFIX: ``Boxes`` was referenced below but never imported, causing a
        # NameError on the empty-mask path.
        from panopticapi.utils import rgb2id
        from detectron2.structures import Boxes

        pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB").astype("double")
        pan_seg_gt = rgb2id(pan_seg_gt)

        aug_input = T.AugInput(image, sem_seg=pan_seg_gt)
        # CropImageWithMask reads the target segment id off the AugInput.
        aug_input.category_id = dataset_dict["id"]
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        pan_seg_gt = aug_input.sem_seg

        pan_seg_gt = torch.as_tensor(pan_seg_gt.astype("long"))
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        image, pan_seg_gt = self._pad_image_and_gt(image, pan_seg_gt)

        image_shape = (image.shape[-2], image.shape[-1])  # h, w
        dataset_dict["image"] = image

        pan_seg_gt = pan_seg_gt.numpy()
        instances = Instances(image_shape)
        classes = []
        masks = []
        class_id = dataset_dict["category_id"]
        keep = not dataset_dict["iscrowd"]
        if require_thing:
            keep = keep and dataset_dict["isthing"]
        if keep:
            classes.append(class_id)
            masks.append(pan_seg_gt == dataset_dict["id"])
        instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
        if len(masks) == 0:
            # Segment was filtered out (crowd / not a thing): empty targets.
            instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
            instances.gt_boxes = Boxes(torch.zeros((0, 4)))
        else:
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
            )
            instances.gt_masks = masks.tensor
            instances.gt_boxes = masks.get_bounding_boxes()
        dataset_dict["instances"] = instances
        return dataset_dict

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept, or ``None``
            for a "sem_seg" sample whose category vanished after augmentation.
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)
        if "sem_seg_file_name" in dataset_dict:
            # PyTorch transformation not implemented for uint16, so converting it to double first
            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype(
                "double"
            )
        else:
            sem_seg_gt = None
        # A sem-seg GT file is required for every task (the pop above also
        # removes the path from the dict for the pan/ins tasks).
        if sem_seg_gt is None:
            raise ValueError(
                "Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.".format(
                    dataset_dict["file_name"]
                )
            )
        if "annotations" in dataset_dict:
            raise ValueError(
                "Semantic segmentation dataset should not have 'annotations'."
            )

        if dataset_dict["task"] == "sem_seg":
            return self._semantic_sample(dataset_dict, image, sem_seg_gt)
        elif dataset_dict["task"] == "pan_seg":
            return self._panoptic_sample(dataset_dict, image, require_thing=False)
        elif dataset_dict["task"] == "ins_seg":
            return self._panoptic_sample(dataset_dict, image, require_thing=True)
        # Unknown task: return the dict untouched (matches original behavior).
        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/__init__.py | mask2former/data/dataset_mappers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
from .mask_former_binary_semantic_dataset_mapper import (
MaskFormerBinarySemanticDatasetMapper,
)
from .mask_former_binary_full_dataset_mapper import MaskFormerBinaryFullDatasetMapper
from .mask_former_semantic_dataset_mapper import MaskFormerSemanticDatasetMapper
from .mask_former_instance_dataset_mapper import MaskFormerInstanceDatasetMapper
from .mask_former_panoptic_dataset_mapper import MaskFormerPanopticDatasetMapper
from .coco_full_task_new_baseline_dataset_mapper import COCOFullTaskNewBaselineDatasetMapper
from .proposal_classification_dataset_mapper import ProposalClasificationDatasetMapper
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/proposal_classification_dataset_mapper.py | mask2former/data/dataset_mappers/proposal_classification_dataset_mapper.py | from detectron2.data.dataset_mapper import DatasetMapper
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.structures import BoxMode
from ..augmentations import CropImageWithBox, RandomResizedCrop, CenterCrop
class ProposalClasificationDatasetMapper(DatasetMapper):
    """
    Maps a dataset dict containing exactly one box annotation into an image
    cropped/augmented around that box plus its category id, for proposal
    classification.
    """
    # NOTE(review): the class name misspells "Classification", but it is part
    # of the public API (re-exported by the package __init__), so it is kept.

    @configurable
    def __init__(
        self,
        is_train: bool,
        *,
        augmentations,
        image_format: str,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            is_train: whether it's used in training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
        """
        # fmt: off
        self.is_train = is_train
        self.augmentations = T.AugmentationList(augmentations)
        self.image_format = image_format
        # fmt: on
        logger = logging.getLogger(__name__)
        mode = "training" if is_train else "inference"
        logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")

    @classmethod
    def from_config(cls, cfg, is_train: bool = True):
        """Build the mapper's constructor arguments from a detectron2 config."""
        if is_train:
            augs = [
                CropImageWithBox(cfg.INPUT.EXPAND_RATIO, mode=cfg.INPUT.EXPAND_MODE)
            ]
        else:
            # Presumably cfg.INPUT.EXPAND_RATIO is a sequence and the first
            # entry is the deterministic test-time ratio — TODO confirm.
            augs = [CropImageWithBox(cfg.INPUT.EXPAND_RATIO[0])]
        if is_train:
            augs.append(RandomResizedCrop(cfg.INPUT.MIN_SIZE_TRAIN))
            augs.append(T.RandomFlip())
        else:
            min_size = cfg.INPUT.MIN_SIZE_TEST
            augs.append(T.ResizeShortestEdge(min_size, sample_style="choice"))
            augs.append(CenterCrop(min_size, seg_ignore_label=0))

        ret = {
            "is_train": is_train,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
        }
        return ret

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        # USER: Write your own image loading if it's not from a file
        image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
        utils.check_image_size(dataset_dict, image)
        # Exactly one annotated box per sample is expected.
        assert len(dataset_dict["annotations"]) == 1
        obj = dataset_dict["annotations"][0]
        # Normalize the box to absolute XYXY so CropImageWithBox can use it.
        aug_input = T.AugInput(
            image,
            boxes=BoxMode.convert(
                np.asarray(obj["bbox"]).reshape((1, 4)),
                obj["bbox_mode"],
                BoxMode.XYXY_ABS,
            ),
        )
        transforms = self.augmentations(aug_input)
        image = aug_input.image

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(
            np.ascontiguousarray(image.transpose(2, 0, 1))
        )
        dataset_dict["category_id"] = torch.Tensor([obj["category_id"]]).long()
        # Raw annotations / sem-seg path are not needed downstream.
        dataset_dict.pop("annotations", None)
        dataset_dict.pop("sem_seg_file_name", None)
        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py | mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
import torch
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.structures import BitMasks, Instances
from .mask_former_semantic_dataset_mapper import MaskFormerSemanticDatasetMapper
__all__ = ["MaskFormerPanopticDatasetMapper"]
class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by MaskFormer for panoptic segmentation.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        augmentations,
        image_format,
        ignore_label,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            is_train: for training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            ignore_label: the label that is ignored to evaluation
            size_divisibility: pad image size to be divisible by this value
        """
        super().__init__(
            is_train,
            augmentations=augmentations,
            image_format=image_format,
            ignore_label=ignore_label,
            size_divisibility=size_divisibility,
        )

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept. Adds "image",
            optionally "sem_seg", and "pan_instances" (an :class:`Instances` holding
            per-segment classes and binary masks).
        """
        assert self.is_train, "MaskFormerPanopticDatasetMapper should only be used for training!"

        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        # semantic segmentation
        if "sem_seg_file_name" in dataset_dict:
            # PyTorch transformation not implemented for uint16, so converting it to double first
            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double")
        else:
            sem_seg_gt = None

        # panoptic segmentation
        if "pan_seg_file_name" in dataset_dict:
            pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
            segments_info = dataset_dict["segments_info"]
        else:
            pan_seg_gt = None
            segments_info = None

        if pan_seg_gt is None:
            raise ValueError(
                "Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.".format(
                    dataset_dict["file_name"]
                )
            )

        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        if sem_seg_gt is not None:
            sem_seg_gt = aug_input.sem_seg

        # apply the same transformation to panoptic segmentation
        pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)

        # deferred import: panopticapi is only needed for panoptic training
        from panopticapi.utils import rgb2id

        pan_seg_gt = rgb2id(pan_seg_gt)

        # Pad image and segmentation label here!
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if sem_seg_gt is not None:
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
        pan_seg_gt = torch.as_tensor(pan_seg_gt.astype("long"))

        if self.size_divisibility > 0:
            image_size = (image.shape[-2], image.shape[-1])
            # NOTE(review): this pads up to a fixed target size, not to the next
            # multiple; it assumes size_divisibility >= image size (a negative pad
            # in F.pad would crop) — confirm against the crop augmentation config.
            padding_size = [
                0,
                self.size_divisibility - image_size[1],
                0,
                self.size_divisibility - image_size[0],
            ]
            image = F.pad(image, padding_size, value=128).contiguous()
            if sem_seg_gt is not None:
                sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()
            pan_seg_gt = F.pad(
                pan_seg_gt, padding_size, value=0
            ).contiguous()  # 0 is the VOID panoptic label

        image_shape = (image.shape[-2], image.shape[-1])  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = image
        if sem_seg_gt is not None:
            dataset_dict["sem_seg"] = sem_seg_gt.long()

        if "annotations" in dataset_dict:
            # BUGFIX: original message read "Pemantic" (typo).
            raise ValueError("Panoptic segmentation dataset should not have 'annotations'.")

        # Prepare per-category binary masks (crowd segments are skipped)
        pan_seg_gt = pan_seg_gt.numpy()
        instances = Instances(image_shape)
        classes = []
        masks = []
        for segment_info in segments_info:
            class_id = segment_info["category_id"]
            if not segment_info["iscrowd"]:
                classes.append(class_id)
                masks.append(pan_seg_gt == segment_info["id"])

        classes = np.array(classes)
        instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
        if len(masks) == 0:
            # Some image does not have annotation (all ignored)
            instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
        else:
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
            )
            instances.gt_masks = masks.tensor

        dataset_dict["pan_instances"] = instances
        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/data/dataset_mappers/ade_all_task_dataset_mapper.py | mask2former/data/dataset_mappers/ade_all_task_dataset_mapper.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging

import numpy as np
import torch
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.structures import BitMasks, Boxes, Instances

from .mask_former_semantic_dataset_mapper import MaskFormerSemanticDatasetMapper
__all__ = ["ADEDatasetMapper"]
class ADEDatasetMapper(MaskFormerSemanticDatasetMapper):
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by MaskFormer for all segmentation tasks
    (panoptic, instance and semantic) at once.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        augmentations,
        image_format,
        ignore_label,
        size_divisibility,
    ):
        """
        NOTE: this interface is experimental.

        Args:
            is_train: for training or inference
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            ignore_label: the label that is ignored to evaluation
            size_divisibility: pad image size to be divisible by this value
        """
        super().__init__(
            is_train,
            augmentations=augmentations,
            image_format=image_format,
            ignore_label=ignore_label,
            size_divisibility=size_divisibility,
        )

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept. May add
            "pan_instances", "ins_instances", "image", "sem_seg" and
            "sem_instances" depending on which GT files are present.
        """
        # BUGFIX: assert message named the wrong class.
        assert self.is_train, "ADEDatasetMapper should only be used for training!"

        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        if "sem_seg_file_name" in dataset_dict:
            # PyTorch transformation not implemented for uint16, so converting it to double first
            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double")
        else:
            sem_seg_gt = None

        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        if sem_seg_gt is not None:
            sem_seg_gt = aug_input.sem_seg

        # BUGFIX: `image` is a numpy HWC array at this point, so the original
        # `(image.shape[-2], image.shape[-1])` produced (width, channels)
        # instead of (height, width).
        image_shape = (image.shape[0], image.shape[1])  # h, w

        if "pan_seg_file_name" in dataset_dict:
            pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
            segments_info = dataset_dict["segments_info"]

            # apply the same transformation to panoptic segmentation
            pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)

            from panopticapi.utils import rgb2id

            pan_seg_gt = rgb2id(pan_seg_gt)

            # Pad image and segmentation label here!
            image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
            if sem_seg_gt is not None:
                sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
            pan_seg_gt = torch.as_tensor(pan_seg_gt.astype("long"))

            if "annotations" in dataset_dict:
                raise ValueError("Panoptic segmentation dataset should not have 'annotations'.")

            # Prepare per-category binary masks (crowd segments are skipped)
            pan_seg_gt = pan_seg_gt.numpy()
            instances = Instances(image_shape)
            classes = []
            masks = []
            for segment_info in segments_info:
                class_id = segment_info["category_id"]
                if not segment_info["iscrowd"]:
                    classes.append(class_id)
                    masks.append(pan_seg_gt == segment_info["id"])

            classes = np.array(classes)
            instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
            if len(masks) == 0:
                # Some image does not have annotation (all ignored)
                instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
            else:
                masks = BitMasks(
                    torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
                )
                instances.gt_masks = masks.tensor
            dataset_dict["pan_instances"] = instances

            # for instance segmentation: only non-crowd "thing" segments
            ins_instances = Instances(image_shape)
            classes = []
            masks = []
            for segment_info in segments_info:
                class_id = segment_info["category_id"]
                if "iscrowd" not in segment_info or segment_info["iscrowd"] == False:
                    if segment_info["isthing"]:
                        classes.append(class_id)
                        masks.append(pan_seg_gt == segment_info["id"])

            classes = np.array(classes)
            ins_instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
            if len(masks) == 0:
                # Some image does not have annotation (all ignored)
                ins_instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
                # BUGFIX: `Boxes` was referenced without being imported in this
                # file; the import is now provided at the top of the module.
                ins_instances.gt_boxes = Boxes(torch.zeros((0, 4)))
            else:
                masks = BitMasks(
                    torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
                )
                ins_instances.gt_masks = masks.tensor
                ins_instances.gt_boxes = masks.get_bounding_boxes()
            dataset_dict["ins_instances"] = ins_instances

        # semantic segmentation
        # NOTE(review): "image" is only written to dataset_dict in this branch —
        # confirm that every dataset routed through this mapper carries semantic GT.
        if sem_seg_gt is not None:
            # BUGFIX: when the panoptic branch above ran, `image`/`sem_seg_gt`
            # are already torch tensors; calling the numpy-style
            # transpose(2, 0, 1)/astype on them used to crash. Convert only if
            # they are still numpy arrays.
            if isinstance(image, np.ndarray):
                image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
            if isinstance(sem_seg_gt, np.ndarray):
                sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))

            if self.size_divisibility > 0:
                image_size = (image.shape[-2], image.shape[-1])
                padding_size = [
                    0,
                    self.size_divisibility - image_size[1],
                    0,
                    self.size_divisibility - image_size[0],
                ]
                image = F.pad(image, padding_size, value=128).contiguous()
                sem_seg_gt = F.pad(
                    sem_seg_gt, padding_size, value=self.ignore_label
                ).contiguous()

            image_shape = (image.shape[-2], image.shape[-1])  # h, w

            dataset_dict["image"] = image
            dataset_dict["sem_seg"] = sem_seg_gt.long()

            # Prepare per-category binary masks
            sem_seg_gt = sem_seg_gt.numpy()
            sem_seg_instances = Instances(image_shape)
            sem_classes = np.unique(sem_seg_gt)
            # remove ignored region
            sem_classes = sem_classes[sem_classes != self.ignore_label]
            sem_seg_instances.gt_classes = torch.tensor(sem_classes, dtype=torch.int64)

            sem_masks = []
            for class_id in sem_classes:
                sem_masks.append(sem_seg_gt == class_id)

            if len(sem_masks) == 0:
                # Some image does not have annotation (all ignored)
                sem_seg_instances.gt_masks = torch.zeros(
                    (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])
                )
            else:
                sem_masks = BitMasks(
                    torch.stack(
                        [
                            torch.from_numpy(np.ascontiguousarray(x.copy()))
                            for x in sem_masks
                        ]
                    )
                )
                sem_seg_instances.gt_masks = sem_masks.tensor
            dataset_dict["sem_instances"] = sem_seg_instances

        return dataset_dict
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/evaluation/instance_seg_evaluation.py | mask2former/evaluation/instance_seg_evaluation.py | # Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation.evaluator import DatasetEvaluator
try:
from detectron2.evaluation.fast_eval_api import COCOeval_opt
except ImportError:
COCOeval_opt = COCOeval
class COCOEvaluator(DatasetEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).

    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.

    This FreeSeg variant additionally reports mean AP over "seen" (base) vs.
    "unseen" (novel) categories in :meth:`_derive_coco_results`.
    """

    def __init__(
        self,
        dataset_name,
        tasks=None,
        distributed=True,
        output_dir=None,
        *,
        max_dets_per_image=None,
        use_fast_impl=True,
        kpt_oks_sigmas=(),
        allow_cached_coco=True,
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                    "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            tasks (tuple[str]): tasks that can be evaluated under the given
                configuration. A task is one of "bbox", "segm", "keypoints".
                By default, will infer this automatically from predictions.
            distributed (True): if True, will collect results from all ranks and run evaluation
                in the main process.
                Otherwise, will only evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:
                1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
                   contains all the results in the format they are produced by the model.
                2. "coco_instances_results.json" a json file in COCO's result format.
            max_dets_per_image (int): limit on the maximum number of detections per image.
                By default in COCO, this limit is to 100, but this can be customized
                to be greater, as is needed in evaluation metrics AP fixed and AP pool
                (see https://arxiv.org/pdf/2102.01066.pdf)
                This doesn't affect keypoint evaluation.
            use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
                Although the results should be very close to the official implementation in COCO
                API, it is still recommended to compute results with the official API for use in
                papers. The faster implementation also uses more RAM.
            kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
                See http://cocodataset.org/#keypoints-eval
                When empty, it will use the defaults in COCO.
                Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
            allow_cached_coco (bool): Whether to use cached coco json from previous validation
                runs. You should set this to False if you need to use different validation data.
                Defaults to True.
        """
        self._logger = logging.getLogger(__name__)
        self._distributed = distributed
        self._output_dir = output_dir

        # Fall back to the official (pure Python) COCO eval when the fast C++
        # implementation was not built (COCOeval_opt aliases COCOeval then).
        if use_fast_impl and (COCOeval_opt is COCOeval):
            self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
            use_fast_impl = False
        self._use_fast_impl = use_fast_impl

        # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
        # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
        # 3rd element (100) is used as the limit on the number of detections per image when
        # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
        # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
        if max_dets_per_image is None:
            max_dets_per_image = [1, 10, 100]
        else:
            max_dets_per_image = [1, 10, max_dets_per_image]
        self._max_dets_per_image = max_dets_per_image

        # Legacy path: a CfgNode passed as `tasks` configures OKS sigmas instead.
        if tasks is not None and isinstance(tasks, CfgNode):
            kpt_oks_sigmas = (
                tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
            )
            self._logger.warn(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead."
            )
            self._tasks = None  # Infering it from predictions should be better
        else:
            self._tasks = tasks

        self._cpu_device = torch.device("cpu")

        self._metadata = MetadataCatalog.get(dataset_name)
        if not hasattr(self._metadata, "json_file"):
            if output_dir is None:
                raise ValueError(
                    "output_dir must be provided to COCOEvaluator "
                    "for datasets not in COCO format."
                )
            self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")
            cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
            self._metadata.json_file = cache_path
            convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)

        json_file = PathManager.get_local_path(self._metadata.json_file)
        # COCO() prints to stdout while loading; silence it.
        with contextlib.redirect_stdout(io.StringIO()):
            self._coco_api = COCO(json_file)

        # Test set json files do not contain annotations (evaluation must be
        # performed using the COCO evaluation server).
        self._do_evaluation = "annotations" in self._coco_api.dataset
        if self._do_evaluation:
            self._kpt_oks_sigmas = kpt_oks_sigmas

    def reset(self):
        # Clear accumulated per-image predictions before a new evaluation run.
        self._predictions = []

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
                It is a list of dict. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a COCO model. It is a list of dicts with key
                "instances" that contains :class:`Instances`.
        """
        for input, output in zip(inputs, outputs):
            prediction = {"image_id": input["image_id"]}
            if "instances" in output:
                instances = output["instances"].to(self._cpu_device)
                prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
            if "proposals" in output:
                prediction["proposals"] = output["proposals"].to(self._cpu_device)
            # Only keep predictions that contain more than the bare image_id.
            if len(prediction) > 1:
                self._predictions.append(prediction)

    def evaluate(self, img_ids=None):
        """
        Args:
            img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
        """
        if self._distributed:
            # Gather predictions from all ranks; only rank 0 runs the evaluation.
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        if len(predictions) == 0:
            self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)

        self._results = OrderedDict()
        if "proposals" in predictions[0]:
            self._eval_box_proposals(predictions)
        if "instances" in predictions[0]:
            self._eval_predictions(predictions, img_ids=img_ids)
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)

    def _tasks_from_predictions(self, predictions):
        """
        Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
        """
        tasks = {"bbox"}
        for pred in predictions:
            if "segmentation" in pred:
                tasks.add("segm")
            if "keypoints" in pred:
                tasks.add("keypoints")
        return sorted(tasks)

    def _eval_predictions(self, predictions, img_ids=None):
        """
        Evaluate predictions. Fill self._results with the metrics of the tasks.
        """
        self._logger.info("Preparing results for COCO format ...")
        coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
        tasks = self._tasks or self._tasks_from_predictions(coco_results)

        # unmap the category ids for COCO (contiguous -> original dataset ids)
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
            all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
            num_classes = len(all_contiguous_ids)
            assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1

            reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
            for result in coco_results:
                category_id = result["category_id"]
                assert category_id < num_classes, (
                    f"A prediction has class={category_id}, "
                    f"but the dataset only has {num_classes} classes and "
                    f"predicted class id should be in [0, {num_classes - 1}]."
                )
                result["category_id"] = reverse_id_mapping[category_id]

        if self._output_dir:
            file_path = os.path.join(self._output_dir, "coco_instances_results.json")
            self._logger.info("Saving results to {}".format(file_path))
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(coco_results))
                f.flush()

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info(
            "Evaluating predictions with {} COCO API...".format(
                "unofficial" if self._use_fast_impl else "official"
            )
        )
        for task in sorted(tasks):
            assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
            coco_eval = (
                _evaluate_predictions_on_coco(
                    self._coco_api,
                    coco_results,
                    task,
                    kpt_oks_sigmas=self._kpt_oks_sigmas,
                    use_fast_impl=self._use_fast_impl,
                    img_ids=img_ids,
                    max_dets_per_image=self._max_dets_per_image,
                )
                if len(coco_results) > 0
                else None  # cocoapi does not handle empty results very well
            )

            res = self._derive_coco_results(
                coco_eval, task, class_names=self._metadata.get("thing_classes")
            )
            self._results[task] = res

    def _eval_box_proposals(self, predictions):
        """
        Evaluate the box proposals in predictions.
        Fill self._results with the metrics for "box_proposals" task.
        """
        if self._output_dir:
            # Saving generated box proposals to file.
            # Predicted box_proposals are in XYXY_ABS mode.
            bbox_mode = BoxMode.XYXY_ABS.value
            ids, boxes, objectness_logits = [], [], []
            for prediction in predictions:
                ids.append(prediction["image_id"])
                boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
                objectness_logits.append(prediction["proposals"].objectness_logits.numpy())

            proposal_data = {
                "boxes": boxes,
                "objectness_logits": objectness_logits,
                "ids": ids,
                "bbox_mode": bbox_mode,
            }
            with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
                pickle.dump(proposal_data, f)

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info("Evaluating bbox proposals ...")
        res = {}
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
                key = "AR{}@{:d}".format(suffix, limit)
                res[key] = float(stats["ar"].item() * 100)
        self._logger.info("Proposal metrics: \n" + create_small_table(res))
        self._results["box_proposals"] = res

    def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
        """
        Derive the desired score numbers from summarized COCOeval.

        Args:
            coco_eval (None or COCOEval): None represents no predictions from model.
            iou_type (str):
            class_names (None or list[str]): if provided, will use it to predict
                per-category AP.

        Returns:
            a dict of {metric name: score}
        """
        metrics = {
            "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
            "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
            "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
        }[iou_type]

        if coco_eval is None:
            self._logger.warn("No predictions from the model!")
            return {metric: float("nan") for metric in metrics}

        # the standard metrics; COCOeval reports -1 for uncomputable entries
        results = {
            metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
            for idx, metric in enumerate(metrics)
        }
        self._logger.info(
            "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
        )
        if not np.isfinite(sum(results.values())):
            self._logger.info("Some metrics cannot be computed and is shown as NaN.")

        if class_names is None or len(class_names) <= 1:
            return results
        # Compute per-category AP
        # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
        precisions = coco_eval.eval["precision"]
        # precision has dims (iou, recall, cls, area range, max dets)
        assert len(class_names) == precisions.shape[2]

        results_per_category = []
        for idx, name in enumerate(class_names):
            # area range index 0: all area ranges
            # max dets index -1: typically 100 per image
            precision = precisions[:, :, idx, 0, -1]
            precision = precision[precision > -1]
            ap = np.mean(precision) if precision.size else float("nan")
            results_per_category.append(("{}".format(name), float(ap * 100)))

        # Open-vocabulary bookkeeping: split per-category AP into "seen" (base)
        # and "unseen" (novel) classes, with the novel list chosen by the
        # dataset's evaluator type.
        seen_res = []
        unseen_res = []
        for name, ap in results_per_category:
            if "coco" in self._metadata.evaluator_type:
                novel_classes = ["cow", "giraffe", "frisbee", "suitcase", "skateboard", "carrot", "scissors"] # 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200]:
            elif "ade" in self._metadata.evaluator_type:
                novel_classes = ["table", "armchair", "tub", "fireplace", "computer", "towel", "tv", "basket, handbasket", "traffic light", "monitor", "flag"]
            else:
                novel_classes = []
            if name in novel_classes:
                unseen_res.append(ap)
            else:
                seen_res.append(ap)
        # NOTE(review): np.mean([]) yields NaN with a RuntimeWarning when a
        # split is empty (e.g. no novel classes for this dataset) — confirm intended.
        mean_s_res = np.mean(seen_res)
        mean_un_res = np.mean(unseen_res)
        print("Seen-classes AP:", mean_s_res)
        print("Unseen-classes AP:", mean_un_res)
        self._logger.info("Seen-classes AP:" + str(mean_s_res))
        self._logger.info("Unseen-classes AP:" + str(mean_un_res))

        # tabulate it
        N_COLS = min(6, len(results_per_category) * 2)
        results_flatten = list(itertools.chain(*results_per_category))
        results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
        table = tabulate(
            results_2d,
            tablefmt="pipe",
            floatfmt=".3f",
            headers=["category", "AP"] * (N_COLS // 2),
            numalign="left",
        )
        self._logger.info("Per-category {} AP: \n".format(iou_type) + table)

        results.update({"AP-" + name: ap for name, ap in results_per_category})
        return results
def instances_to_coco_json(instances, img_id):
    """
    Dump an "Instances" object to a COCO-format json that's used for evaluation.

    Args:
        instances (Instances): predictions with pred_boxes (XYXY_ABS), scores,
            pred_classes, and optionally pred_masks / pred_keypoints.
        img_id (int): the image id

    Returns:
        list[dict]: list of json annotations in COCO format, one per instance.
    """
    if len(instances) == 0:
        return []

    # COCO stores boxes as XYWH_ABS; convert from detectron2's XYXY_ABS.
    xywh_boxes = BoxMode.convert(
        instances.pred_boxes.tensor.numpy(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
    ).tolist()
    scores = instances.scores.tolist()
    classes = instances.pred_classes.tolist()

    rles = None
    if instances.has("pred_masks"):
        # use RLE to encode the masks, because they are too large and takes memory
        # since this evaluator stores outputs of the entire dataset
        rles = []
        for mask in instances.pred_masks:
            rle = mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
            # "counts" is a byte-stream; json can only serialize strings, and
            # utf-8 decoding matches what pycocotools/_mask.pyx does.
            rle["counts"] = rle["counts"].decode("utf-8")
            rles.append(rle)

    keypoints = instances.pred_keypoints if instances.has("pred_keypoints") else None

    results = []
    for k, (box, score, cls) in enumerate(zip(xywh_boxes, scores, classes)):
        entry = {
            "image_id": img_id,
            "category_id": cls,
            "bbox": box,
            "score": score,
        }
        if rles is not None:
            entry["segmentation"] = rles[k]
        if keypoints is not None:
            # COCO keypoint coordinates are pixel indices, while predictions are
            # floating point; subtract 0.5 to stay consistent with the
            # annotation format (inverse of the loading logic in `datasets/coco.py`).
            keypoints[k][:, :2] -= 0.5
            entry["keypoints"] = keypoints[k].flatten().tolist()
        results.append(entry)
    return results
# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
    """
    Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.

    Args:
        dataset_predictions: list of per-image dicts with "image_id" and "proposals".
        coco_api: a pycocotools COCO object holding the ground truth.
        thresholds: IoU thresholds; defaults to 0.5:0.05:0.95 when None.
        area: one of the keys of `areas` below, selecting a GT area range.
        limit: if set, evaluate only the top-`limit` scoring proposals per image.

    Returns:
        dict with average recall "ar", per-threshold "recalls", the thresholds,
        the matched IoU values "gt_overlaps", and the GT count "num_pos".
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0**2, 1e5**2],  # all
        [0**2, 32**2],  # small
        [32**2, 96**2],  # medium
        [96**2, 1e5**2],  # large
        [96**2, 128**2],  # 96-128
        [128**2, 256**2],  # 128-256
        [256**2, 512**2],  # 256-512
        [512**2, 1e5**2],
    ]  # 512-inf
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0

    for prediction_dict in dataset_predictions:
        predictions = prediction_dict["proposals"]

        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = predictions.objectness_logits.sort(descending=True)[1]
        predictions = predictions[inds]

        ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
        anno = coco_api.loadAnns(ann_ids)
        # crowd annotations are excluded from recall computation
        gt_boxes = [
            BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
            for obj in anno
            if obj["iscrowd"] == 0
        ]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
        gt_boxes = Boxes(gt_boxes)
        gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])

        if len(gt_boxes) == 0 or len(predictions) == 0:
            continue

        # keep only GT boxes whose area falls within the requested range
        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]

        num_pos += len(gt_boxes)

        if len(gt_boxes) == 0:
            continue

        if limit is not None and len(predictions) > limit:
            predictions = predictions[:limit]

        overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)

        # Greedy one-to-one matching: repeatedly pick the (proposal, gt) pair
        # with the highest remaining IoU and remove both from consideration.
        _gt_overlaps = torch.zeros(len(gt_boxes))
        for j in range(min(len(predictions), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)

            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1

        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = (
        torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
    )
    gt_overlaps, _ = torch.sort(gt_overlaps)

    if thresholds is None:
        step = 0.05
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
def _evaluate_predictions_on_coco(
    coco_gt,
    coco_results,
    iou_type,
    kpt_oks_sigmas=None,
    use_fast_impl=True,
    img_ids=None,
    max_dets_per_image=None,
):
    """
    Evaluate the coco results using COCOEval API.

    Args:
        coco_gt: pycocotools COCO object with ground truth.
        coco_results (list[dict]): predictions in COCO result format (non-empty).
        iou_type (str): one of "bbox", "segm", "keypoints".
        kpt_oks_sigmas: optional per-keypoint OKS sigmas (keypoints only).
        use_fast_impl (bool): use detectron2's fast C++ COCOeval when available.
        img_ids: optional subset of image ids to evaluate.
        max_dets_per_image: optional [1, 10, N] maxDets list; N != 100 switches
            to :class:`COCOevalMaxDets`.

    Returns:
        the evaluated (and summarized) COCOeval object.
    """
    assert len(coco_results) > 0

    if iou_type == "segm":
        coco_results = copy.deepcopy(coco_results)
        # When evaluating mask AP, if the results contain bbox, cocoapi will
        # use the box area as the area of the instance, instead of the mask area.
        # This leads to a different definition of small/medium/large.
        # We remove the bbox field to let mask AP use mask area.
        for c in coco_results:
            c.pop("bbox", None)

    coco_dt = coco_gt.loadRes(coco_results)
    coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
    # For COCO, the default max_dets_per_image is [1, 10, 100].
    if max_dets_per_image is None:
        max_dets_per_image = [1, 10, 100]  # Default from COCOEval
    else:
        assert (
            len(max_dets_per_image) >= 3
        ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3"
        # In the case that user supplies a custom input for max_dets_per_image,
        # apply COCOevalMaxDets to evaluate AP with the custom input.
        if max_dets_per_image[2] != 100:
            coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type)
    if iou_type != "keypoints":
        coco_eval.params.maxDets = max_dets_per_image

    if img_ids is not None:
        coco_eval.params.imgIds = img_ids

    if iou_type == "keypoints":
        # Use the COCO default keypoint OKS sigmas unless overrides are specified
        if kpt_oks_sigmas:
            assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
            coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
        # COCOAPI requires every detection and every gt to have keypoints, so
        # we just take the first entry from both
        num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
        num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
        num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
        assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
            f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
            f"Ground truth contains {num_keypoints_gt} keypoints. "
            f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
            "They have to agree with each other. For meaning of OKS, please refer to "
            "http://cocodataset.org/#keypoints-eval."
        )

    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    return coco_eval
class COCOevalMaxDets(COCOeval):
    """
    Modified version of COCOeval for evaluating AP with a custom
    maxDets (by default for COCO, maxDets is 100)
    """

    def summarize(self):
        """
        Compute and display summary metrics for evaluation results given
        a custom value for max_dets_per_image.

        Identical to ``COCOeval.summarize`` except that the AP metrics read
        the detection limit from ``self.params.maxDets[2]`` instead of the
        hard-coded COCO default of 100.
        """

        def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
            # Select one cell of the accumulated precision/recall tensor,
            # average its valid (> -1) entries, and print a one-line summary.
            p = self.params
            iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
            titleStr = "Average Precision" if ap == 1 else "Average Recall"
            typeStr = "(AP)" if ap == 1 else "(AR)"
            iouStr = (
                "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
                if iouThr is None
                else "{:0.2f}".format(iouThr)
            )
            aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
            mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
            if ap == 1:
                # dimension of precision: [TxRxKxAxM]
                s = self.eval["precision"]
                # Optionally restrict to a single IoU threshold.
                if iouThr is not None:
                    t = np.where(iouThr == p.iouThrs)[0]
                    s = s[t]
                s = s[:, :, :, aind, mind]
            else:
                # dimension of recall: [TxKxAxM]
                s = self.eval["recall"]
                if iouThr is not None:
                    t = np.where(iouThr == p.iouThrs)[0]
                    s = s[t]
                s = s[:, :, aind, mind]
            # -1 marks cells with no ground truth; exclude them from the mean.
            if len(s[s > -1]) == 0:
                mean_s = -1
            else:
                mean_s = np.mean(s[s > -1])
            print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
            return mean_s

        def _summarizeDets():
            # The 12 standard COCO detection metrics, with AP evaluated at the
            # user-supplied maxDets limit.
            stats = np.zeros((12,))
            # Evaluate AP using the custom limit on maximum detections per image
            stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
            stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
            stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
            stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
            stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
            stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
            stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
            stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
            stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
            stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
            stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
            stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
            return stats

        def _summarizeKps():
            # The 10 standard COCO keypoint metrics (fixed maxDets=20).
            stats = np.zeros((10,))
            stats[0] = _summarize(1, maxDets=20)
            stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
            stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
            stats[3] = _summarize(1, maxDets=20, areaRng="medium")
            stats[4] = _summarize(1, maxDets=20, areaRng="large")
            stats[5] = _summarize(0, maxDets=20)
            stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
            stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
            stats[8] = _summarize(0, maxDets=20, areaRng="medium")
            stats[9] = _summarize(0, maxDets=20, areaRng="large")
            return stats

        if not self.eval:
            raise Exception("Please run accumulate() first")
        iouType = self.params.iouType
        if iouType == "segm" or iouType == "bbox":
            summarize = _summarizeDets
        elif iouType == "keypoints":
            summarize = _summarizeKps
        else:
            # Previously an unrecognized iouType fell through and crashed with
            # UnboundLocalError at the call below; fail with a clear message.
            raise ValueError("Unknown iouType: {}".format(iouType))
        self.stats = summarize()

    def __str__(self):
        self.summarize()
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/evaluation/pseudo_sem_seg_evaluation.py | mask2former/evaluation/pseudo_sem_seg_evaluation.py | # Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import PIL.Image as Image
import torch
from detectron2.data import MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from detectron2.evaluation import SemSegEvaluator
class GeneralizedPseudoSemSegEvaluator(SemSegEvaluator):
    """
    Evaluate semantic segmentation metrics with "pseudo" labelling:
    predictions on classes that were seen during training are replaced by the
    ground truth, so the reported numbers measure quality on the remaining
    (unseen) classes only.
    """

    def __init__(
        self,
        dataset_name,
        distributed=True,
        output_dir=None,
        *,
        num_classes=None,
        ignore_label=None,
        post_process_func=None,
        with_prior=True,
        reject_threshold=0.0,
        flag_name="trainable_flag",
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
            distributed (bool): if True, gather results from all ranks.
            output_dir (str): optional directory to dump prediction json files.
            num_classes, ignore_label: forwarded to ``SemSegEvaluator``.
            post_process_func: optional callable ``f(sem_seg, image=...)``
                applied to the raw "sem_seg" output; defaults to identity.
            with_prior (bool): if True, zero out the probability of classes
                seen during training before taking the argmax.
            reject_threshold (float): predictions whose max score is below this
                value are mapped to the "ignore" bucket.
            flag_name (str): metadata key holding the per-class 0/1 trained flag.
        """
        super().__init__(
            dataset_name,
            distributed=distributed,
            output_dir=output_dir,
            num_classes=num_classes,
            ignore_label=ignore_label,
        )
        meta = MetadataCatalog.get(dataset_name)
        try:
            self._evaluation_set = meta.evaluation_set
        except AttributeError:
            self._evaluation_set = None
        self.post_process_func = (
            post_process_func
            if post_process_func is not None
            else lambda x, **kwargs: x
        )
        # Per-class 0/1 flags marking classes seen during training.
        if hasattr(meta, flag_name):
            self.trained_flag = MetadataCatalog.get(dataset_name).get(flag_name)
        self.with_prior = with_prior
        self.reject_threshold = reject_threshold

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a model.
                It is a list of dicts. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name".
            outputs: the outputs of a model. It is either list of semantic segmentation predictions
                (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
                segmentation prediction in the same format.
        """
        for input, output in zip(inputs, outputs):
            output = self.post_process_func(
                output["sem_seg"], image=np.array(Image.open(input["file_name"]))
            )
            with PathManager.open(
                self.input_file_to_gt_file[input["file_name"]], "rb"
            ) as f:
                # np.int was removed in NumPy 1.24; it aliased the builtin int.
                gt = np.array(Image.open(f), dtype=int)
            # Boolean mask of pixels whose gt class was seen during training.
            # Fix: the original started from the scalar `mask = 0`; when no
            # trained class occurred in the image, `mask > 0` was a plain bool
            # and `pred[False]` indexed row 0 instead of selecting nothing.
            trained_mask = np.zeros_like(gt, dtype=bool)
            for i in np.unique(gt):
                if i == self._ignore_label:
                    continue
                if self.trained_flag[i] == 1:
                    trained_mask |= gt == i
            if self.with_prior:
                # Zero out probabilities of trained classes so the argmax can
                # only pick unseen classes.
                pseudo_prob = output.softmax(dim=0) * (
                    1 - output.new_tensor(self.trained_flag)[:, None, None]
                )
            else:
                pseudo_prob = output
            pred_score, pred = pseudo_prob.max(dim=0)
            # Low-confidence predictions go to the "ignore" bucket.
            pred[pred_score < self.reject_threshold] = self._num_classes
            pred = np.array(pred.to(self._cpu_device), dtype=int)
            # Trust the ground truth on trained classes ("pseudo" labelling).
            pred[trained_mask] = gt[trained_mask]
            # evaluation
            gt[gt == self._ignore_label] = self._num_classes
            self._conf_matrix += np.bincount(
                (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
                minlength=self._conf_matrix.size,
            ).reshape(self._conf_matrix.shape)
            self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))

    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        if self._distributed:
            synchronize()
            conf_matrix_list = all_gather(self._conf_matrix)
            self._predictions = all_gather(self._predictions)
            self._predictions = list(itertools.chain(*self._predictions))
            if not is_main_process():
                return
            self._conf_matrix = np.zeros_like(self._conf_matrix)
            for conf_matrix in conf_matrix_list:
                self._conf_matrix += conf_matrix
        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))
        # np.float / np.int / np.bool were removed in NumPy 1.24; use the
        # builtin types (which the old aliases pointed to) instead.
        acc = np.full(self._num_classes, np.nan, dtype=float)
        iou = np.full(self._num_classes, np.nan, dtype=float)
        tp = self._conf_matrix.diagonal()[:-1].astype(float)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        # NOTE(review): IoU is filled for acc_valid entries but mIoU divides by
        # sum(iou_valid); the two masks can differ. Kept as-is to preserve the
        # reported numbers -- confirm this is intended.
        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
        macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
        miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
        fiou = np.sum(iou[acc_valid] * class_weights[acc_valid])
        pacc = np.sum(tp) / np.sum(pos_gt)
        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        for i, name in enumerate(self._class_names):
            res["IoU-{}".format(name)] = 100 * iou[i]
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc
        for i, name in enumerate(self._class_names):
            res["ACC-{}".format(name)] = 100 * acc[i]
        if self._evaluation_set is not None:
            # Report metrics separately for the "seen" split and its complement.
            for set_name, set_inds in self._evaluation_set.items():
                iou_list = []
                set_inds = np.array(set_inds, int)
                mask = np.zeros((len(iou),)).astype(bool)
                mask[set_inds] = 1
                miou = np.sum(iou[mask][acc_valid[mask]]) / np.sum(iou_valid[mask])
                pacc = np.sum(tp[mask]) / np.sum(pos_gt[mask])
                res["mIoU-{}".format(set_name)] = 100 * miou
                res["pAcc-{}".format(set_name)] = 100 * pacc
                iou_list.append(miou)
                miou = np.sum(iou[~mask][acc_valid[~mask]]) / np.sum(iou_valid[~mask])
                pacc = np.sum(tp[~mask]) / np.sum(pos_gt[~mask])
                res["mIoU-un{}".format(set_name)] = 100 * miou
                res["pAcc-un{}".format(set_name)] = 100 * pacc
                iou_list.append(miou)
                # Harmonic mean of seen/unseen mIoU.
                res["hIoU-{}".format(set_name)] = (
                    100 * len(iou_list) / sum([1 / iou for iou in iou_list])
                )
        if self._output_dir:
            file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(res, f)
        results = OrderedDict({"sem_seg": res})
        self._logger.info(results)
        return results
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/evaluation/classification_evaluation.py | mask2former/evaluation/classification_evaluation.py | # Copyright (c) Facebook, Inc. and its affiliates.
import copy
import itertools
import logging
from collections import OrderedDict
import numpy as np
import torch
import detectron2.utils.comm as comm
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
class ClassificationEvaluator(DatasetEvaluator):
    """
    Evaluate top-1 / top-5 classification accuracy, overall and per class.
    """

    def __init__(
        self,
        dataset_name,
        distributed=True,
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                Its metadata supplies the class names ("stuff_classes") and,
                optionally, an "evaluation_set" mapping of split name to class
                indices.
            distributed (True): if True, will collect results from all ranks and run evaluation
                in the main process.
                Otherwise, will only evaluate the results in the current process.
        """
        self._logger = logging.getLogger(__name__)
        self._distributed = distributed
        self._cpu_device = torch.device("cpu")
        self._metadata = MetadataCatalog.get(dataset_name)
        try:
            self._evaluation_set = self._metadata.evaluation_set
        except AttributeError:
            self._evaluation_set = None
        self.dataset_name = dataset_name
        self._predictions = []
        self._gts = []

    def reset(self):
        # Clear accumulated predictions / ground truth between evaluations.
        self._predictions = []
        self._gts = []

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a classification model.
                It is a list of dict. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a classification model. It is a list of dicts with key
                "classification" holding the per-class score tensor for one image.
        """
        for input, output in zip(inputs, outputs):
            prediction = {}
            if ("classification" in output) and (output["classification"] is not None):
                prediction["classification"] = output["classification"].cpu()
            if len(prediction) > 0:
                self._predictions.append(prediction["classification"])
                # Ground truth comes either from "instances" or a raw category id.
                if "instances" in input:
                    self._gts.append(input["instances"].gt_classes.cpu())
                else:
                    self._gts.append(input["category_id"].cpu())

    def evaluate(self, img_ids=None):
        """
        Args:
            img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
        """
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))
            gts = comm.gather(self._gts, dst=0)
            gts = list(itertools.chain(*gts))
            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions
            gts = self._gts
        if len(predictions) == 0:
            self._logger.warning(
                "[ClassificationEvaluator] Did not receive valid predictions."
            )
            return {}
        self._results = OrderedDict()
        class_names = self._metadata.stuff_classes
        predictions = torch.cat(predictions)
        gts = torch.cat(gts)
        # NOTE(review): top-5 accuracy assumes at least 5 classes -- confirm
        # for datasets with fewer classes.
        top1_acc, top5_acc = accuracy(predictions, gts, (1, 5))
        self._results["macc_top1"] = top1_acc.mean() * 100
        self._results["macc_top5"] = top5_acc.mean() * 100
        mmacc_top1 = []
        mmacc_top5 = []
        # A per-class confusion-count matrix used to be built here, but it was
        # never read afterwards; the dead O(C^2) computation was removed.
        for i, name in enumerate(class_names):
            # safe_mean avoids nan for classes with no samples.
            macc_top1_per_cls = safe_mean(top1_acc[gts == i]) * 100
            macc_top5_per_cls = safe_mean(top5_acc[gts == i]) * 100
            self._results[f"acc_top1_{name}"] = macc_top1_per_cls
            self._results[f"acc_top5_{name}"] = macc_top5_per_cls
            mmacc_top1.append(macc_top1_per_cls)
            mmacc_top5.append(macc_top5_per_cls)
        if self._evaluation_set is not None:
            # Accuracy inside each declared split and its complement.
            # NOTE(review): torch.cat of all-empty selections yields nan;
            # assumes every split has at least one sample.
            for set_name, set_inds in self._evaluation_set.items():
                self._results[f"acc_top1_{set_name}"] = (
                    torch.cat([top1_acc[gts == i] for i in set_inds]).mean() * 100
                )
                self._results[f"acc_top1_w/o_{set_name}"] = (
                    torch.cat(
                        [
                            top1_acc[gts == i]
                            for i in range(len(class_names))
                            if i not in set_inds
                        ]
                    ).mean()
                    * 100
                )
        self._results["mmacc_top1"] = sum(mmacc_top1) / len(mmacc_top1)
        self._results["mmacc_top5"] = sum(mmacc_top5) / len(mmacc_top5)
        return copy.deepcopy(self._results)
def safe_mean(vec):
    """Mean of *vec*; returns a zero tensor when *vec* is empty (avoids nan)."""
    if len(vec):
        return vec.mean()
    return torch.Tensor([0])
def accuracy(output: torch.Tensor, target: torch.Tensor, topk=(1,)):
    """
    Computes the accuracy over the k top predictions for the specified values of k
    In top-5 accuracy you give yourself credit for having the right answer
    if the right answer appears in your top five guesses.
    """
    # (maxk, batch): ranked class indices per sample, transposed so slicing
    # the first k rows corresponds to the top-k guesses of every sample.
    ranked = output.topk(max(topk), 1, True, True)[1].t()
    hits = ranked.eq(target.unsqueeze(dim=0)).expand_as(ranked)
    # For each requested k, count hits among the first k guesses per sample
    # (a 0/1 vector of length batch, one entry per sample).
    return [hits[:k].float().sum(0) for k in topk]
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/evaluation/panoptic_seg_evaluation.py | mask2former/evaluation/panoptic_seg_evaluation.py | # Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import itertools
import json
import logging
import numpy as np
import os
import tempfile
from collections import OrderedDict
from typing import Optional
from PIL import Image
from tabulate import tabulate
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from detectron2.evaluation.evaluator import DatasetEvaluator
logger = logging.getLogger(__name__)
class GeneralizedPanopticEvaluator(DatasetEvaluator):
    """
    Evaluate Panoptic Quality metrics on COCO using PanopticAPI.
    It saves panoptic segmentation prediction in `output_dir`

    It contains a synchronize call and has to be called from all workers.
    """

    def __init__(self, dataset_name: str, output_dir: Optional[str] = None):
        """
        Args:
            dataset_name: name of the dataset
            output_dir: output directory to save results for evaluation.
        """
        self._metadata = MetadataCatalog.get(dataset_name)
        # Invert the dataset->contiguous id maps so predictions (contiguous
        # ids) can be written back in dataset-id space for the official API.
        self._thing_contiguous_id_to_dataset_id = {
            v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
        }
        self._stuff_contiguous_id_to_dataset_id = {
            v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items()
        }
        self._output_dir = output_dir
        if self._output_dir is not None:
            PathManager.mkdirs(self._output_dir)

    def reset(self):
        self._predictions = []

    def _convert_category_id(self, segment_info):
        """Map a segment's contiguous category id back to the dataset id."""
        isthing = segment_info.pop("isthing", None)
        if isthing is None:
            # the model produces panoptic category id directly. No more conversion needed
            return segment_info
        if isthing is True:
            segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[
                segment_info["category_id"]
            ]
        else:
            segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[
                segment_info["category_id"]
            ]
        return segment_info

    def process(self, inputs, outputs):
        """Convert each "panoptic_seg" output into a PNG + segments_info record."""
        from panopticapi.utils import id2rgb

        for input, output in zip(inputs, outputs):
            panoptic_img, segments_info = output["panoptic_seg"]
            panoptic_img = panoptic_img.cpu().numpy()
            if segments_info is None:
                # If "segments_info" is None, we assume "panoptic_img" is a
                # H*W int32 image storing the panoptic_id in the format of
                # category_id * label_divisor + instance_id. We reserve -1 for
                # VOID label, and add 1 to panoptic_img since the official
                # evaluation script uses 0 for VOID label.
                label_divisor = self._metadata.label_divisor
                segments_info = []
                for panoptic_label in np.unique(panoptic_img):
                    if panoptic_label == -1:
                        # VOID region.
                        continue
                    pred_class = panoptic_label // label_divisor
                    isthing = (
                        pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
                    )
                    segments_info.append(
                        {
                            "id": int(panoptic_label),
                            "category_id": int(pred_class),
                            "isthing": bool(isthing),
                        }
                    )
            # Official evaluation script uses 0 for VOID label.
            file_name = os.path.basename(input["file_name"])
            file_name_png = os.path.splitext(file_name)[0] + ".png"
            # NOTE(review): assumes image file names are numeric (COCO-style);
            # non-numeric names will raise ValueError here.
            image_id = int(os.path.splitext(file_name)[0])
            with io.BytesIO() as out:
                Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
                segments_info = [self._convert_category_id(x) for x in segments_info]
                self._predictions.append(
                    {
                        "image_id": image_id,
                        "file_name": file_name_png,
                        "png_string": out.getvalue(),
                        "segments_info": segments_info,
                    }
                )

    def evaluate(self):
        """Gather predictions, run panopticapi's pq_compute, report PQ/SQ/RQ."""
        comm.synchronize()
        self._predictions = comm.gather(self._predictions)
        self._predictions = list(itertools.chain(*self._predictions))
        if not comm.is_main_process():
            return
        # PanopticApi requires local files
        gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
        gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)
        # Predictions are written to a persistent directory (not a temp dir)
        # so they can be inspected after evaluation.
        pred_dir = self._output_dir + "/panoptic_seg"
        os.makedirs(pred_dir, exist_ok=True)
        logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
        for p in self._predictions:
            with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))
        with open(gt_json, "r") as f:
            json_data = json.load(f)
        json_data["annotations"] = self._predictions
        output_dir = pred_dir
        predictions_json = os.path.join(output_dir, "predictions.json")
        with PathManager.open(predictions_json, "w") as f:
            f.write(json.dumps(json_data))
        from panopticapi.evaluation import pq_compute

        with contextlib.redirect_stdout(io.StringIO()):
            pq_res = pq_compute(
                gt_json,
                PathManager.get_local_path(predictions_json),
                gt_folder=gt_folder,
                pred_folder=pred_dir,
            )
        res = {}
        res["PQ"] = 100 * pq_res["All"]["pq"]
        res["SQ"] = 100 * pq_res["All"]["sq"]
        res["RQ"] = 100 * pq_res["All"]["rq"]
        res["PQ_th"] = 100 * pq_res["Things"]["pq"]
        res["SQ_th"] = 100 * pq_res["Things"]["sq"]
        res["RQ_th"] = 100 * pq_res["Things"]["rq"]
        res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
        res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
        res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
        results = OrderedDict({"panoptic_seg": res})
        _print_panoptic_results(pq_res)
        print(pq_res["per_class"])
        per_class_res = pq_res["per_class"]
        # Category ids treated as "novel" (held out) for COCO; other datasets
        # have no novel split. Loop-invariant, so computed once (it used to be
        # recomputed for every class inside the loop below).
        if "coco" in self._metadata.evaluator_type:
            novel_classes = [21, 25, 33, 34, 41, 57, 87, 100, 106, 124, 145, 148, 149, 169, 172]
        else:
            novel_classes = []
        base_cls_res = {}
        novel_cls_res = {}
        for cls_id, res_c in per_class_res.items():
            if cls_id in novel_classes:
                novel_cls_res[cls_id] = res_c
            else:
                base_cls_res[cls_id] = res_c
        for split_name, split_res in (("Base", base_cls_res), ("Novel", novel_cls_res)):
            if not split_res:
                # Guard: the original divided by zero here when a split was
                # empty (e.g. the novel split on any non-COCO dataset).
                continue
            numb = len(split_res)
            pq = sum(r["pq"] for r in split_res.values()) / numb * 100
            sq = sum(r["sq"] for r in split_res.values()) / numb * 100
            rq = sum(r["rq"] for r in split_res.values()) / numb * 100
            print("{} Class Results:".format(split_name), pq, "\t", sq, "\t", rq)
            logger.info(
                "Panoptic {} Class Evaluation Results:\n".format(split_name)
                + "pq" + str(pq) + "\t" + "sq" + str(sq) + "\t" + "rq" + str(rq)
            )
        return results
def _print_panoptic_results(pq_res):
    """Log a markdown-style table of PQ/SQ/RQ for the All/Things/Stuff groups."""
    headers = ["", "PQ", "SQ", "RQ", "#categories"]
    rows = [
        [group]
        + [pq_res[group][metric] * 100 for metric in ("pq", "sq", "rq")]
        + [pq_res[group]["n"]]
        for group in ("All", "Things", "Stuff")
    ]
    table = tabulate(
        rows, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
    )
    logger.info("Panoptic Evaluation Results:\n" + table)
if __name__ == "__main__":
    # Standalone entry point: evaluate an existing prediction dump against
    # ground truth with panopticapi and log the summary table.
    from detectron2.utils.logger import setup_logger

    logger = setup_logger()
    import argparse

    parser = argparse.ArgumentParser()
    for flag in ("--gt-json", "--gt-dir", "--pred-json", "--pred-dir"):
        parser.add_argument(flag)
    args = parser.parse_args()
    from panopticapi.evaluation import pq_compute

    # pq_compute prints progress; silence it and only log the final table.
    with contextlib.redirect_stdout(io.StringIO()):
        pq_res = pq_compute(
            args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
        )
    _print_panoptic_results(pq_res)
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/evaluation/instance_evaluation.py | mask2former/evaluation/instance_evaluation.py | # Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.evaluation.coco_evaluation import COCOEvaluator, _evaluate_predictions_on_coco
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
# modified from COCOEvaluator for instance segmentation
class InstanceSegEvaluator(COCOEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).
    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.
    """
    # Differs from COCOEvaluator._eval_predictions only in the relaxed category
    # id check: contiguous ids are not required to form a dense [0, C-1] range.
    def _eval_predictions(self, predictions, img_ids=None):
        """
        Evaluate predictions. Fill self._results with the metrics of the tasks.
        """
        self._logger.info("Preparing results for COCO format ...")
        # Flatten the per-image "instances" lists into one list of COCO-format dicts.
        coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
        tasks = self._tasks or self._tasks_from_predictions(coco_results)
        # Map contiguous training ids back to the dataset's original category ids.
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
            # The dense-range invariant below is intentionally disabled: this
            # evaluator allows sparse contiguous-id mappings.
            # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
            # num_classes = len(all_contiguous_ids)
            # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
            reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
            for result in coco_results:
                category_id = result["category_id"]
                # assert category_id < num_classes, (
                #     f"A prediction has class={category_id}, "
                #     f"but the dataset only has {num_classes} classes and "
                #     f"predicted class id should be in [0, {num_classes - 1}]."
                # )
                # Only require that the predicted id is a known contiguous id.
                assert category_id in reverse_id_mapping, (
                    f"A prediction has class={category_id}, "
                    f"but the dataset only has class ids in {dataset_id_to_contiguous_id}."
                )
                result["category_id"] = reverse_id_mapping[category_id]
        if self._output_dir:
            # Dump the raw COCO-format predictions for later inspection.
            file_path = os.path.join(self._output_dir, "coco_instances_results.json")
            self._logger.info("Saving results to {}".format(file_path))
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(coco_results))
                f.flush()
        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return
        self._logger.info(
            "Evaluating predictions with {} COCO API...".format(
                "unofficial" if self._use_fast_impl else "official"
            )
        )
        for task in sorted(tasks):
            assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
            coco_eval = (
                _evaluate_predictions_on_coco(
                    self._coco_api,
                    coco_results,
                    task,
                    kpt_oks_sigmas=self._kpt_oks_sigmas,
                    use_fast_impl=self._use_fast_impl,
                    img_ids=img_ids,
                    max_dets_per_image=self._max_dets_per_image,
                )
                if len(coco_results) > 0
                else None  # cocoapi does not handle empty results very well
            )
            res = self._derive_coco_results(
                coco_eval, task, class_names=self._metadata.get("thing_classes")
            )
            self._results[task] = res
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/evaluation/__init__.py | mask2former/evaluation/__init__.py | from .generalized_sem_seg_evaluation import GeneralizedSemSegEvaluator
from .classification_evaluation import ClassificationEvaluator
from .pseudo_sem_seg_evaluation import GeneralizedPseudoSemSegEvaluator
from .panoptic_seg_evaluation import GeneralizedPanopticEvaluator
from .instance_evaluation import InstanceSegEvaluator
from .instance_seg_evaluation import COCOEvaluator | python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
bytedance/FreeSeg | https://github.com/bytedance/FreeSeg/blob/7707335cc3f2a1a73d4d2829f3cdbb0e031d3961/mask2former/evaluation/generalized_sem_seg_evaluation.py | mask2former/evaluation/generalized_sem_seg_evaluation.py | # Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import PIL.Image as Image
import pycocotools.mask as mask_util
import torch
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from detectron2.evaluation import SemSegEvaluator
class GeneralizedSemSegEvaluator(SemSegEvaluator):
    """
    Evaluate semantic segmentation metrics, optionally split into "seen" and
    "unseen" class sets declared in the dataset metadata ("evaluation_set").
    """

    def __init__(
        self,
        dataset_name,
        distributed=True,
        output_dir=None,
        *,
        num_classes=None,
        ignore_label=None,
        post_process_func=None,
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
            distributed (bool): if True, gather results from all ranks.
            output_dir (str): optional directory to dump prediction json files.
            num_classes, ignore_label: forwarded to ``SemSegEvaluator``.
            post_process_func: optional callable ``f(sem_seg, image=...)``
                applied to the raw "sem_seg" output; defaults to identity.
        """
        super().__init__(
            dataset_name,
            distributed=distributed,
            output_dir=output_dir,
            num_classes=num_classes,
            ignore_label=ignore_label,
        )
        meta = MetadataCatalog.get(dataset_name)
        try:
            self._evaluation_set = meta.evaluation_set
        except AttributeError:
            self._evaluation_set = None
        self.post_process_func = (
            post_process_func
            if post_process_func is not None
            else lambda x, **kwargs: x
        )

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a model.
                It is a list of dicts. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name".
            outputs: the outputs of a model. It is either list of semantic segmentation predictions
                (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
                segmentation prediction in the same format.
        """
        for input, output in zip(inputs, outputs):
            output = self.post_process_func(
                output["sem_seg"], image=np.array(Image.open(input["file_name"]))
            )
            output = output.argmax(dim=0).to(self._cpu_device)
            # np.int was removed in NumPy 1.24; it aliased the builtin int.
            pred = np.array(output, dtype=int)
            with PathManager.open(
                self.input_file_to_gt_file[input["file_name"]], "rb"
            ) as f:
                gt = np.array(Image.open(f), dtype=int)
            # Fold the "ignore" label into the extra confusion-matrix row.
            gt[gt == self._ignore_label] = self._num_classes
            self._conf_matrix += np.bincount(
                (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
                minlength=self._conf_matrix.size,
            ).reshape(self._conf_matrix.shape)
            self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))

    def evaluate(self):
        """
        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):

        * Mean intersection-over-union averaged across classes (mIoU)
        * Frequency Weighted IoU (fwIoU)
        * Mean pixel accuracy averaged across classes (mACC)
        * Pixel Accuracy (pACC)
        """
        if self._distributed:
            synchronize()
            conf_matrix_list = all_gather(self._conf_matrix)
            self._predictions = all_gather(self._predictions)
            self._predictions = list(itertools.chain(*self._predictions))
            if not is_main_process():
                return
            self._conf_matrix = np.zeros_like(self._conf_matrix)
            for conf_matrix in conf_matrix_list:
                self._conf_matrix += conf_matrix
        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(self._predictions))
        # np.float / np.int / np.bool were removed in NumPy 1.24; use the
        # builtin types (which the old aliases pointed to) instead.
        acc = np.full(self._num_classes, np.nan, dtype=float)
        iou = np.full(self._num_classes, np.nan, dtype=float)
        tp = self._conf_matrix.diagonal()[:-1].astype(float)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        # NOTE(review): IoU is filled for acc_valid entries but mIoU divides by
        # sum(iou_valid); the two masks can differ. Kept as-is to preserve the
        # reported numbers -- confirm this is intended.
        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
        macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
        miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
        fiou = np.sum(iou[acc_valid] * class_weights[acc_valid])
        pacc = np.sum(tp) / np.sum(pos_gt)
        res = {}
        res["mIoU"] = 100 * miou
        res["fwIoU"] = 100 * fiou
        for i, name in enumerate(self._class_names):
            res["IoU-{}".format(name)] = 100 * iou[i]
        res["mACC"] = 100 * macc
        res["pACC"] = 100 * pacc
        for i, name in enumerate(self._class_names):
            res["ACC-{}".format(name)] = 100 * acc[i]
        if self._evaluation_set is not None:
            # Report metrics separately for the "seen" split and its complement.
            for set_name, set_inds in self._evaluation_set.items():
                iou_list = []
                set_inds = np.array(set_inds, int)
                mask = np.zeros((len(iou),)).astype(bool)
                mask[set_inds] = 1
                miou = np.sum(iou[mask][acc_valid[mask]]) / np.sum(iou_valid[mask])
                pacc = np.sum(tp[mask]) / np.sum(pos_gt[mask])
                res["mIoU-{}".format(set_name)] = 100 * miou
                res["pAcc-{}".format(set_name)] = 100 * pacc
                iou_list.append(miou)
                miou = np.sum(iou[~mask][acc_valid[~mask]]) / np.sum(iou_valid[~mask])
                pacc = np.sum(tp[~mask]) / np.sum(pos_gt[~mask])
                res["mIoU-un{}".format(set_name)] = 100 * miou
                res["pAcc-un{}".format(set_name)] = 100 * pacc
                iou_list.append(miou)
                # Harmonic mean of seen/unseen mIoU.
                res["hIoU-{}".format(set_name)] = (
                    100 * len(iou_list) / sum([1 / iou for iou in iou_list])
                )
        if self._output_dir:
            file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(res, f)
        results = OrderedDict({"sem_seg": res})
        self._logger.info(results)
        return results
| python | Apache-2.0 | 7707335cc3f2a1a73d4d2829f3cdbb0e031d3961 | 2026-01-05T07:13:42.671257Z | false |
johngai19/TextDistiller | https://github.com/johngai19/TextDistiller/blob/18d18b8c7845d6a1baee9a011b985853278d0f53/bsCLI.py | bsCLI.py | from preprocess import *
import argparse
import os
from shutil import copyfile
def dir_path(path):
    """argparse type-validator: accept *path* only if it exists on disk.

    Returns the path unchanged when it exists; otherwise raises
    argparse.ArgumentTypeError so argparse prints a clean usage error.
    """
    if not os.path.exists(path):
        raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path")
    return path
if __name__ == '__main__':
    # CLI entry point: copy the given PDF into the Flask app's upload
    # directory under a fixed name and run the summarisation pipeline on it.
    # `app` and `pdfParser` come from `from preprocess import *`.
    parser = argparse.ArgumentParser(description='Enter path of PDF file to summarize')
    parser.add_argument('--path', type=dir_path, help='path of PDF file to summarize')
    args = parser.parse_args()
    # NOTE(review): args.path is None when --path is omitted, which would make
    # copyfile fail -- presumably the flag is expected to always be given.
    copyfile(args.path, app.config['PDF_UPLOADS'] + '/pdf_file.pdf')
    pdfParser(app.config['PDF_UPLOADS'] + '/pdf_file.pdf')
| python | MIT | 18d18b8c7845d6a1baee9a011b985853278d0f53 | 2026-01-05T07:13:40.590988Z | false |
johngai19/TextDistiller | https://github.com/johngai19/TextDistiller/blob/18d18b8c7845d6a1baee9a011b985853278d0f53/mail.py | mail.py | from imports import *
def send_fail(receiver_address):
    """Email a failure notice (body from error_body.txt) to *receiver_address*.

    Sends a plain-text message through Gmail's SMTP server with STARTTLS.
    Any smtplib/OSError raised by the underlying calls propagates to the
    caller.
    """
    # SECURITY(review): credentials are hard-coded placeholders; they belong
    # in configuration or environment variables, not in source.
    sender_address = 'YOUR_EMAIL_HERE'
    sender_pass = 'PASS'
    # Setup the MIME message.
    message = MIMEMultipart()
    message['From'] = "John Ngai"
    message['To'] = receiver_address
    # NOTE(review): subject is identical to the success mail -- confirm this
    # is intended for a failure notification.
    message['Subject'] = 'Chapter Summaries Attached'
    # `with` ensures the template file handle is closed (the original leaked it).
    with open('error_body.txt') as mail_content_file:
        message.attach(MIMEText(mail_content_file.read(), 'plain'))
    session = smtplib.SMTP('smtp.gmail.com', 587)  # use gmail with port
    try:
        session.starttls()  # enable security
        session.login(sender_address, sender_pass)  # login with mail_id and password
        session.sendmail(sender_address, receiver_address, message.as_string())
    finally:
        session.quit()
    print('Fail Mail Sent')
def send_mail(zipfile_name, receiver_address):
    """Email *zipfile_name* as a ZIP attachment to *receiver_address*.

    Builds a multipart message (plain-text body from mail_body.txt plus the
    base64-encoded archive) and sends it via Gmail SMTP with STARTTLS.
    Exceptions from smtplib/file I/O propagate to the caller.
    """
    # SECURITY(review): move these hard-coded credentials to config/env vars.
    sender_address = 'YOUR_EMAIL_HERE'
    sender_pass = 'PASS'
    # Setup the MIME message.
    message = MIMEMultipart()
    message['From'] = "John Ngai"
    message['To'] = receiver_address
    message['Subject'] = 'Chapter Summaries Attached'
    # `with` blocks close both file handles (the original leaked them).
    with open('mail_body.txt') as mail_content_file:
        message.attach(MIMEText(mail_content_file.read(), 'plain'))
    attach_file_name = f'{zipfile_name}'
    payload = MIMEBase('application', 'zip')
    with open(attach_file_name, 'rb') as attach_file:  # open in binary mode
        payload.set_payload(attach_file.read())
    encoders.encode_base64(payload)  # encode the attachment
    # add payload header with filename
    payload.add_header('Content-Disposition', f'attachment; filename= {attach_file_name}')
    message.attach(payload)
    # Create SMTP session for sending the mail.
    session = smtplib.SMTP('smtp.gmail.com', 587)  # use gmail with port
    try:
        session.starttls()  # enable security
        session.login(sender_address, sender_pass)  # login with mail_id and password
        session.sendmail(sender_address, receiver_address, message.as_string())
    finally:
        session.quit()
    print('Mail Sent')
| python | MIT | 18d18b8c7845d6a1baee9a011b985853278d0f53 | 2026-01-05T07:13:40.590988Z | false |
johngai19/TextDistiller | https://github.com/johngai19/TextDistiller/blob/18d18b8c7845d6a1baee9a011b985853278d0f53/views.py | views.py | from imports import *
from preprocess import *
from mail import *
def allowed_pdf(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive (extensions are upper-cased before
    checking against app.config["ALLOWED_EXTENSIONS"]).
    """
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1]
    return extension.upper() in app.config["ALLOWED_EXTENSIONS"]
@app.route("/", methods=["GET"])
def redirect_to_site():
    # The site root simply forwards to the upload form.
    # NOTE(review): string concatenation assumes request.url ends with "/";
    # url_for('upload_pdf') would be more robust.
    return redirect(request.url + 'upload-pdf')
@app.route('/upload-pdf', methods=["GET", "POST"])
def upload_pdf():
    """Render the upload form; on POST accept a PDF and start the pipeline.

    The heavy parsing/summarisation runs in a background Thread so the HTTP
    request returns immediately; results are emailed to the submitted address.
    """
    if request.method == "POST":
        if request.files:
            pdf = request.files["pdf"]
            mail = request.form['email']
            if pdf.filename == "":
                # No file selected: silently re-render the form.
                return render_template('public/upload_pdf.html')
            if not allowed_pdf(pdf.filename):
                # Disallowed extension: silently re-render the form.
                return render_template('public/upload_pdf.html')
            else:
                # Always saved under a fixed name, so concurrent uploads would
                # overwrite each other -- assumes single-user use, TODO confirm.
                filename = 'pdf_file.pdf'
                pdf.save(os.path.join(app.config["PDF_UPLOADS"], filename))
                thread = Thread(target = pdfParser, kwargs={'filename': os.path.join(app.config["PDF_UPLOADS"], 'pdf_file.pdf'), 'mailid': f'{mail}'})
                thread.start()
                return render_template('public/upload_pdf.html')
        # POST without any files attached.
        return redirect(request.url)
    return render_template('public/upload_pdf.html')
if __name__ == '__main__':
    # Run the Flask dev server; the PORT env var overrides the default 9876.
    port = int(os.environ.get("PORT", 9876))
    app.run(host="0.0.0.0", port=port)
| python | MIT | 18d18b8c7845d6a1baee9a011b985853278d0f53 | 2026-01-05T07:13:40.590988Z | false |
johngai19/TextDistiller | https://github.com/johngai19/TextDistiller/blob/18d18b8c7845d6a1baee9a011b985853278d0f53/model.py | model.py | from imports import *
from mail import *
# Load the pretrained T5-small model and tokenizer once at import time;
# all inference runs on CPU.
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')
device = torch.device('cpu')
# Ensure the NLTK punkt tokenizer data (used by sent/word_tokenize) exists.
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')
def cleanText(text):
    """Normalise raw extracted text for summarisation.

    Strips @handles and URLs, removes every character other than letters,
    digits and basic sentence punctuation (. ! ? '), and collapses runs of
    spaces into one.
    """
    text = re.sub(r"@[A-Za-z0-9]+", ' ', text)            # drop @handles
    text = re.sub(r"https?://[A-Za-z0-9./]+", ' ', text)  # drop URLs
    # BUG FIX: the original class was [^a-zA-z...]; the A-z range also matched
    # '[', '\\', ']', '^', '_' and '`', letting them through. Use a-zA-Z.
    text = re.sub(r"[^a-zA-Z.!?'0-9]", ' ', text)
    text = re.sub('\t', ' ', text)                        # tabs -> spaces
    text = re.sub(r" +", ' ', text)                       # collapse spaces
    return text
def getSummary(text, tokenizer):
    """Summarise *text* with the module-level T5 model.

    Prefixes the T5 "summarize:" task marker, runs beam-search generation
    (5 beams, 30-96 tokens) on the module-level `device`, and returns the
    decoded summary string.
    """
    prompt = "summarize: " + text.strip().replace("\n", "")
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    generated = model.generate(input_ids,
                               num_beams=5,
                               no_repeat_ngram_size=2,
                               min_length=30,
                               max_length=96,
                               early_stopping=True)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
def sentenceCorrection(text):
    """Grammar-correct *text* one sentence at a time using GingerIt.

    Splits the input with NLTK's sentence tokenizer, runs each sentence
    through the Ginger grammar checker, and returns the corrected sentences
    joined back together.
    """
    parser = GingerIt()
    corrected = []
    for sentence in sent_tokenize(text, language='english'):
        sentenceDict = parser.parse(sentence)
        corrected.append(str(sentenceDict['result']))
    # BUG FIX: the original concatenated sentences with no separator,
    # producing "end.Start" artifacts; join with a single space instead.
    return " ".join(corrected)
def summaryGeneration(mailid=None):
    """Summarise every extracted chapter file, then zip and deliver results.

    Scans the upload directory for pdf_fileChapter*.txt files produced by
    splitChapters(), splits each chapter into ~400-token chunks (the model's
    input is length-limited), summarises chunk by chunk with T5, grammar-
    corrects the concatenation, writes <chapter>_summary.txt, deletes the
    source chapter, and finally calls makezipAndCleanUp(). On any error a
    failure mail is sent to *mailid* instead.
    """
    try:
        # Collect the chapter files produced by splitChapters().
        txtFiles = []
        for filename in os.listdir(app.config["PDF_UPLOADS"]):
            if fnmatch.fnmatch(filename, 'pdf_fileChapter*.txt'):
                print(filename)
                txtFiles.append(filename)
        for fname in txtFiles:
            summary = ""
            print("Summarising: ", fname)
            text = ""
            with open(os.path.join(app.config['PDF_UPLOADS'] + '/' + fname), 'r', encoding="utf-8") as f:
                # Flatten the chapter into one cleaned, newline-free string.
                textLines = f.readlines()
                for line in textLines:
                    line = cleanText(line)
                    line = line.replace("\n", " ")
                    text += line
                # Chunk by word tokens so each model input stays <= 400 tokens.
                textTokens = word_tokenize(text)
                totalTokens = len(textTokens)
                chunkCounter = 0  # NOTE(review): unused.
                maxTokenLen = 400
                chunkList = []
                start = 0
                end = maxTokenLen
                if (totalTokens % maxTokenLen) == 0:
                    # Even split: every chunk is exactly maxTokenLen tokens.
                    totalChunks = int(totalTokens / maxTokenLen)
                    for i in range(0, totalChunks):
                        tempTokens = textTokens[start:end]
                        chunkText = ' '.join([str(elem) for elem in tempTokens])
                        chunkList.append(chunkText)
                        start = end
                        end += maxTokenLen
                        chunkText = ""
                else:
                    # Uneven split: full chunks first, the remainder last.
                    totalChunks = int(totalTokens / maxTokenLen) + 1
                    for i in range(0, (totalChunks - 1)):
                        tempTokens = textTokens[start:end]
                        chunkText = ' '.join([str(elem) for elem in tempTokens])
                        chunkList.append(chunkText)
                        start = end
                        end += maxTokenLen
                        chunkText = ""
                    tempTokens = textTokens[start:totalTokens]
                    chunkText = ' '.join([str(elem) for elem in tempTokens])
                    chunkList.append(chunkText)
                # Summarise each chunk and concatenate the partial summaries.
                for chunk in chunkList:
                    tempSummary = getSummary(chunk, tokenizer)
                    summary += tempSummary
                summary = sentenceCorrection(summary)
                print("Summarisation complete!")
                fileName = fname[:-4] + "_summary.txt"
                with open(os.path.join(app.config['PDF_UPLOADS'] + '/' + fileName), 'w', encoding="utf-8") as f1:
                    f1.write(summary)
                    print("Summary written to file!")
                    f1.close()  # redundant: the `with` already closes f1
                f.close()  # redundant: the `with` already closes f
            # Source chapter is no longer needed once its summary exists.
            os.remove(os.path.join(app.config['PDF_UPLOADS'] + '/' + fname))
        makezipAndCleanUp(mailid)
    except Exception as e:
        print(e)
        send_fail(mailid)
def makezipAndCleanUp(mailid=None):
    """Zip all files in the upload folder, empty it, and deliver the archive.

    When *mailid* is given the zip is emailed via send_mail(); otherwise a
    message tells the user where the local archive was written.
    """
    # Compress everything left in the upload directory into one archive.
    shutil.make_archive('summarized_chapters', 'zip', app.config['PDF_UPLOADS'])
    # Remove the now-archived working files.
    for entry in os.listdir(app.config['PDF_UPLOADS']):
        os.remove(os.path.join(app.config['PDF_UPLOADS'] + '/' + entry))
    if mailid is None:
        print('\nChapter-wise Summaries stored in summarized_chapters.zip')
    else:
        send_mail('summarized_chapters.zip', mailid)
| python | MIT | 18d18b8c7845d6a1baee9a011b985853278d0f53 | 2026-01-05T07:13:40.590988Z | false |
johngai19/TextDistiller | https://github.com/johngai19/TextDistiller/blob/18d18b8c7845d6a1baee9a011b985853278d0f53/imports.py | imports.py | import fnmatch
import io
import json
import nltk
import os
import re
import shutil
import smtplib
import sys
import torch
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from flask import flash, Flask, render_template, request, redirect, send_file
from flask_socketio import SocketIO
from gingerit.gingerit import GingerIt
from nltk.tokenize import sent_tokenize, word_tokenize
from pdfminer.converter import HTMLConverter, TextConverter, XMLConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from threading import Thread
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
from werkzeug.utils import secure_filename
app = Flask(__name__)
# Directory where uploaded PDFs and intermediate txt/summary files live.
app.config["PDF_UPLOADS"] = "static/pdf/uploads"
# Upload whitelist; extensions are compared upper-cased (see allowed_pdf).
app.config["ALLOWED_EXTENSIONS"] = ["PDF"]
# Reject request bodies larger than 20 MiB.
app.config["MAX_CONTENT_LENGTH"] = 20 * 1024 * 1024
| python | MIT | 18d18b8c7845d6a1baee9a011b985853278d0f53 | 2026-01-05T07:13:40.590988Z | false |
johngai19/TextDistiller | https://github.com/johngai19/TextDistiller/blob/18d18b8c7845d6a1baee9a011b985853278d0f53/preprocess.py | preprocess.py | from imports import *
from model import *
def splitChapters(filename, mailid=None):
    """Split the extracted book text into per-chapter txt files.

    Reads <filename minus extension>.txt, skips past the table of contents,
    and writes each "CHAPTER"-delimited section to <base>Chapter<N>.txt.
    Books with no chapter markers are written whole to <base>ChapterAll.txt.
    Always finishes by invoking summaryGeneration(mailid).
    """
    checkFlag = 0       # set once a contents page has been seen
    skipCount = 0       # lines still to skip after the contents page
    flag = 0            # set once any "CHAPTER" marker has been found
    chapterNumber = 0
    filename = filename[:-4]  # strip the ".pdf"/".txt" extension
    file = filename + '.txt'
    with open(file, 'r', encoding='utf-8') as f1:
        lines = f1.readlines()
        print('Total Number of Lines:', len(lines))
        for line in lines:
            words = ['CONTENTS', 'Contents']
            ignoreWords = ['ACKNOWLEDGEMENT', 'INDEX', 'Subject Index']
            tokens = line.split()
            check = any(item in words for item in tokens)
            if check is True:
                # Skip a fixed window of 40 lines after the contents heading
                # so TOC entries are not mistaken for chapter starts.
                print('Contents page found!\n')
                checkFlag = 1
                skipCount = 40
                continue
            elif checkFlag == 1 and skipCount > 0:
                skipCount -= 1
                continue
            pattern = re.compile(r'CHAPTER')
            foundChapter = re.search(pattern, line)
            if foundChapter:
                # New chapter begins on the following line.
                flag = 1
                chapterNumber += 1
                counter = 0
                continue
            elif flag == 1:
                if counter == 0:
                    # First line of a chapter: create (truncate) its file.
                    counter += 1
                    print('Chapter', chapterNumber, 'found! Writing to a txt file')
                    file = filename + 'Chapter' + str(chapterNumber) + '.txt'
                    with open(file, 'w', encoding='utf-8') as f2:
                        f2.write(line)
                        f2.close()  # redundant: `with` already closes f2
                else:
                    # Subsequent lines are appended.
                    print('Writing chapter', chapterNumber, '!\n')
                    file = filename + 'Chapter' + str(chapterNumber) + '.txt'
                    with open(file, 'a', encoding='utf-8') as f2:
                        f2.write(line)
                        f2.close()  # redundant: `with` already closes f2
                continue
            # Stop once back-matter (index/acknowledgement) begins.
            ignoreCheck = any(item in ignoreWords for item in tokens)
            if ignoreCheck is True:
                print('All Chapters written!\n')
                break
        if flag == 0:
            # No chapter markers at all: treat the whole book as one chapter.
            print('No chapters in book! Writing entire book!')
            with open(filename + 'ChapterAll.txt', 'w', encoding="utf-8") as f2:
                f2.writelines(lines)
                f2.close()  # redundant: `with` already closes f2
            print("Done writing!")
        f1.close()  # redundant: `with` already closes f1
    try:
        # Remove the intermediate upload artifacts; best-effort only.
        os.remove(os.path.join(app.config['PDF_UPLOADS'] + '/pdf_file.pdf'))
        os.remove(os.path.join(app.config['PDF_UPLOADS'] + '/pdf_file.txt'))
    except Exception as e:
        print(e)
        pass
    finally:
        summaryGeneration(mailid)
def pdfParser(filename, mailid=None):
    """Convert the PDF at *filename* to plain text and start the pipeline.

    Extracts all pages with pdfminer into <filename minus .pdf>.txt, then
    hands off to splitChapters() (which eventually emails *mailid*).
    """
    rsrcmgr = PDFResourceManager()
    retstr = io.StringIO()
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, laparams=laparams)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    # `with` closes the PDF handle even if a page fails to parse
    # (the original leaked it).
    with open(filename, 'rb') as fp:
        for page in PDFPage.get_pages(fp, check_extractable=False):
            interpreter.process_page(page)
        data = retstr.getvalue()
    # Release the pdfminer converter and the text buffer.
    device.close()
    retstr.close()
    print('Converting PDF to txt file.')
    file = filename[:-4] + '.txt'
    with open(file, 'w', encoding='utf-8') as f:
        f.write(data)
    print('Successfully converted PDF to txt.')
    splitChapters(filename, mailid)
| python | MIT | 18d18b8c7845d6a1baee9a011b985853278d0f53 | 2026-01-05T07:13:40.590988Z | false |
tunz/binch | https://github.com/tunz/binch/blob/b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34/setup.py | setup.py | from setuptools import setup, find_packages
# Runtime dependencies (fed to install_requires below).
# NOTE(review): `py_modules` is a misleading name -- these are pip
# requirement specifiers, not the distutils py_modules option.
py_modules = [
    'pyelftools',
    'capstone',
    'keystone-engine',
    'urwid',
    'blinker',
    'progressbar2'
]

# Package metadata; the console_scripts entry installs a `binch` command
# that invokes binchlib.main:binch.
setup (
    name = 'binch',
    version = '0.3.1',
    description = 'a light ELF binary patch tool',
    author = 'Choongwoo Han',
    author_email = 'cwhan.tunz@gmail.com',
    url = 'https://github.com/tunz/binch',
    license = 'MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Security',
        'Topic :: Software Development',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Environment :: Console :: Curses',
        'Operating System :: MacOS',
        'Operating System :: POSIX :: Linux'
    ],
    keywords = 'disassemble binary patch',
    packages = find_packages(),
    install_requires = py_modules,
    entry_points = {
        'console_scripts': ['binch = binchlib.main:binch']
    }
)
| python | MIT | b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34 | 2026-01-05T07:13:47.889227Z | false |
tunz/binch | https://github.com/tunz/binch/blob/b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34/binchlib/view.py | binchlib/view.py | import urwid
from .disassemble import *
from .statusbar import *
from . import signals
from capstone.x86 import X86_OP_IMM
from capstone.arm import ARM_OP_IMM
import traceback
import progressbar
import sys
import re
class DisassembleText(urwid.Text):
    """Read-only text row in the disassembly listing.

    Never takes focus and ignores every key press, so the cursor skips
    straight over it.
    """

    def selectable(self):
        # Not focusable.
        return False

    def keypress(self, size, key):
        # Pass every key through unhandled.
        return key
class DisassembleInstruction(urwid.WidgetWrap):
    """One editable disassembly row: address, hex bytes, mnemonic, operands.

    Three display modes: plain, assembly-edit (enter key) and hex-edit ('h').
    Consecutive identical nop instructions are collapsed into one row with a
    repeat count. Edits are assembled/parsed and written back through the
    shared Disassembler, refreshing the whole listing when a patch spills
    over following instructions.
    """

    def __init__(self, instr, disasmblr, view):
        urwid.WidgetWrap.__init__(self, None)
        self.instruction = instr                     # capstone instruction object
        self.hexcode = list(self.instruction.bytes)  # raw opcode bytes
        self.isthumb = disasmblr.is_thumb_instr(instr)
        self._editbox = None       # urwid.Edit used in assembly-edit mode
        self._hexeditbox = None    # urwid.Edit used in hex-edit mode
        self.edit_mode = False
        self.hex_edit_mode = False
        self.disasmblr = disasmblr
        self.view = view
        self.repeat = 1            # > 1 when consecutive nops are collapsed
        self.mode_plain()

    def selectable(self):
        return True

    def mode_plain(self):
        # Render "address | hex bytes | mnemonic | operands [.. repeat]".
        repeat_str = ""
        if self.repeat > 1:
            repeat_str = " .. (repeat %d times)" % self.repeat
        # rstrip('L') drops the Python 2 long-literal suffix from hex().
        self._w = urwid.Columns([('fixed', 102, urwid.Text("%s%s%s%s" % (
            hex(self.instruction.address).rstrip('L').ljust(11, ' ')+' ',
            ' '.join(["%02x" % j for j in self.hexcode*self.repeat]).ljust(27, ' ')+' ',
            self.instruction.mnemonic.ljust(7, ' ')+' ',
            self.instruction.op_str + repeat_str))
        )])
        self._w = urwid.AttrMap(self._w, 'bg', 'reveal focus')

    def mode_edit1(self):
        # Assembly-edit layout: static address/opcode, editable mnemonic+ops.
        self.address = urwid.Text(hex(self.instruction.address).rstrip('L'))
        self.opcode = urwid.Text(' '.join(["%02x" % j for j in self.hexcode*self.repeat]))
        self._w = urwid.Columns([
            ('fixed', 12, self.address),
            ('fixed', 28, self.opcode),
            ('fixed', 62, self._editbox)
        ])
        self._w = urwid.AttrMap(self._w, 'bg', 'reveal focus')

    def mode_edit2(self):
        # Hex-edit layout: static address/disassembly, editable hex bytes.
        self.address = urwid.Text(hex(self.instruction.address).rstrip('L'))
        self.instr = urwid.Text("%s%s" % (self.instruction.mnemonic.ljust(8, ' '), self.instruction.op_str))
        self._w = urwid.Columns([
            ('fixed', 12, self.address),
            ('fixed', 28, self._hexeditbox),
            ('fixed', 62, self.instr)
        ])
        self._w = urwid.AttrMap(self._w, 'bg', 'reveal focus')

    def modify_opcode(self, opcode, original_opcode=None):
        """Write the raw bytes *opcode* over this instruction.

        Shorter patches are nop-padded to the original length; longer ones
        are nop-padded up to the next instruction boundary. Refreshes just
        this row when the patch fits, otherwise asks before redrawing the
        whole listing (the patch clobbers following instructions).
        """
        if opcode == "":
            self.mode_plain()
            return
        if original_opcode == None:
            # NOTE(review): joining chr() of byte values is Python 2 style;
            # under Python 3 this produces str, not bytes.
            original_opcode = ''.join(map(chr, self.hexcode*self.repeat))
        original_opcode_len = len(original_opcode)
        if len(opcode) < original_opcode_len:
            if self.disasmblr.arch == 'ARM':
                opcode = opcode.ljust(original_opcode_len, "\x00") # Fill with nop
            else:
                opcode = opcode.ljust(original_opcode_len, "\x90") # Fill with nop
        elif len(opcode) > original_opcode_len:
            # Patch is longer than the instruction: walk the following code
            # to find how many whole instructions it overwrites, then pad to
            # that boundary.
            safe_opcode_len = 0
            opcode_data = self.disasmblr.read_memory(self.instruction.address, 0x20)
            md = self.disasmblr.md if not self.isthumb else self.disasmblr.t_md
            disasm_code = md.disasm(opcode_data, 0x20)
            for i in disasm_code:
                if len(opcode) > safe_opcode_len:
                    safe_opcode_len += len(i.bytes)
            if self.disasmblr.arch == 'ARM':
                opcode = opcode.ljust(safe_opcode_len, "\x00") # Fill with nop
            else:
                opcode = opcode.ljust(safe_opcode_len, "\x90") # Fill with nop
        self.disasmblr.write_memory(self.instruction.address, opcode)
        # Re-disassemble the patched bytes in the right (ARM/Thumb) mode.
        repeat = 0
        if self.isthumb:
            codes = [i for i in self.disasmblr.t_md.disasm(opcode, self.instruction.address)]
        else:
            codes = [i for i in self.disasmblr.md.disasm(opcode, self.instruction.address)]
        if self.disasmblr.arch in ['x86','x64']:
            NOPCODE = [0x90]
        elif self.disasmblr.arch == 'ARM':
            NOPCODE = [0x00, 0x00]
        # Collapse an all-nop patch back into a single repeated row.
        nopcode_repeat = True
        for c in codes:
            repeat += 1
            if list(c.bytes) != NOPCODE:
                nopcode_repeat = False
                break
        if nopcode_repeat:
            codes = codes[:1]
            self.repeat = repeat
        else:
            self.repeat = 1
        if original_opcode_len == len(opcode) and len(codes) == 1:
            # In-place patch that still decodes to one instruction.
            code = codes[0]
            if (len(code.operands) == 1 and
                ((self.disasmblr.arch in ['x86','x64'] and code.operands[0].type == X86_OP_IMM) or
                 (self.disasmblr.arch == 'ARM' and code.operands[0].type == ARM_OP_IMM))):
                # Immediate operand (branch/call target) may have changed:
                # refresh the listing so cross-references stay correct.
                self.view.update_list(self.view.disasmlist._w.focus_position)
            self.instruction = code
            self.hexcode = list(self.instruction.bytes)
            self.mode_plain()
        else:
            # The patch spans neighbouring instructions: confirm before
            # committing, otherwise restore the original bytes.
            def update_all(yn, arg):
                if yn == "y":
                    self.view.update_list(self.view.disasmlist._w.focus_position)
                else:
                    self.modify_opcode(original_opcode)
            signals.set_prompt_yn.send(self,
                text="This operation will break following codes, is it okey?",
                callback=update_all,
                arg=None
            )

    def repeat_inc(self):
        # Called by the view when another identical nop is folded into this row.
        self.repeat += 1
        self.mode_plain()

    def keypress(self, size, key):
        """Dispatch keys depending on the current mode.

        Plain mode: enter = assembly edit, 'h' = hex edit, 'f' = follow
        branch/call target, 'd' = nop out the line, j/k = move. Edit modes:
        esc cancels, enter commits, other keys go to the edit box.
        """
        if self.edit_mode:
            if key == "esc":
                self.edit_mode = False
                self.mode_plain()
            elif key == "enter":
                # Assemble the edited mnemonic/operands and patch it in.
                self.edit_mode = False
                asmcode = self._editbox.get_edit_text()
                is_thumb_code = True if self.disasmblr.arch == 'ARM' and self.isthumb else False
                opcode = self.disasmblr.asm(asmcode, thumb=is_thumb_code)
                self.modify_opcode(opcode)
            elif isinstance(key, basestring):
                # NOTE(review): basestring is Python 2 only.
                self._w.keypress(size, key)
            else:
                return key
        elif self.hex_edit_mode:
            if key == "esc":
                self.hex_edit_mode = False
                self.mode_plain()
            elif key == "enter":
                self.hex_edit_mode = False
                hexcode = self._hexeditbox.get_edit_text()
                original_hexcode = ''.join(map(chr, self.hexcode*self.repeat))
                try:
                    # NOTE(review): str.decode('hex') is Python 2 only.
                    opcode = hexcode.replace(' ','').decode('hex')
                    self.modify_opcode(opcode, original_hexcode)
                except Exception as e:
                    # Bad hex input: restore the original bytes and report.
                    msg = "Error: "+str(e)
                    self.modify_opcode(original_hexcode, original_hexcode)
                    signals.set_message.send(0, message=msg, expire=2)
                    self.mode_plain()
            elif isinstance(key, basestring):
                self._w.keypress(size, key)
            else:
                return key
        else:
            if key == "enter":
                self._editbox = urwid.Edit("", "%s%s" % (self.instruction.mnemonic.ljust(8, ' '),
                    self.instruction.op_str))
                self.mode_edit1()
                self.edit_mode = True
            elif key == "h":
                self._hexeditbox = urwid.Edit("", ' '.join(["%02x" % j for j in self.instruction.bytes]))
                self.mode_edit2()
                self.hex_edit_mode = True
            elif key == "f":
                # Follow an immediate branch/call target.
                followAddress = False
                mnemonic = self.instruction.mnemonic
                if self.disasmblr.arch in ['x86', 'x64'] and (mnemonic[0] == 'j' or mnemonic == 'call'):
                    if self.instruction.operands[0].type == X86_OP_IMM:
                        followAddress = True
                elif self.disasmblr.arch == 'ARM' and mnemonic[0] == 'b':
                    if self.instruction.operands[0].type == ARM_OP_IMM:
                        followAddress = True
                if followAddress:
                    address = int(self.instruction.op_str.lstrip('#'), 16)
                    try:
                        self.view.disasmlist.set_focus(self.view.index_map[address])
                        self.view.history.append(self.instruction.address)
                        msg = "Jump to "+hex(address)
                        signals.set_message.send(0, message=msg, expire=1)
                    except:
                        msg = "Error: Fail to jump... please report it"
                        signals.set_message.send(0, message=msg, expire=2)
            elif key == "d" or key == "D":
                # Replace the instruction with nops after confirmation.
                def fill_with_nop(yn, arg):
                    if yn == 'y':
                        if self.disasmblr.arch == 'ARM':
                            self.modify_opcode("\x00")
                        else:
                            self.modify_opcode("\x90")
                signals.set_prompt_yn.send(self, text="Remove this line?", callback=fill_with_nop, arg=None)
            else:
                # vi-style navigation for unhandled keys.
                if key == "j" or key == "J":
                    key = "down"
                elif key == "k" or key == "K":
                    key = "up"
                return key
class SymbolText(urwid.Text):
    """Non-focusable row showing a symbol name (e.g. "< main >") in the listing."""

    def selectable(self):
        # Never take focus.
        return False

    def keypress(self, size, key):
        # Ignore all keys.
        return key
class DisassembleList(urwid.WidgetWrap):
    """Scrollable urwid ListBox wrapper holding the disassembly rows."""

    def __init__(self, dList):
        urwid.WidgetWrap.__init__(self, None)
        self.update_list(dList)

    def set_focus(self, idx):
        """Move focus to the row at index *idx*."""
        self._w.set_focus(idx)

    def update_list(self, dList, focus=0):
        """Replace all rows with *dList*, optionally restoring *focus*."""
        self._w = urwid.ListBox(urwid.SimpleListWalker(dList))
        if focus:
            self._w.set_focus(focus)

    def selectable(self):
        return True

    def keypress(self, size, key):
        # BUG FIX: super(self.__class__, self) recurses forever if this class
        # is ever subclassed; name the class explicitly (py2-compatible form).
        key = super(DisassembleList, self).keypress(size, key)
        # vi-style navigation for keys the list itself did not consume.
        if key == "j":
            key = "down"
        elif key == "k":
            key = "up"
        return key
class DisassembleWindow(urwid.Frame):
    """Top-level frame: header text, disassembly body, status-bar footer."""

    def __init__(self, view, body, header, footer):
        urwid.Frame.__init__(
            self, body,
            header if header else None,
            footer if footer else None
        )
        self.view = view
        # Let other widgets move the frame focus via the shared signal.
        signals.focus.connect(self.sig_focus)

    def sig_focus(self, sender, section):
        """Signal handler: focus the given frame section ('header'/'body'/'footer')."""
        self.focus_position = section

    def keypress(self, size, key):
        # BUG FIX: super(self.__class__, self) loops infinitely in subclasses;
        # reference the class explicitly (py2-compatible form).
        key = super(DisassembleWindow, self).keypress(size, key)
        return key
class DisassembleView:
    """Main TUI controller: builds the widget tree and handles global keys."""

    # Urwid color palette (name, foreground, background[, emphasis]).
    palette = [('header', 'white', 'black'),
        ('reveal focus', 'black', 'light gray', 'standout'),
        ('status', 'white', 'dark blue', 'standout')]

    def __init__(self, filename):
        self.header = urwid.Text(" BINCH: %s" % (filename))
        self.disasmblr = Disassembler(filename)
        # Build all rows up front (with a progress bar on first load).
        items = self.setup_list(True)
        self.disasmlist = DisassembleList(items)
        # Start focused on the ELF entry point when it is in the listing.
        start_index = self.find_index(self.disasmblr.entry)
        if start_index != -1:
            self.disasmlist.set_focus(start_index)
        self.history = list()  # jump history for esc-to-go-back
        self.body = urwid.Padding(self.disasmlist, 'center', 105)
        self.body = urwid.Filler(self.body, ('fixed top',1), ('fixed bottom',1))
        self.footer = StatusBar("HotKeys -> g: Go to a address | s: Save | d: Remove | enter: Modify | q: Quit", self)
        self.view = DisassembleWindow(self,
            urwid.AttrWrap(self.body, 'body'),
            urwid.AttrWrap(self.header, 'head'),
            self.footer)
        signals.call_delay.connect(self.sig_call_delay)

    def find_index(self, address):
        """Return the listing index for *address*, or -1 when unknown.

        Thumb addresses carry bit0 set; it is cleared before the lookup.
        """
        try:
            if self.disasmblr.is_thumb_addr(address):
                return self.index_map[address & -2]
            else:
                return self.index_map[address]
        except KeyError:
            return -1

    def setup_list(self, show_progressbar = False):
        """Disassemble every code section and build the row widgets.

        Also (re)builds self.index_map: instruction address -> row index.
        Runs of identical nops are folded into the previous row's repeat
        count instead of getting their own rows.
        """
        if self.disasmblr.arch in ['x86','x64']:
            NOPCODE = [0x90]
        elif self.disasmblr.arch == 'ARM':
            NOPCODE = [0x00, 0x00]
        body = []
        for code in self.disasmblr.code_addrs:
            body.extend(self.disasmblr.disasm(code['address'], code['size']))
        items = []
        idx = 0
        self.index_map = dict()
        if show_progressbar:
            instr_list = progressbar.ProgressBar(widgets=[progressbar.Percentage(), ' ',
                progressbar.Bar(), ' ', progressbar.ETA()])(body)
        else:
            instr_list = body
        for i in instr_list:
            address = i.address
            # Look up a function symbol at this address (thumb symbols are
            # stored at address-1 because bit0 marks thumb mode).
            symbol = None
            try: symbol = self.disasmblr.symtab[address]
            except:
                if self.disasmblr.is_thumb_instr(i):
                    try: symbol = self.disasmblr.symtab[address - 1]
                    except: pass
            if symbol:
                items.append(SymbolText(" "))
                items.append(SymbolText(" < %s >" % symbol))
                idx+=2
            hexcode = list(i.bytes)
            if hexcode == NOPCODE and (isinstance(items[-1], DisassembleInstruction) and items[-1].hexcode == NOPCODE):
                # Fold consecutive nops into the previous row.
                items[-1].repeat_inc()
            else:
                items.append(DisassembleInstruction(i, self.disasmblr, self))
                self.index_map[address] = idx
                idx+=1
        # Erase the progress-bar line from the terminal.
        sys.stdout.write("\033[F")
        return items

    def update_list(self, focus=0):
        """Rebuild all rows (after a patch) and restore *focus*."""
        items = self.setup_list()
        self.disasmlist.update_list(items, focus)

    def update_status(self, *arg):
        # Periodic status-bar repaint, rescheduled every 30 ms.
        signals.redraw_status.send(self)
        self.loop.set_alarm_in(0.03, self.update_status)

    def main(self):
        """Run the urwid main loop until quit; dump a traceback on crash."""
        self.loop = urwid.MainLoop(self.view, self.palette,
            handle_mouse=False,
            unhandled_input=self.unhandled_input)
        self.loop.set_alarm_in(0.03, self.update_status)
        try:
            self.loop.run()
        except:
            self.loop.stop()
            print(traceback.format_exc())

    def unhandled_input(self, k):
        """Global hotkeys: q=quit, g=goto address, s=save, esc=jump back."""
        def goto(text):
            # Evaluate a simple address expression (hex/dec with +/-).
            # SECURITY(review): eval() on user input -- the regex restricts
            # the charset, but int()-based parsing would be safer.
            try:
                if bool(re.match(r'^([0-9]|0x[0-9a-fA-F]+|\+|\-| )+$',text)):
                    address = eval(text)
                else:
                    return "It is invalid number: "+text
            except:
                return "Fail to calculate address: "+text
            if address in self.index_map:
                self.history.append(self.disasmlist._w.body[self.disasmlist._w.focus_position].instruction.address)
                self.disasmlist.set_focus(self.index_map[address])
                return "Jump to "+hex(address)
            else:
                # Not an instruction start: snap to the nearest mapped
                # address within +/-15 bytes.
                for i in range(1, 0x10):
                    if address - i in self.index_map:
                        self.history.append(self.disasmlist._w.body[self.disasmlist._w.focus_position].instruction.address)
                        self.disasmlist.set_focus(self.index_map[address - i])
                        return "Jump to "+hex(address - i)
                    elif address + i in self.index_map:
                        self.history.append(self.disasmlist._w.body[self.disasmlist._w.focus_position].instruction.address)
                        self.disasmlist.set_focus(self.index_map[address + i])
                        return "Jump to "+hex(address + i)
                return "Invalid address: "+hex(address)
        if k in ('q', 'Q'):
            def ask_quit(yn, arg):
                if yn == 'y':
                    raise urwid.ExitMainLoop()
            signals.set_prompt_yn.send(self, text="Quit?", callback=ask_quit, arg=None)
        elif k in ('g', 'G'):
            signals.set_prompt.send(self, text="Goto: ", callback=goto)
        elif k in ('s', 'S'):
            # NOTE(review): Disassembler.save() is defined outside this chunk.
            self.disasmblr.save()
        elif k == "esc":
            # Pop the last jump location and return to it.
            if len(self.history) > 0:
                address = self.history[-1]
                del self.history[-1]
                self.disasmlist.set_focus(self.index_map[address])

    def sig_call_delay(self, sender, seconds, callback):
        """Signal handler: run *callback* after *seconds* on the main loop."""
        def cb(*_):
            return callback()
        self.loop.set_alarm_in(seconds, cb)
| python | MIT | b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34 | 2026-01-05T07:13:47.889227Z | false |
tunz/binch | https://github.com/tunz/binch/blob/b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34/binchlib/signals.py | binchlib/signals.py | import blinker
# Application-wide blinker signals that decouple the UI widgets.
focus = blinker.Signal()          # move frame focus ('header'/'body'/'footer')
set_prompt = blinker.Signal()     # show a free-text prompt in the status bar
set_prompt_yn = blinker.Signal()  # show a yes/no confirmation prompt
set_message = blinker.Signal()    # flash a status message (optionally expiring)
redraw_status = blinker.Signal()  # ask the status bar to repaint
call_delay = blinker.Signal()     # schedule a callback on the main loop
| python | MIT | b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34 | 2026-01-05T07:13:47.889227Z | false |
tunz/binch | https://github.com/tunz/binch/blob/b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34/binchlib/main.py | binchlib/main.py | #!/usr/bin/env python
from .view import DisassembleView
import argparse
import sys
import os
def binch(args=None):
    """Console entry point: parse arguments and launch the patcher UI.

    Parameters
    ----------
    args : list of str, optional
        Argument vector (for testing); None means use sys.argv[1:].
    """
    parser = argparse.ArgumentParser(description='A light ELF binary patch tool.')
    parser.add_argument('filename', metavar='filename', type=str,
        help='a binary filename to patch')
    # BUG FIX: the `args` parameter was accepted but ignored; forward it so
    # the entry point is callable programmatically (None still falls back to
    # sys.argv).
    args = parser.parse_args(args)
    filepath = os.path.abspath(args.filename)
    if os.path.isfile(filepath):
        DisassembleView(filepath).main()
    else:
        print("[-] There is no file: %s" % (filepath))
| python | MIT | b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34 | 2026-01-05T07:13:47.889227Z | false |
tunz/binch | https://github.com/tunz/binch/blob/b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34/binchlib/__init__.py | binchlib/__init__.py | python | MIT | b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34 | 2026-01-05T07:13:47.889227Z | false | |
tunz/binch | https://github.com/tunz/binch/blob/b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34/binchlib/statusbar.py | binchlib/statusbar.py | from . import signals
import view
import urwid
class CommandLine(urwid.WidgetWrap):
    """Status-bar input line driven by the shared blinker signals.

    Operates in one of three states: idle (shows a message or nothing),
    free-text prompt (self.prompt_callback set) or yes/no prompt
    (self.prompt_yn_callback set). Callbacks may return a message string
    to flash, or a (message, callback, arg) tuple to chain a y/n prompt.
    """

    def __init__(self):
        urwid.WidgetWrap.__init__(self, None)
        self.clear()
        signals.set_prompt.connect(self.sig_prompt)
        signals.set_prompt_yn.connect(self.sig_prompt_yn)
        signals.set_message.connect(self.sig_message)
        # False means "no prompt active" (doubles as the state flag).
        self.prompt_callback = False
        self.prompt_yn_callback = False

    def clear(self):
        # Reset to an empty, non-editable line.
        self._w = urwid.Text("")

    def sig_message(self, sender, message, expire=None):
        """Show *message*; when *expire* is set, clear it after that many seconds."""
        w = urwid.Text(message)
        self._w = w
        if expire:
            def cb(*args):
                # Only clear if no newer widget replaced ours meanwhile.
                if w == self._w:
                    self.clear()
            signals.call_delay.send(seconds=expire, callback=cb)

    def sig_prompt(self, sender, text, callback):
        """Open a free-text prompt; *callback(text)* runs on enter."""
        self.prompt_yn_callback = False
        signals.focus.send(self, section='footer')
        self._w = urwid.Edit(text, "")
        self.prompt_callback = callback

    def sig_prompt_yn(self, sender, text, callback, arg):
        """Open a yes/no prompt; *callback(yn, arg)* runs on y/n."""
        self.prompt_callback = False
        signals.focus.send(self, section='footer')
        self.ask_yn(text, callback, arg)

    def ask_yn(self, text, callback, arg):
        self._w = urwid.Edit(text + " (y/n):", '')
        self.prompt_yn_callback = (callback, arg)

    def prompt(self, text):
        # Commit a free-text prompt: invoke the callback and either chain a
        # y/n question (tuple result) or flash the returned message.
        msg = self.prompt_callback(text)
        self.prompt_callback = False
        if isinstance(msg, tuple):
            msg, callback, arg = msg
            self.ask_yn(msg, callback, arg)
        else:
            signals.focus.send(self, section='body')
            if isinstance(msg, str):
                signals.set_message.send(self, message=msg, expire=1)

    def prompt_yn(self, yn):
        # Commit a yes/no prompt with answer *yn* ('y' or 'n').
        func, arg = self.prompt_yn_callback
        msg = func(yn, arg)
        signals.focus.send(self, section='body')
        self.prompt_yn_callback = False
        if msg:
            signals.set_message.send(self, message=msg, expire=1)
        else:
            self.clear()

    def prompt_clear(self):
        # Cancel any active prompt and give focus back to the body.
        self.prompt_callback = False
        self.prompt_yn_callback = False
        signals.focus.send(self, section='body')
        self.clear()

    def selectable(self):
        return True

    def keypress(self, size, k):
        if self.prompt_callback:
            if k == "esc":
                self.prompt_clear()
            elif k == "enter":
                self.prompt(self._w.get_edit_text())
            elif isinstance(k, basestring):
                # NOTE(review): basestring is Python 2 only.
                self._w.keypress(size, k)
            else:
                return k
        elif self.prompt_yn_callback:
            if k == "esc":
                self.prompt_clear()
            elif k == "y" or k == "Y":
                self.prompt_yn('y')
            elif k == "n" or k == "N":
                self.prompt_yn('n')
class StatusBar(urwid.WidgetWrap):
    """Footer widget: hotkey help line (plus ARM/Thumb mode) over a CommandLine."""

    def __init__(self, text, view):
        urwid.WidgetWrap.__init__(self, None)
        self.view = view                  # owning DisassembleView
        self.commandline = CommandLine()  # prompt/message input line
        self.default_text = text          # static hotkey help text
        self.update_status()
        signals.redraw_status.connect(self.sig_redraw_status)

    def sig_redraw_status(self, sender):
        """Signal handler: repaint (called periodically by the main loop)."""
        self.update_status()

    def update_status(self):
        # On ARM binaries, show whether the focused row is ARM or Thumb code.
        if self.view.disasmblr.arch == 'ARM' and isinstance(self.view.disasmlist._w.focus, view.DisassembleInstruction):
            if self.view.disasmlist._w.focus.isthumb:
                mode = "[Thumb]"
            else:
                mode = "[ ARM ]"
            self.status = urwid.Columns([
                urwid.WidgetWrap(urwid.Text(self.default_text)),
                ('fixed', 20, urwid.WidgetWrap(urwid.Text(mode)))
            ])
        else:
            self.status = urwid.WidgetWrap(urwid.Text(self.default_text))
        self.status = urwid.AttrMap(self.status, 'status')
        self._w = urwid.Pile([self.status, self.commandline])

    def keypress(self, *args, **kwargs):
        # All key handling is delegated to the command line.
        return self.commandline.keypress(*args, **kwargs)

    def selectable(self):
        return True
| python | MIT | b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34 | 2026-01-05T07:13:47.889227Z | false |
tunz/binch | https://github.com/tunz/binch/blob/b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34/binchlib/disassemble.py | binchlib/disassemble.py | from capstone import *
from keystone import *
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import sys, os
from . import signals
class Disassembler():
memory = []
    def __init__(self, filename):
        """Load the ELF at *filename* and build capstone/keystone engines."""
        self.filename = filename
        self.loadELF(filename)
        self.init_disasmblr()
        self.init_asmblr()
def read_memory(self, address, size):
for vaddr, foffset, memsize, mem in self.memory:
if address >= vaddr and address <= vaddr + memsize:
if size:
return mem[address - vaddr : address - vaddr + size]
else:
return mem[address - vaddr:]
return ""
    def write_memory(self, address, data):
        """Patch *data* into the in-memory copy of the file at *address*.

        Locates the segment containing the corresponding file offset and
        rewrites its byte string; writes past the currently loaded bytes are
        appended (growing memsize).
        """
        offset = self.addr2offset(address)
        for idx, (vaddr, foffset, memsize, mem) in enumerate(self.memory):
            if offset >= foffset and offset <= foffset + memsize:
                # Strings are immutable, so edit through a list and re-join.
                # NOTE(review): ''.join over string bytes is Python 2 style.
                mem=list(mem)
                for i in range(0, len(data)):
                    if offset - foffset + i < len(mem):
                        mem[offset - foffset + i] = data[i]
                    else:
                        mem.append(data[i])
                        memsize+=1
                self.memory[idx] = (vaddr, foffset, memsize, ''.join(mem))
def addr2offset(self, address):
for vaddr, foffset, memsize, mem in self.memory:
if address >= vaddr and address <= vaddr + memsize:
return address - vaddr + foffset
return -1
    def load_code_segments(self, segments, filename):
        """Read every PT_LOAD segment of *filename* into memory.

        Returns a list of (vaddr, file_offset, memsize, data) tuples with
        each mapping expanded to 0x1000-page alignment, mirroring how the
        loader maps ELF segments.
        """
        memory = []
        for elf_segment in segments:
            if elf_segment.header.p_type != 'PT_LOAD':
                continue
            align = 0x1000
            # Back vaddr/offset down to the page boundary and grow the sizes
            # by the same slack, then round memsz up to a whole page.
            ELF_PAGEOFFSET = elf_segment.header.p_vaddr & (align-1)
            memsz = elf_segment.header.p_memsz + ELF_PAGEOFFSET
            offset = elf_segment.header.p_offset - ELF_PAGEOFFSET
            filesz = elf_segment.header.p_filesz + ELF_PAGEOFFSET
            vaddr = elf_segment.header.p_vaddr - ELF_PAGEOFFSET
            memsz = (memsz + align ) & ~(align-1)
            with open(filename, 'rb') as f:
                f.seek(offset, 0)
                data = f.read(filesz)
                memory.append((vaddr, offset, memsz, data))
        return memory
    def load_symbol_table(self, symbols):
        """Collect function symbols and ARM mapping symbols.

        Returns (syms, thumbs): syms maps address -> function name, and
        thumbs lists (address, is_thumb) transitions from the ARM $t/$a
        mapping symbols.
        """
        syms = dict()
        thumbs = list()
        for symbol in symbols:
            if symbol['st_info']['type'] == 'STT_FUNC':
                if self.is_thumb_addr(symbol['st_value']):
                    # Thumb addresses carry bit0 set; index by the real
                    # instruction address (value - 1).
                    syms[symbol['st_value'] - 1] = symbol.name
                else:
                    syms[symbol['st_value']] = symbol.name
            elif self.arch == 'ARM' and symbol['st_info']['type'] == 'STT_NOTYPE':
                if symbol.name == '$t': # Thumb
                    thumbs.append((symbol['st_value'], True))
                elif symbol.name == '$a': #ARM
                    thumbs.append((symbol['st_value'], False))
        return syms, thumbs
def load_section_info(self, sections):
symtab = dict()
thumbtab = list()
code_addrs = []
for section in sections:
if isinstance(section, SymbolTableSection):
syms, thumbs = self.load_symbol_table(section.iter_symbols())
symtab.update(syms)
thumbtab.extend(thumbs)
elif section['sh_flags'] == 6: # Assumption: Code section's flag is AX (ALLOC=2 & EXEC=4)
code_addrs.append({'address': section['sh_addr'], 'size': section['sh_size']})
return symtab, thumbtab, code_addrs
def loadELF(self, filename):
try:
elf = ELFFile(open(filename, 'rb'))
except:
raise Exception("[-] This file is not an ELF file: %s" % filename)
self.arch = elf.get_machine_arch()
self.entry = elf.header.e_entry
self.memory = self.load_code_segments(elf.iter_segments(), filename)
self.symtab, self.thumbtab, self.code_addrs = self.load_section_info(elf.iter_sections())
self.thumbtab.sort(key=lambda tup: tup[0])
self.code_addrs = sorted(self.code_addrs, key=lambda k: k['address'])
def init_asmblr(self):
arch = {'x86':KS_ARCH_X86,'x64':KS_ARCH_X86, 'ARM':KS_ARCH_ARM}[self.arch]
mode = {'x86':KS_MODE_32, 'x64':KS_MODE_64, 'ARM':KS_MODE_ARM}[self.arch]
self.ks = Ks(arch, mode)
if self.arch == 'ARM':
self.t_ks = Ks(arch, CS_MODE_THUMB)
def init_disasmblr(self):
arch = {'x86':CS_ARCH_X86,'x64':CS_ARCH_X86, 'ARM':CS_ARCH_ARM}[self.arch]
mode = {'x86':CS_MODE_32, 'x64':CS_MODE_64, 'ARM':CS_MODE_ARM}[self.arch]
self.md = Cs(arch, mode)
self.md.detail = True
if self.arch == 'ARM':
self.t_md = Cs(arch, CS_MODE_THUMB)
self.t_md.detail = True
    def disasm(self, address, size=None):
        """Disassemble instructions starting at *address*.

        On ARM binaries that have $t/$a mapping symbols, the code is split
        at each mode-switch marker and decoded with the ARM or Thumb engine
        as appropriate; otherwise a single pass with the default engine is
        performed over *size* bytes.

        NOTE(review): on the ARM path *size* is ignored, and any code lying
        after the last mapping-symbol address is never emitted -- confirm
        callers rely on the mapping table covering the whole range.
        """
        if self.arch == 'ARM' and self.thumbtab:
            disasms = []
            # Bit 0 of the start address encodes the initial Thumb state.
            thumb = bool(address & 1)
            address = address & 0xfffffffe
            for addr, isthumb in self.thumbtab:
                if address < addr:
                    # Decode [address, addr) with the mode active so far.
                    md = self.md if not thumb else self.t_md
                    disasms.extend([i for i in md.disasm(self.read_memory(address, addr-address), address)])
                    address = addr
                thumb = isthumb
            return disasms
        else:
            return [i for i in self.md.disasm(self.read_memory(address, size), address)]
def asm(self, asmcode, thumb=False):
ks = self.ks if not thumb else self.t_ks
try:
encoding, count = ks.asm(asmcode)
except KsError as err:
msg = "Error: %s" % err
signals.set_message.send(0, message=msg, expire=2)
return ""
return ''.join(map(chr, encoding))
def is_thumb_instr(self, instr):
return instr._cs.mode == CS_MODE_THUMB
def save(self):
def save_binary(filename):
def save_binary_yes(yn, filename):
if yn == 'y':
try:
original_binary = open(self.filename, 'rb').read()
f = open(filename, 'wb')
f.write(original_binary)
for vaddr, foffset, memsize, mem in self.memory:
f.seek(foffset, 0)
f.write(mem)
f.close()
os.chmod(filename, 0o755)
return "Successfully save to '%s'" % filename
except Exception as e:
return "Fail to save binary: "+str(e)
return "Fail to save binary"
if filename == "":
filename = self.filename
if os.path.exists(filename):
return (filename+" already exists, Overwrite?", save_binary_yes, filename)
else:
return save_binary_yes('y', filename)
signals.set_prompt.send(self, text="Save to (filename): ", callback=save_binary)
def is_thumb_addr(self, address):
return self.arch == 'ARM' and (address & 1) == 1
| python | MIT | b3a0558b4f0468e1fd7fa58c3559f7dff14b1e34 | 2026-01-05T07:13:47.889227Z | false |
taomanwai/tensorboardcolab | https://github.com/taomanwai/tensorboardcolab/blob/337dce9890dadcb977b60322c30c76dee7fb42b1/setup.py | setup.py | from distutils.core import setup
# Packaging metadata for the tensorboardcolab PyPI distribution
# (distutils setup call; this fragment is a separate embedded setup.py).
setup(
    name='tensorboardcolab',
    version='0.0.21',
    packages=['tensorboardcolab'],
    url='https://github.com/taomanwai/tensorboardcolab',
    license='MIT',
    author='Tommy Tao',
    author_email='o_otaotao@hotmail.com',
    description=''
)
| python | MIT | 337dce9890dadcb977b60322c30c76dee7fb42b1 | 2026-01-05T07:13:48.796857Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.