id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
16,701 | from typing import Literal, Union, Optional, Tuple, List
import torch
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection
from diffusers import (
UNet2DConditionModel,
SchedulerMixin,
StableDiffusionPipeline,
StableDiffusionXLPipeline,
AutoencoderKL,
)
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
convert_ldm_unet_checkpoint,
)
from safetensors.torch import load_file
from diffusers.schedulers import (
DDIMScheduler,
DDPMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
UniPCMultistepScheduler,
)
from omegaconf import OmegaConf
def load_diffusers_model(
    pretrained_model_name_or_path: str,
    v2: bool = False,
    clip_skip: Optional[int] = None,
    weight_dtype: torch.dtype = torch.float32,
) -> Tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:
    # Load tokenizer, text encoder, UNet and VAE from a diffusers-format repo/dir.
    # NOTE(review): despite the 3-element return annotation, this returns a
    # 4-tuple (tokenizer, text_encoder, unet, vae).
    # NOTE(review): TOKENIZER_V1/V2_MODEL_NAME and DIFFUSERS_CACHE_DIR are not
    # defined in this chunk -- presumably module-level constants; verify.
    if v2:
        tokenizer = CLIPTokenizer.from_pretrained(
            TOKENIZER_V2_MODEL_NAME,
            subfolder="tokenizer",
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
        )
        text_encoder = CLIPTextModel.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="text_encoder",
            # default is clip skip 2
            # SD2's encoder has 24 layers; clip_skip=k keeps 24-(k-1) layers.
            num_hidden_layers=24 - (clip_skip - 1) if clip_skip is not None else 23,
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
        )
    else:
        tokenizer = CLIPTokenizer.from_pretrained(
            TOKENIZER_V1_MODEL_NAME,
            subfolder="tokenizer",
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
        )
        text_encoder = CLIPTextModel.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="text_encoder",
            # SD1's encoder has 12 layers; default keeps all 12 (clip skip 1).
            num_hidden_layers=12 - (clip_skip - 1) if clip_skip is not None else 12,
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
        )
    unet = UNet2DConditionModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="unet",
        torch_dtype=weight_dtype,
        cache_dir=DIFFUSERS_CACHE_DIR,
    )
    # VAE is intentionally loaded at default (fp32) precision, unlike the rest.
    vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae")
    return tokenizer, text_encoder, unet, vae
def load_checkpoint_model(
    checkpoint_path: str,
    v2: bool = False,
    clip_skip: Optional[int] = None,
    weight_dtype: torch.dtype = torch.float32,
) -> Tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:
    # Load a single-file (.ckpt/.safetensors) SD checkpoint; returns a 4-tuple
    # (tokenizer, text_encoder, unet, vae) -- the annotation understates this.
    pipe = StableDiffusionPipeline.from_single_file(
        checkpoint_path,
        upcast_attention=True if v2 else False,  # SD2 needs attention upcast
        torch_dtype=weight_dtype,
        cache_dir=DIFFUSERS_CACHE_DIR,  # NOTE(review): defined outside this chunk
    )
    # Rebuild the UNet directly from the raw checkpoint rather than using
    # pipe.unet. NOTE(review): load_checkpoint_with_text_encoder_conversion and
    # create_unet_diffusers_config are not visible in this chunk; presumably
    # library-model-util helpers -- verify.
    _, state_dict = load_checkpoint_with_text_encoder_conversion(checkpoint_path)
    unet_config = create_unet_diffusers_config(v2, use_linear_projection_in_v2=v2)
    unet_config["class_embed_type"] = None
    unet_config["addition_embed_type"] = None
    converted_unet_checkpoint = convert_ldm_unet_checkpoint(state_dict, unet_config)
    unet = UNet2DConditionModel(**unet_config)
    unet.load_state_dict(converted_unet_checkpoint)
    tokenizer = pipe.tokenizer
    text_encoder = pipe.text_encoder
    vae = pipe.vae
    if clip_skip is not None:
        # Truncate the text encoder's depth: clip_skip=2 drops the last layer.
        if v2:
            text_encoder.config.num_hidden_layers = 24 - (clip_skip - 1)
        else:
            text_encoder.config.num_hidden_layers = 12 - (clip_skip - 1)
    del pipe  # drop the pipeline wrapper; extracted components stay referenced
    return tokenizer, text_encoder, unet, vae
def create_noise_scheduler(
    scheduler_name: AVAILABLE_SCHEDULERS = "ddpm",
    noise_scheduler_kwargs=None,
    prediction_type: Literal["epsilon", "v_prediction"] = "epsilon",
) -> SchedulerMixin:
    """Instantiate a diffusers noise scheduler by name.

    Args:
        scheduler_name: One of "ddim", "ddpm", "lms", "euler_a", "euler",
            "unipc" (case-insensitive; spaces treated as underscores).
        noise_scheduler_kwargs: Optional OmegaConf config of scheduler kwargs.
        prediction_type: Kept for interface compatibility. NOTE(review): it was
            never forwarded to the scheduler in the original either; callers
            must set it via ``noise_scheduler_kwargs``.

    Raises:
        ValueError: If the name does not match a known scheduler.
    """
    name = scheduler_name.lower().replace(" ", "_")
    # Dispatch table replaces the if/elif chain; `name` is already lower-cased,
    # so the per-branch `.lower()` calls were redundant.
    scheduler_classes = {
        "ddim": DDIMScheduler,
        "ddpm": DDPMScheduler,
        "lms": LMSDiscreteScheduler,
        "euler_a": EulerAncestralDiscreteScheduler,
        "euler": EulerDiscreteScheduler,
        "unipc": UniPCMultistepScheduler,
    }
    if name not in scheduler_classes:
        raise ValueError(f"Unknown scheduler name: {name}")
    # BUG FIX: the default `noise_scheduler_kwargs=None` was previously passed
    # straight to OmegaConf.to_container, which rejects None.
    if noise_scheduler_kwargs is None:
        kwargs = {}
    else:
        kwargs = OmegaConf.to_container(noise_scheduler_kwargs)
    return scheduler_classes[name](**kwargs)
from enum import Enum
# Best-effort probe for Apple-Silicon MPS support; any failure means "no MPS".
try:
    if torch.backends.mps.is_available():
        # NOTE(review): CPUState / cpu_state are defined elsewhere in this
        # module; this block assumes they are already in scope.
        cpu_state = CPUState.MPS
        import torch.mps
except:  # deliberately broad: older torch builds lack torch.backends.mps
    pass
def load_models(
    pretrained_model_name_or_path: str,
    scheduler_name: str,
    v2: bool = False,
    v_pred: bool = False,
    weight_dtype: torch.dtype = torch.float32,
) -> Tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin,]:
    """Load SD components from a single-file checkpoint or a diffusers repo.

    Returns (tokenizer, text_encoder, unet, scheduler, vae); `scheduler` is
    None when `scheduler_name` is falsy.
    """
    # Single-file checkpoints are identified purely by file extension.
    is_single_file = pretrained_model_name_or_path.endswith((".ckpt", ".safetensors"))
    loader = load_checkpoint_model if is_single_file else load_diffusers_model
    tokenizer, text_encoder, unet, vae = loader(
        pretrained_model_name_or_path, v2=v2, weight_dtype=weight_dtype
    )
    scheduler = (
        create_noise_scheduler(
            scheduler_name,
            prediction_type="v_prediction" if v_pred else "epsilon",
        )
        if scheduler_name
        else None
    )
    return tokenizer, text_encoder, unet, scheduler, vae
16,702 | from typing import Literal, Union, Optional, Tuple, List
import torch
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection
from diffusers import (
UNet2DConditionModel,
SchedulerMixin,
StableDiffusionPipeline,
StableDiffusionXLPipeline,
AutoencoderKL,
)
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
convert_ldm_unet_checkpoint,
)
from safetensors.torch import load_file
from diffusers.schedulers import (
DDIMScheduler,
DDPMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
UniPCMultistepScheduler,
)
from omegaconf import OmegaConf
# Either of SDXL's two text encoders: CLIP-L (CLIPTextModel) or the OpenCLIP-G
# encoder with projection head (CLIPTextModelWithProjection).
SDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]
def load_diffusers_model_xl(
    pretrained_model_name_or_path: str,
    weight_dtype: torch.dtype = torch.float32,
) -> Tuple[List[CLIPTokenizer], List[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:
    # returns tokenizer, tokenizer_2, text_encoder, text_encoder_2, unet
    # NOTE(review): actually returns (tokenizers, text_encoders, unet, vae) --
    # two 2-element lists plus unet and vae; the comment above and the return
    # annotation both understate this.
    # NOTE(review): DIFFUSERS_CACHE_DIR is defined elsewhere in this module.
    tokenizers = [
        CLIPTokenizer.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="tokenizer",
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
        ),
        CLIPTokenizer.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="tokenizer_2",
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
            pad_token_id=0,  # same as open clip
        ),
    ]
    text_encoders = [
        # CLIP-L encoder (no projection head)
        CLIPTextModel.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="text_encoder",
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
        ),
        # OpenCLIP-G encoder with text projection
        CLIPTextModelWithProjection.from_pretrained(
            pretrained_model_name_or_path,
            subfolder="text_encoder_2",
            torch_dtype=weight_dtype,
            cache_dir=DIFFUSERS_CACHE_DIR,
        ),
    ]
    unet = UNet2DConditionModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="unet",
        torch_dtype=weight_dtype,
        cache_dir=DIFFUSERS_CACHE_DIR,
    )
    # VAE intentionally loaded at default (fp32) precision.
    vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae")
    return tokenizers, text_encoders, unet, vae
def load_checkpoint_model_xl(
    checkpoint_path: str,
    weight_dtype: torch.dtype = torch.float32,
) -> Tuple[List[CLIPTokenizer], List[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:
    """Load an SDXL single-file checkpoint.

    Returns (tokenizers, text_encoders, unet, vae) -- note the return
    annotation understates the actual 4-tuple.
    """
    pipe = StableDiffusionXLPipeline.from_single_file(
        checkpoint_path,
        torch_dtype=weight_dtype,
        cache_dir=DIFFUSERS_CACHE_DIR,  # NOTE(review): defined outside this chunk
    )
    unet = pipe.unet
    vae = pipe.vae
    tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
    text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
    # The previous `if len(text_encoders) == 2:` guard was dead code -- the
    # list is a two-element literal, so the condition was always true.
    text_encoders[1].pad_token_id = 0  # align pad token with open clip
    del pipe  # drop the pipeline wrapper; extracted components stay referenced
    return tokenizers, text_encoders, unet, vae
def create_noise_scheduler(
    scheduler_name: AVAILABLE_SCHEDULERS = "ddpm",
    noise_scheduler_kwargs=None,
    prediction_type: Literal["epsilon", "v_prediction"] = "epsilon",
) -> SchedulerMixin:
    """Instantiate a diffusers noise scheduler by name.

    Args:
        scheduler_name: One of "ddim", "ddpm", "lms", "euler_a", "euler",
            "unipc" (case-insensitive; spaces treated as underscores).
        noise_scheduler_kwargs: Optional OmegaConf config of scheduler kwargs.
        prediction_type: Kept for interface compatibility. NOTE(review): it was
            never forwarded to the scheduler in the original either; callers
            must set it via ``noise_scheduler_kwargs``.

    Raises:
        ValueError: If the name does not match a known scheduler.
    """
    name = scheduler_name.lower().replace(" ", "_")
    # Dispatch table replaces the if/elif chain; `name` is already lower-cased,
    # so the per-branch `.lower()` calls were redundant.
    scheduler_classes = {
        "ddim": DDIMScheduler,
        "ddpm": DDPMScheduler,
        "lms": LMSDiscreteScheduler,
        "euler_a": EulerAncestralDiscreteScheduler,
        "euler": EulerDiscreteScheduler,
        "unipc": UniPCMultistepScheduler,
    }
    if name not in scheduler_classes:
        raise ValueError(f"Unknown scheduler name: {name}")
    # BUG FIX: the default `noise_scheduler_kwargs=None` was previously passed
    # straight to OmegaConf.to_container, which rejects None.
    if noise_scheduler_kwargs is None:
        kwargs = {}
    else:
        kwargs = OmegaConf.to_container(noise_scheduler_kwargs)
    return scheduler_classes[name](**kwargs)
from enum import Enum
# Best-effort probe for Apple-Silicon MPS support; any failure means "no MPS".
try:
    if torch.backends.mps.is_available():
        # NOTE(review): CPUState / cpu_state are defined elsewhere in this
        # module; this block assumes they are already in scope.
        cpu_state = CPUState.MPS
        import torch.mps
except:  # deliberately broad: older torch builds lack torch.backends.mps
    pass
def load_models_xl(
    pretrained_model_name_or_path: str,
    scheduler_name: str,
    weight_dtype: torch.dtype = torch.float32,
    noise_scheduler_kwargs=None,
) -> Tuple[
    List[CLIPTokenizer],
    List[SDXL_TEXT_ENCODER_TYPE],
    UNet2DConditionModel,
    SchedulerMixin,
]:
    """Load SDXL components from a single-file checkpoint or a diffusers repo.

    Returns (tokenizers, text_encoders, unet, scheduler, vae); `scheduler`
    is None when `scheduler_name` is falsy.
    """
    # Single-file checkpoints are identified purely by file extension.
    if pretrained_model_name_or_path.endswith((".ckpt", ".safetensors")):
        tokenizers, text_encoders, unet, vae = load_checkpoint_model_xl(
            pretrained_model_name_or_path, weight_dtype
        )
    else:  # diffusers-format repo or local directory
        tokenizers, text_encoders, unet, vae = load_diffusers_model_xl(
            pretrained_model_name_or_path, weight_dtype
        )
    scheduler = (
        create_noise_scheduler(scheduler_name, noise_scheduler_kwargs)
        if scheduler_name
        else None
    )
    return tokenizers, text_encoders, unet, scheduler, vae
16,703 | from typing import Literal, Union, Optional, Tuple, List
import torch
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection
from diffusers import (
UNet2DConditionModel,
SchedulerMixin,
StableDiffusionPipeline,
StableDiffusionXLPipeline,
AutoencoderKL,
)
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
convert_ldm_unet_checkpoint,
)
from safetensors.torch import load_file
from diffusers.schedulers import (
DDIMScheduler,
DDPMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
UniPCMultistepScheduler,
)
from omegaconf import OmegaConf
from enum import Enum
# Best-effort probe for Apple-Silicon MPS support; any failure means "no MPS".
try:
    if torch.backends.mps.is_available():
        # NOTE(review): CPUState / cpu_state are defined elsewhere in this
        # module; this block assumes they are already in scope.
        cpu_state = CPUState.MPS
        import torch.mps
except:  # deliberately broad: older torch builds lack torch.backends.mps
    pass
def torch_gc():
    """Run a Python garbage-collection pass, then release cached CUDA memory."""
    import gc

    gc.collect()
    if not torch.cuda.is_available():
        return
    # Bind to the default CUDA device while releasing the allocator cache and
    # cross-process IPC handles.
    with torch.cuda.device("cuda"):
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
16,704 | from typing import Literal, Union, Optional, Tuple, List
import torch
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection
from diffusers import (
UNet2DConditionModel,
SchedulerMixin,
StableDiffusionPipeline,
StableDiffusionXLPipeline,
AutoencoderKL,
)
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
convert_ldm_unet_checkpoint,
)
from safetensors.torch import load_file
from diffusers.schedulers import (
DDIMScheduler,
DDPMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
UniPCMultistepScheduler,
)
from omegaconf import OmegaConf
from enum import Enum
class CPUState(Enum):
    """Which compute backend this process is currently pinned to."""

    GPU = 0  # CUDA (or Intel XPU) path
    CPU = 1  # plain CPU fallback
    MPS = 2  # Apple Silicon Metal backend
# Module-level device state: assume the GPU path until a probe says otherwise.
cpu_state = CPUState.GPU
# Flipped to True by DirectML setup code (not visible in this chunk).
directml_enabled = False
def is_intel_xpu():
    """Return True when on the GPU path and an Intel XPU has been detected.

    Relies on the module-global ``xpu_available`` flag, which is set by
    backend-probing code elsewhere in this module (not visible in this chunk).
    """
    global cpu_state
    global xpu_available
    if cpu_state != CPUState.GPU:
        return False
    # BUG FIX: reading a bare `xpu_available` raised NameError whenever the
    # XPU probe never ran; treat "never set" as "not available".
    return bool(globals().get("xpu_available", False))
# Best-effort probe for Apple-Silicon MPS support; any failure means "no MPS".
try:
    if torch.backends.mps.is_available():
        cpu_state = CPUState.MPS
        import torch.mps
except:  # deliberately broad: older torch builds lack torch.backends.mps
    pass
def get_torch_device():
    # Resolve the preferred torch.device from module-global backend state,
    # checked in priority order: DirectML, MPS, CPU, XPU, CUDA.
    global directml_enabled
    global cpu_state
    if directml_enabled:
        # NOTE(review): `directml_device` is created by DirectML setup code
        # elsewhere in this module; assumed to exist whenever the flag is set.
        global directml_device
        return directml_device
    if cpu_state == CPUState.MPS:
        return torch.device("mps")
    if cpu_state == CPUState.CPU:
        return torch.device("cpu")
    else:
        if is_intel_xpu():
            return torch.device("xpu")
        else:
            # Index form of the current CUDA device (== torch.device("cuda", i)).
            return torch.device(torch.cuda.current_device())
16,705 | import os
import sys
import time
import subprocess
from cog import BasePredictor, Input, Path
import cv2
import torch
import numpy as np
from PIL import Image
from diffusers.utils import load_image
from diffusers.models import ControlNetModel
from insightface.app import FaceAnalysis
from pipeline_stable_diffusion_xl_instantid import (
StableDiffusionXLInstantIDPipeline,
draw_kps,
)
def resize_img(
    input_image,
    max_side=1280,
    min_side=1024,
    size=None,
    pad_to_max_side=False,
    mode=Image.BILINEAR,
    base_pixel_number=64,
):
    # Resize a PIL image so its short side reaches min_side and its long side
    # fits within max_side, then snap both dimensions DOWN to multiples of
    # `base_pixel_number` (e.g. 64 for SDXL latents).
    w, h = input_image.size
    if size is not None:
        # Explicit target size overrides the ratio-based computation.
        w_resize_new, h_resize_new = size
    else:
        # First scale up so the short side reaches min_side...
        ratio = min_side / min(h, w)
        w, h = round(ratio * w), round(ratio * h)
        # ...then scale so the long side fits max_side (applied to the already
        # rescaled w/h -- the order of these two steps is load-bearing).
        ratio = max_side / max(h, w)
        input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
        w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
        h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
    input_image = input_image.resize([w_resize_new, h_resize_new], mode)
    if pad_to_max_side:
        # Center the resized image on a white max_side x max_side canvas.
        res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
        offset_x = (max_side - w_resize_new) // 2
        offset_y = (max_side - h_resize_new) // 2
        res[
            offset_y : offset_y + h_resize_new, offset_x : offset_x + w_resize_new
        ] = np.array(input_image)
        input_image = Image.fromarray(res)
    return input_image
16,706 | import os
import sys
import time
import subprocess
from cog import BasePredictor, Input, Path
import cv2
import torch
import numpy as np
from PIL import Image
from diffusers.utils import load_image
from diffusers.models import ControlNetModel
from insightface.app import FaceAnalysis
from pipeline_stable_diffusion_xl_instantid import (
StableDiffusionXLInstantIDPipeline,
draw_kps,
)
def download_weights(url, dest):
    """Fetch model weights from `url` into `dest` via the `pget` downloader."""
    started_at = time.time()
    print("downloading url: ", url)
    print("downloading to: ", dest)
    # -x extracts archives while downloading; close_fds=False keeps inherited
    # file descriptors open for the child process.
    subprocess.check_call(["pget", "-x", url, dest], close_fds=False)
    print("downloading took: ", time.time() - started_at)
16,707 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import cv2
import math
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from diffusers.image_processor import PipelineImageInput
from diffusers.models import ControlNetModel
from diffusers.utils import (
deprecate,
logging,
replace_example_docstring,
)
from diffusers.utils.torch_utils import is_compiled_module, is_torch_version
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers import StableDiffusionXLControlNetPipeline
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.utils.import_utils import is_xformers_available
from ip_adapter.resampler import Resampler
from ip_adapter.utils import is_torch2_available
from ip_adapter.attention_processor import region_control
from transformers import CLIPTokenizer
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipeline
def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
    """Render five facial keypoints plus their connecting limbs on a black canvas.

    Limbs are drawn as filled rotated ellipses at 60% intensity; each keypoint
    is then drawn as a filled circle. Returns a PIL image sized like image_pil.
    """
    stickwidth = 4
    limb_pairs = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
    points = np.array(kps)

    width, height = image_pil.size
    canvas = np.zeros([height, width, 3])

    # Each limb: an ellipse centered between its two endpoints, rotated to the
    # segment's angle, filled with the color of the first endpoint.
    for pair in limb_pairs:
        limb_color = color_list[pair[0]]
        xs = points[pair][:, 0]
        ys = points[pair][:, 1]
        limb_len = ((xs[0] - xs[1]) ** 2 + (ys[0] - ys[1]) ** 2) ** 0.5
        rot = math.degrees(math.atan2(ys[0] - ys[1], xs[0] - xs[1]))
        poly = cv2.ellipse2Poly(
            (int(np.mean(xs)), int(np.mean(ys))),
            (int(limb_len / 2), stickwidth),
            int(rot),
            0,
            360,
            1,
        )
        canvas = cv2.fillConvexPoly(canvas.copy(), poly, limb_color)
    canvas = (canvas * 0.6).astype(np.uint8)

    for idx, (px, py) in enumerate(points):
        canvas = cv2.circle(canvas.copy(), (int(px), int(py)), 10, color_list[idx], -1)

    return PIL.Image.fromarray(canvas.astype(np.uint8))
16,708 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import cv2
import math
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from diffusers.image_processor import PipelineImageInput
from diffusers.models import ControlNetModel
from diffusers.utils import (
deprecate,
logging,
replace_example_docstring,
)
from diffusers.utils.torch_utils import is_compiled_module, is_torch_version
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers import StableDiffusionXLControlNetPipeline
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.utils.import_utils import is_xformers_available
from ip_adapter.resampler import Resampler
from ip_adapter.utils import is_torch2_available
def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
    """Render five facial keypoints plus their connecting limbs on a black canvas.

    Limbs are drawn as filled rotated ellipses at 60% intensity; each keypoint
    is then drawn as a filled circle. Returns a PIL image sized like image_pil.
    """
    stickwidth = 4
    limb_pairs = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
    points = np.array(kps)

    width, height = image_pil.size
    canvas = np.zeros([height, width, 3])

    # Each limb: an ellipse centered between its two endpoints, rotated to the
    # segment's angle, filled with the color of the first endpoint.
    for pair in limb_pairs:
        limb_color = color_list[pair[0]]
        xs = points[pair][:, 0]
        ys = points[pair][:, 1]
        limb_len = ((xs[0] - xs[1]) ** 2 + (ys[0] - ys[1]) ** 2) ** 0.5
        rot = math.degrees(math.atan2(ys[0] - ys[1], xs[0] - xs[1]))
        poly = cv2.ellipse2Poly(
            (int(np.mean(xs)), int(np.mean(ys))),
            (int(limb_len / 2), stickwidth),
            int(rot),
            0,
            360,
            1,
        )
        canvas = cv2.fillConvexPoly(canvas.copy(), poly, limb_color)
    canvas = (canvas * 0.6).astype(np.uint8)

    for idx, (px, py) in enumerate(points):
        canvas = cv2.circle(canvas.copy(), (int(px), int(py)), 10, color_list[idx], -1)

    return PIL.Image.fromarray(canvas.astype(np.uint8))
16,709 | import torch.nn.functional as F
def is_torch2_available():
    """Return True when torch ships scaled_dot_product_attention (PyTorch 2.x)."""
    return getattr(F, "scaled_dot_product_attention", None) is not None
16,710 | import math
import torch
import torch.nn as nn
def FeedForward(dim, mult=4):
    """Build a pre-norm two-layer MLP: LayerNorm -> Linear -> GELU -> Linear.

    Both projections are bias-free; the hidden width is ``dim * mult``.
    """
    hidden_dim = int(dim * mult)
    layers = [
        nn.LayerNorm(dim),
        nn.Linear(dim, hidden_dim, bias=False),
        nn.GELU(),
        nn.Linear(hidden_dim, dim, bias=False),
    ]
    return nn.Sequential(*layers)
16,711 | import math
import torch
import torch.nn as nn
def reshape_tensor(x, heads):
    """Split the channel dim into `heads` and move heads ahead of sequence.

    (bs, length, width) -> (bs, heads, length, width // heads)
    """
    bs, seq_len, _ = x.shape
    # (bs, length, width) --> (bs, length, heads, dim_per_head)
    x = x.view(bs, seq_len, heads, -1)
    # (bs, length, heads, dim_per_head) --> (bs, heads, length, dim_per_head)
    x = x.permute(0, 2, 1, 3)
    # force a contiguous (bs, heads, length, dim_per_head) layout
    return x.reshape(bs, heads, seq_len, -1)
16,712 | import cv2
import torch
import numpy as np
from PIL import Image
from diffusers.utils import load_image
from diffusers.models import ControlNetModel
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from insightface.app import FaceAnalysis
from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps
from controlnet_aux import MidasDetector
def convert_from_image_to_cv2(img: Image) -> np.ndarray:
    """Convert a PIL RGB image into an OpenCV-style BGR ndarray."""
    rgb_array = np.array(img)
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
16,713 | import cv2
import torch
import numpy as np
from PIL import Image
from diffusers.utils import load_image
from diffusers.models import ControlNetModel
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from insightface.app import FaceAnalysis
from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps
from controlnet_aux import MidasDetector
def resize_img(input_image, max_side=1280, min_side=1024, size=None,
               pad_to_max_side=False, mode=Image.BILINEAR, base_pixel_number=64):
    # Resize a PIL image so its short side reaches min_side and its long side
    # fits within max_side, then snap both dimensions DOWN to multiples of
    # `base_pixel_number` (e.g. 64 for SDXL latents).
    w, h = input_image.size
    if size is not None:
        # Explicit target size overrides the ratio-based computation.
        w_resize_new, h_resize_new = size
    else:
        # Scale up so the short side reaches min_side, then scale so the long
        # side fits max_side (applied to the already rescaled w/h -- the order
        # of these two steps is load-bearing).
        ratio = min_side / min(h, w)
        w, h = round(ratio*w), round(ratio*h)
        ratio = max_side / max(h, w)
        input_image = input_image.resize([round(ratio*w), round(ratio*h)], mode)
        w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
        h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
    input_image = input_image.resize([w_resize_new, h_resize_new], mode)
    if pad_to_max_side:
        # Center the resized image on a white max_side x max_side canvas.
        res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
        offset_x = (max_side - w_resize_new) // 2
        offset_y = (max_side - h_resize_new) // 2
        res[offset_y:offset_y+h_resize_new, offset_x:offset_x+w_resize_new] = np.array(input_image)
        input_image = Image.fromarray(res)
    return input_image
16,714 | import cv2
import torch
import numpy as np
from PIL import Image
from diffusers.utils import load_image
from diffusers.models import ControlNetModel
from insightface.app import FaceAnalysis
from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps
def resize_img(input_image, max_side=1280, min_side=1024, size=None,
               pad_to_max_side=False, mode=Image.BILINEAR, base_pixel_number=64):
    # Resize a PIL image so its short side reaches min_side and its long side
    # fits within max_side, then snap both dimensions DOWN to multiples of
    # `base_pixel_number` (e.g. 64 for SDXL latents).
    w, h = input_image.size
    if size is not None:
        # Explicit target size overrides the ratio-based computation.
        w_resize_new, h_resize_new = size
    else:
        # Scale up so the short side reaches min_side, then scale so the long
        # side fits max_side (applied to the already rescaled w/h -- the order
        # of these two steps is load-bearing).
        ratio = min_side / min(h, w)
        w, h = round(ratio*w), round(ratio*h)
        ratio = max_side / max(h, w)
        input_image = input_image.resize([round(ratio*w), round(ratio*h)], mode)
        w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
        h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
    input_image = input_image.resize([w_resize_new, h_resize_new], mode)
    if pad_to_max_side:
        # Center the resized image on a white max_side x max_side canvas.
        res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
        offset_x = (max_side - w_resize_new) // 2
        offset_y = (max_side - h_resize_new) // 2
        res[offset_y:offset_y+h_resize_new, offset_x:offset_x+w_resize_new] = np.array(input_image)
        input_image = Image.fromarray(res)
    return input_image
16,715 | import torch
import torch.nn as nn
def fixed_pos_embedding(x):
    """Build a sinusoidal position table shaped like x (seq_len, dim).

    Returns (sin, cos) of the outer product position * inverse-frequency,
    cast to x's dtype/device.
    """
    seq_len, dim = x.shape
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
    positions = torch.arange(0, seq_len, dtype=torch.float)
    # outer(i, j) == einsum("i , j -> i j"); .to(x) matches dtype and device.
    angles = torch.outer(positions, inv_freq).to(x)
    return torch.sin(angles), torch.cos(angles)
16,716 | import torch
import torch.nn as nn
def rotate_every_two(x):
    """Rotate adjacent channel pairs: (a, b) -> (-b, a), interleaved back.

    Input and output have the same shape; this is the rotation half of the
    rotary position embedding.
    """
    x1 = x[:, :, ::2]
    x2 = x[:, :, 1::2]
    x = torch.stack((-x2, x1), dim=-1)
    # NOTE: the original also built a zero-padded copy of x2 when
    # `x.shape[-1] % 2 == 1`, but after torch.stack the last dim is always 2,
    # so that branch was dead code (and its result was never used anyway);
    # it has been removed.
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
def duplicate_interleave(m):
    """
    A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
    """
    n_rows = m.shape[0]
    # flatten to a column, duplicate each element side by side, then restore
    # the row count -- columns come out interleaved (a, a, b, b, ...).
    doubled = m.reshape(-1, 1).repeat(1, 2)
    return doubled.view(n_rows, -1)
def apply_rotary_pos_emb(x, sin, cos, scale=1):
    """Apply a rotary position embedding to x using precomputed sin/cos tables.

    The tables are scaled, interleave-duplicated to the channel width, and
    combined as x*cos + rotate(x)*sin (the standard RoPE formulation).
    """
    sin = duplicate_interleave(sin * scale)
    cos = duplicate_interleave(cos * scale)
    rotated = rotate_every_two(x)
    # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
    return (x * cos[:, :x.shape[-1]]) + (rotated * sin)[:, :, :x.shape[-1]]
16,717 | import os
import ffmpeg
import whisper
import argparse
import warnings
import tempfile
from .utils import filename, str2bool, write_srt
def filename(path):
    """Return the basename of `path` without its extension."""
    # BUG FIX: this def had no body in the original (a SyntaxError that also
    # swallowed get_audio below); restored to match the utils implementation.
    return os.path.splitext(os.path.basename(path))[0]

def get_audio(paths):
    """Extract 16 kHz mono PCM audio from each media file into the temp dir.

    Returns a dict mapping each input path to its extracted .wav path.
    """
    temp_dir = tempfile.gettempdir()
    audio_paths = {}

    for path in paths:
        print(f"Extracting audio from {filename(path)}...")
        output_path = os.path.join(temp_dir, f"{filename(path)}.wav")

        # 16-bit little-endian PCM, mono, 16 kHz -- whisper's expected input.
        ffmpeg.input(path).output(
            output_path,
            acodec="pcm_s16le", ac=1, ar="16k"
        ).run(quiet=True, overwrite_output=True)

        audio_paths[path] = output_path

    return audio_paths
16,718 | import os
import ffmpeg
import whisper
import argparse
import warnings
import tempfile
from .utils import filename, str2bool, write_srt
def write_srt(transcript: Iterator[dict], file: TextIO):
    """Serialize transcript segments to `file` in SubRip (.srt) format.

    Segments are numbered from 1; '-->' inside text is rewritten to '->' so it
    cannot be mistaken for a timing line.
    """
    for index, segment in enumerate(transcript, start=1):
        start_ts = format_timestamp(segment['start'], always_include_hours=True)
        end_ts = format_timestamp(segment['end'], always_include_hours=True)
        text = segment['text'].strip().replace('-->', '->')
        # flush per segment so partial output survives interruption
        print(f"{index}\n{start_ts} --> {end_ts}\n{text}\n", file=file, flush=True)
def filename(path):
    """Return `path`'s basename with its extension stripped."""
    stem, _ext = os.path.splitext(os.path.basename(path))
    return stem
def get_subtitles(audio_paths: list, output_srt: bool, output_dir: str, transcribe: callable):
    """Run `transcribe` over each audio file and write one .srt per input.

    When `output_srt` is False the .srt files land in the temp dir instead of
    `output_dir`. Returns {source_path: srt_path}.
    """
    subtitles_path = {}

    for source_path, audio_path in audio_paths.items():
        target_dir = output_dir if output_srt else tempfile.gettempdir()
        srt_path = os.path.join(target_dir, f"{filename(source_path)}.srt")

        print(
            f"Generating subtitles for {filename(source_path)}... This might take a while."
        )

        # Whisper emits noisy warnings during decode; silence them only for
        # the duration of the call.
        warnings.filterwarnings("ignore")
        result = transcribe(audio_path)
        warnings.filterwarnings("default")

        with open(srt_path, "w", encoding="utf-8") as srt:
            write_srt(result["segments"], file=srt)

        subtitles_path[source_path] = srt_path

    return subtitles_path
16,719 | import os
from typing import Iterator, TextIO
def str2bool(string):
    """Parse "true"/"false" (case-insensitive) into a bool.

    Raises ValueError for anything else.
    """
    str2val = {"true": True, "false": False}
    string = string.lower()
    try:
        return str2val[string]
    except KeyError:
        raise ValueError(
            f"Expected one of {set(str2val.keys())}, got {string}")
16,720 | import cv2
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import add_arg_scope
def gate_conv(x_in, cnum, ksize, stride=1, rate=1, name='conv',
              padding='SAME', activation='leaky_relu', use_lrn=True, training=True):
    """Gated convolution: features modulated by a sigmoid gate branch.

    Returns (gated_features, gate). Both branches share the input (and explicit
    padding, when requested) but use separate conv weights (`name`, `name`_g).
    """
    assert padding in ['SYMMETRIC', 'SAME', 'REFELECT']
    x = x_in
    if padding == 'SYMMETRIC' or padding == 'REFELECT':
        p = int(rate * (ksize - 1) / 2)
        # 'REFELECT' is a long-standing typo kept in the accepted values for
        # caller compatibility, but tf.pad only understands 'REFLECT'.
        pad_mode = 'REFLECT' if padding == 'REFELECT' else padding
        x = tf.pad(x_in, [[0, 0], [p, p], [p, p], [0, 0]], mode=pad_mode)
        padding = 'VALID'
    # BUG FIX: both convolutions previously read the unpadded `x_in`,
    # discarding the explicit padding above and shrinking the output under
    # 'VALID'. They now consume the (possibly padded) `x`.
    feat = tf.layers.conv2d(
        x, cnum, ksize, stride, dilation_rate=rate,
        activation=None, padding=padding, name=name)
    if use_lrn:
        feat = tf.nn.lrn(feat, bias=0.00005)
    if activation == 'leaky_relu':
        feat = tf.nn.leaky_relu(feat)

    # Gate branch: same geometry, sigmoid activation, separate weights.
    g = tf.layers.conv2d(
        x, cnum, ksize, stride, dilation_rate=rate,
        activation=tf.nn.sigmoid, padding=padding, name=name + '_g')

    return tf.multiply(feat, g), g
16,721 | import cv2
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import add_arg_scope
def gate_deconv(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
                name="deconv", training=True):
    """Gated transposed convolution.

    Returns (gated_features, gate); the two branches share the transpose-conv
    weights `w` but have separate biases.
    """
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=tf.random_normal_initializer(stddev=stddev))

        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                        strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases1', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        deconv = tf.nn.leaky_relu(deconv)

        g = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                   strides=[1, d_h, d_w, 1])
        b = tf.get_variable('biases2', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        g = tf.reshape(tf.nn.bias_add(g, b), deconv.get_shape())
        # BUG FIX: the gate was previously overwritten with sigmoid(deconv),
        # discarding the dedicated gate branch computed above; the sigmoid now
        # applies to the gate's own pre-activation.
        g = tf.nn.sigmoid(g)

        deconv = tf.multiply(g, deconv)
        return deconv, g
16,722 | import os
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
def make_cuda_ext(
    name, module, sources, sources_cuda=None, extra_args=None, extra_include_path=None
):
    """Build a CppExtension or CUDAExtension for `module`.`name`.

    CUDA is enabled when torch sees a GPU or FORCE_CUDA=1 is set; otherwise a
    plain C++ extension is produced. Source paths are resolved relative to the
    module's directory.
    """
    # BUG FIX: the list parameters used mutable `[]` defaults, and
    # `sources += sources_cuda` mutated the caller's list in place. All list
    # inputs are now copied defensively.
    sources = list(sources)
    sources_cuda = [] if sources_cuda is None else list(sources_cuda)
    extra_args = [] if extra_args is None else list(extra_args)
    extra_include_path = [] if extra_include_path is None else list(extra_include_path)

    define_macros = []
    extra_compile_args = {"cxx": list(extra_args)}

    if torch.cuda.is_available() or os.getenv("FORCE_CUDA", "0") == "1":
        define_macros += [("WITH_CUDA", None)]
        extension = CUDAExtension
        # Disable half-precision operator shortcuts and target common SM archs.
        extra_compile_args["nvcc"] = extra_args + [
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
            "-gencode=arch=compute_70,code=sm_70",
            "-gencode=arch=compute_75,code=sm_75",
            "-gencode=arch=compute_80,code=sm_80",
            "-gencode=arch=compute_86,code=sm_86",
        ]
        sources += sources_cuda
    else:
        print("Compiling {} without CUDA".format(name))
        extension = CppExtension

    return extension(
        name="{}.{}".format(module, name),
        sources=[os.path.join(*module.split("."), p) for p in sources],
        include_dirs=extra_include_path,
        define_macros=define_macros,
        extra_compile_args=extra_compile_args,
    )
16,723 | import torch
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import (
DistSamplerSeedHook,
EpochBasedRunner,
GradientCumulativeFp16OptimizerHook,
Fp16OptimizerHook,
OptimizerHook,
build_optimizer,
build_runner,
)
from mmdet3d.runner import CustomEpochBasedRunner
from mmdet3d.utils import get_root_logger
from mmdet.core import DistEvalHook
from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor
def train_model(
    model,
    dataset,
    cfg,
    distributed=False,
    validate=False,
    timestamp=None,
):
    # Train an mmdet3d model: build dataloaders, wrap the model in DDP, build
    # the runner/optimizer/hooks from `cfg`, optionally register evaluation
    # hooks, resume/load weights, then launch training.
    logger = get_root_logger()

    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            None,  # num_gpus slot; unused when `dist` is set
            dist=distributed,
            seed=cfg.seed,
        )
        for ds in dataset
    ]

    # put model on gpus
    find_unused_parameters = cfg.get("find_unused_parameters", False)
    # Sets the `find_unused_parameters` parameter in
    # torch.nn.parallel.DistributedDataParallel
    # NOTE(review): only the distributed wrapper is used here -- the
    # `distributed` flag does not switch to a non-DDP path for model wrapping.
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters,
    )

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            optimizer=optimizer,
            work_dir=cfg.run_dir,
            logger=logger,
            meta={},
        ),
    )
    # CustomEpochBasedRunner exposes set_dataset; guard for other runner types.
    if hasattr(runner, "set_dataset"):
        runner.set_dataset(dataset)

    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp

    # fp16 setting
    fp16_cfg = cfg.get("fp16", None)
    if fp16_cfg is not None:
        if "cumulative_iters" in cfg.optimizer_config:
            # fp16 + gradient accumulation
            optimizer_config = GradientCumulativeFp16OptimizerHook(
                **cfg.optimizer_config, **fp16_cfg, distributed=distributed
            )
        else:
            optimizer_config = Fp16OptimizerHook(
                **cfg.optimizer_config, **fp16_cfg, distributed=distributed
            )
    elif distributed and "type" not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        # Pass the raw config through; the runner builds the hook itself.
        optimizer_config = cfg.optimizer_config

    # register hooks
    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get("momentum_config", None),
    )
    if isinstance(runner, EpochBasedRunner):
        # Re-seed the sampler each epoch so shuffling differs across epochs.
        runner.register_hook(DistSamplerSeedHook())

    # register eval hooks
    if validate:
        # Support batch_size > 1 in validation
        val_samples_per_gpu = cfg.data.val.pop("samples_per_gpu", 1)
        if val_samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False,
        )
        eval_cfg = cfg.get("evaluation", {})
        eval_cfg["by_epoch"] = cfg.runner["type"] != "IterBasedRunner"
        eval_hook = DistEvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

    # Resume (optimizer + iteration state) takes precedence over load (weights only).
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, [("train", 1)])
16,724 | import numba
import numpy as np
def camera_to_lidar(points, r_rect, velo2cam):
    """Convert points in camera coordinate to lidar coordinate.

    Args:
        points (np.ndarray, shape=[N, 3]): Points in camera coordinate.
        r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in
            specific camera coordinate (e.g. CAM2) to CAM0.
        velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in
            camera coordinate to lidar coordinate.

    Returns:
        np.ndarray, shape=[N, 3]: Points in lidar coordinate.
    """
    # Promote [..., 3] points to homogeneous [..., 4] coordinates when needed.
    if points.shape[-1] == 3:
        pad = np.ones(list(points.shape[:-1]) + [1])
        points = np.concatenate((points, pad), axis=-1)
    # Row-vector convention: apply the inverse of the combined transform.
    inv_tf = np.linalg.inv((r_rect @ velo2cam).T)
    return (points @ inv_tf)[..., :3]
The provided code snippet includes necessary dependencies for implementing the `box_camera_to_lidar` function. Write a Python function `def box_camera_to_lidar(data, r_rect, velo2cam)` to solve the following problem:
Convert boxes in camera coordinate to lidar coordinate. Args: data (np.ndarray, shape=[N, 7]): Boxes in camera coordinate. r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in specific camera coordinate (e.g. CAM2) to CAM0. velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in camera coordinate to lidar coordinate. Returns: np.ndarray, shape=[N, 3]: Boxes in lidar coordinate.
Here is the function:
def box_camera_to_lidar(data, r_rect, velo2cam):
    """Convert boxes in camera coordinate to lidar coordinate.

    Args:
        data (np.ndarray, shape=[N, 7]): Boxes in camera coordinate,
            laid out as (x, y, z, l, h, w, r).
        r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in
            specific camera coordinate (e.g. CAM2) to CAM0.
        velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in
            camera coordinate to lidar coordinate.

    Returns:
        np.ndarray, shape=[N, 7]: Boxes in lidar coordinate,
            laid out as (x, y, z, w, l, h, r).
    """
    centers = data[:, 0:3]
    length = data[:, 3:4]
    height = data[:, 4:5]
    width = data[:, 5:6]
    yaw = data[:, 6:7]
    # Only the center needs a coordinate transform; dims are reordered w, l, h.
    centers_lidar = camera_to_lidar(centers, r_rect, velo2cam)
    return np.concatenate((centers_lidar, width, length, height, yaw), axis=1)
16,725 | import numba
import numpy as np
def camera_to_lidar(points, r_rect, velo2cam):
    """Convert points in camera coordinate to lidar coordinate.

    Args:
        points (np.ndarray, shape=[N, 3]): Points in camera coordinate.
        r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in
            specific camera coordinate (e.g. CAM2) to CAM0.
        velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in
            camera coordinate to lidar coordinate.

    Returns:
        np.ndarray, shape=[N, 3]: Points in lidar coordinate.
    """
    if points.shape[-1] == 3:
        # Append a homogeneous 1 so the 4x4 transform can be applied.
        ones = np.ones(list(points.shape[:-1]) + [1])
        points = np.concatenate([points, ones], axis=-1)
    transform = np.linalg.inv((r_rect @ velo2cam).T)
    lidar_pts = points @ transform
    return lidar_pts[..., :3]
def depth_to_points(depth, trunc_pixel):
    """Convert depth map to points.

    Args:
        depth (np.array, shape=[H, W]): Depth map whose rows
            [0~`trunc_pixel`] are truncated.
        trunc_pixel (int): The number of truncated rows.

    Returns:
        np.ndarray: Points (u*d, v*d, d) per valid pixel, in row-major order.
    """
    # Valid pixels are those with depth above the 0.1 threshold, below the
    # truncated rows. np.nonzero yields indices in C (row-major) order, the
    # same order a nested row/column loop would produce.
    cropped = depth[trunc_pixel:, :]
    rows, cols = np.nonzero(cropped > 0.1)
    rows = rows + trunc_pixel
    depths = depth[rows, cols]
    # Each point is (col, row, 1) scaled by its depth value.
    pixels = np.stack([cols, rows, np.ones_like(rows)], axis=1).astype(depth.dtype)
    return pixels * depths[:, None]
The provided code snippet includes necessary dependencies for implementing the `depth_to_lidar_points` function. Write a Python function `def depth_to_lidar_points(depth, trunc_pixel, P2, r_rect, velo2cam)` to solve the following problem:
Convert depth map to points in lidar coordinate. Args: depth (np.array, shape=[H, W]): Depth map which the row of [0~`trunc_pixel`] are truncated. trunc_pixel (int): The number of truncated row. P2 (p.array, shape=[4, 4]): Intrinsics of Camera2. r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in specific camera coordinate (e.g. CAM2) to CAM0. velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in camera coordinate to lidar coordinate. Returns: np.ndarray: Points in lidar coordinates.
Here is the function:
def depth_to_lidar_points(depth, trunc_pixel, P2, r_rect, velo2cam):
    """Convert depth map to points in lidar coordinate.

    Args:
        depth (np.array, shape=[H, W]): Depth map whose rows
            [0~`trunc_pixel`] are truncated.
        trunc_pixel (int): The number of truncated rows.
        P2 (np.array, shape=[4, 4]): Intrinsics of Camera2.
        r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in
            specific camera coordinate (e.g. CAM2) to CAM0.
        velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in
            camera coordinate to lidar coordinate.

    Returns:
        np.ndarray: Points in lidar coordinates.
    """
    image_pts = depth_to_points(depth, trunc_pixel)
    # Homogenize, then undo the camera projection (row-vector convention).
    ones = np.ones(list(image_pts.shape[:-1]) + [1])
    homo = np.concatenate([image_pts, ones], axis=-1)
    cam_pts = homo @ np.linalg.inv(P2.T)
    return camera_to_lidar(cam_pts, r_rect, velo2cam)
16,726 | import numba
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `rotation_points_single_angle` function. Write a Python function `def rotation_points_single_angle(points, angle, axis=0)` to solve the following problem:
Rotate points with a single angle. Args: points (np.ndarray, shape=[N, 3]]): angle (np.ndarray, shape=[1]]): axis (int, optional): Axis to rotate at. Defaults to 0. Returns: np.ndarray: Rotated points.
Here is the function:
def rotation_points_single_angle(points, angle, axis=0):
    """Rotate points with a single angle about one coordinate axis.

    Args:
        points (np.ndarray, shape=[N, 3]): Points to rotate.
        angle (float or np.ndarray, shape=[1]): Rotation angle in radians.
        axis (int, optional): Axis to rotate about. Defaults to 0.

    Returns:
        tuple: Rotated points (np.ndarray, shape=[N, 3]) and the transposed
            rotation matrix used (np.ndarray, shape=[3, 3]).
    """
    s = np.sin(angle)
    c = np.cos(angle)
    # Transposed rotation matrices, for use with row-vector points.
    if axis == 0:
        rot_mat_T = np.array(
            [[1, 0, 0], [0, c, -s], [0, s, c]], dtype=points.dtype
        )
    elif axis == 1:
        rot_mat_T = np.array(
            [[c, 0, -s], [0, 1, 0], [s, 0, c]], dtype=points.dtype
        )
    elif axis in (2, -1):
        rot_mat_T = np.array(
            [[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=points.dtype
        )
    else:
        raise ValueError("axis should in range")
    return points @ rot_mat_T, rot_mat_T
16,727 | import numba
import numpy as np
def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 1.0, 0.5), axis=1):
    """Convert kitti locations, dimensions and angles to corners.

    Args:
        centers (np.ndarray): Locations in kitti label file with shape (N, 3).
        dims (np.ndarray): Dimensions in kitti label file with shape (N, 3).
        angles (np.ndarray, optional): Rotation_y in kitti label file with
            shape (N). Defaults to None.
        origin (list or array or float, optional): Origin point relative to
            the smallest corner. Use (0.5, 1.0, 0.5) in camera and
            (0.5, 0.5, 0) in lidar. Defaults to (0.5, 1.0, 0.5).
        axis (int, optional): Rotation axis. 1 for camera and 2 for lidar.
            Defaults to 1.

    Returns:
        np.ndarray: Corners with the shape of (N, 8, 3).
    """
    # Build axis-aligned corners around the origin, rotate if angles are
    # given, then translate to the box centers.
    corners = corners_nd(dims, origin=origin)  # (N, 8, 3)
    if angles is not None:
        corners = rotation_3d_in_axis(corners, angles, axis=axis)
    return corners + centers.reshape([-1, 1, 3])
def points_cam2img(points_3d, proj_mat, with_depth=False):
    """Project points in camera coordinates to image coordinates.

    Args:
        points_3d (np.ndarray): Points in shape (N, 3).
        proj_mat (np.ndarray): Transformation matrix between coordinates;
            accepted shapes are 3x3, 3x4 or 4x4.
        with_depth (bool, optional): Whether to keep depth in the output.
            Defaults to False.

    Returns:
        np.ndarray: Points in image coordinates with shape [N, 2]
            (or [N, 3] with depth appended when ``with_depth``).
    """
    assert len(proj_mat.shape) == 2, (
        "The dimension of the projection" f" matrix should be 2 instead of {len(proj_mat.shape)}."
    )
    d1, d2 = proj_mat.shape[:2]
    assert (d1 == 3 and d2 == 3) or (d1 == 3 and d2 == 4) or (d1 == 4 and d2 == 4), (
        "The shape of the projection matrix" f" ({d1}*{d2}) is not supported."
    )
    if d1 == 3:
        # Embed a 3xK matrix into a full 4x4 homogeneous transform.
        expanded = np.eye(4, dtype=proj_mat.dtype)
        expanded[:d1, :d2] = proj_mat
        proj_mat = expanded
    ones_shape = list(points_3d.shape)
    ones_shape[-1] = 1
    homo = np.concatenate([points_3d, np.ones(ones_shape)], axis=-1)
    projected = homo @ proj_mat.T
    # Perspective divide by the depth component.
    uv = projected[..., :2] / projected[..., 2:3]
    if with_depth:
        return np.concatenate([uv, projected[..., 2:3]], axis=-1)
    return uv
The provided code snippet includes necessary dependencies for implementing the `box3d_to_bbox` function. Write a Python function `def box3d_to_bbox(box3d, P2)` to solve the following problem:
Convert box3d in camera coordinates to bbox in image coordinates. Args: box3d (np.ndarray, shape=[N, 7]): Boxes in camera coordinate. P2 (np.array, shape=[4, 4]): Intrinsics of Camera2. Returns: np.ndarray, shape=[N, 4]: Boxes 2d in image coordinates.
Here is the function:
def box3d_to_bbox(box3d, P2):
    """Convert box3d in camera coordinates to bbox in image coordinates.

    Args:
        box3d (np.ndarray, shape=[N, 7]): Boxes in camera coordinate.
        P2 (np.array, shape=[4, 4]): Intrinsics of Camera2.

    Returns:
        np.ndarray, shape=[N, 4]: Boxes 2d (xmin, ymin, xmax, ymax) in
            image coordinates.
    """
    # Project the 8 corners of each box, then take their axis-aligned extent.
    corners3d = center_to_corner_box3d(
        box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1
    )
    corners2d = points_cam2img(corners3d, P2)  # (N, 8, 2)
    mins = corners2d.min(axis=1)
    maxs = corners2d.max(axis=1)
    return np.concatenate([mins, maxs], axis=1)
16,728 | import numba
import numpy as np
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
    """Convert kitti locations, dimensions and angles to 2d corners.

    Format: center(xy), dims(xy), angles(clockwise when positive).

    Args:
        centers (np.ndarray): Locations in kitti label file with shape (N, 2).
        dims (np.ndarray): Dimensions in kitti label file with shape (N, 2).
        angles (np.ndarray, optional): Rotation_y in kitti label file with
            shape (N). Defaults to None.
        origin (list or array or float, optional): Origin point relative to
            the smallest corner. Defaults to 0.5.

    Returns:
        np.ndarray: Corners with the shape of (N, 4, 2).
    """
    # Axis-aligned corners around the origin, optionally rotated, then
    # translated to the box centers.
    corners = corners_nd(dims, origin=origin)  # (N, 4, 2)
    if angles is not None:
        corners = rotation_2d(corners, angles)
    return corners + centers.reshape([-1, 1, 2])
The provided code snippet includes necessary dependencies for implementing the `minmax_to_corner_2d` function. Write a Python function `def minmax_to_corner_2d(minmax_box)` to solve the following problem:
Convert minmax box to corners2d. Args: minmax_box (np.ndarray, shape=[N, dims]): minmax boxes. Returns: np.ndarray: 2d corners of boxes
Here is the function:
def minmax_to_corner_2d(minmax_box):
    """Convert minmax box to 2d corners.

    Args:
        minmax_box (np.ndarray, shape=[N, 2*ndim]): Boxes given as
            (mins..., maxs...).

    Returns:
        np.ndarray: 2d corners of boxes.
    """
    ndim = minmax_box.shape[-1] // 2
    lower = minmax_box[..., :ndim]
    extents = minmax_box[..., ndim:] - lower
    # origin=0.0 places the box origin at its min corner.
    return center_to_corner_box2d(lower, extents, origin=0.0)
16,729 | import numba
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `create_anchors_3d_range` function. Write a Python function `def create_anchors_3d_range( feature_size, anchor_range, sizes=((1.6, 3.9, 1.56),), rotations=(0, np.pi / 2), dtype=np.float32, )` to solve the following problem:
Create anchors 3d by range. Args: feature_size (list[float] | tuple[float]): Feature map size. It is either a list of a tuple of [D, H, W](in order of z, y, and x). anchor_range (torch.Tensor | list[float]): Range of anchors with shape [6]. The order is consistent with that of anchors, i.e., (x_min, y_min, z_min, x_max, y_max, z_max). sizes (list[list] | np.ndarray | torch.Tensor, optional): Anchor size with shape [N, 3], in order of x, y, z. Defaults to ((1.6, 3.9, 1.56), ). rotations (list[float] | np.ndarray | torch.Tensor, optional): Rotations of anchors in a single feature grid. Defaults to (0, np.pi / 2). dtype (type, optional): Data type. Default to np.float32. Returns: np.ndarray: Range based anchors with shape of \ (*feature_size, num_sizes, num_rots, 7).
Here is the function:
def create_anchors_3d_range(
    feature_size,
    anchor_range,
    sizes=((1.6, 3.9, 1.56),),
    rotations=(0, np.pi / 2),
    dtype=np.float32,
):
    """Create anchors 3d by range.
    Args:
        feature_size (list[float] | tuple[float]): Feature map size. It is
            either a list of a tuple of [D, H, W](in order of z, y, and x).
        anchor_range (torch.Tensor | list[float]): Range of anchors with
            shape [6]. The order is consistent with that of anchors, i.e.,
            (x_min, y_min, z_min, x_max, y_max, z_max).
        sizes (list[list] | np.ndarray | torch.Tensor, optional):
            Anchor size with shape [N, 3], in order of x, y, z.
            Defaults to ((1.6, 3.9, 1.56), ).
        rotations (list[float] | np.ndarray | torch.Tensor, optional):
            Rotations of anchors in a single feature grid.
            Defaults to (0, np.pi / 2).
        dtype (type, optional): Data type. Default to np.float32.
    Returns:
        np.ndarray: Range based anchors with shape of \
            (*feature_size, num_sizes, num_rots, 7).
    """
    anchor_range = np.array(anchor_range, dtype)
    # Evenly place anchor centers along each axis, inclusive of both ends.
    z_centers = np.linspace(anchor_range[2], anchor_range[5], feature_size[0], dtype=dtype)
    y_centers = np.linspace(anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype)
    x_centers = np.linspace(anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype)
    sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
    rotations = np.array(rotations, dtype=dtype)
    # Cartesian product of centers and rotations; with indexing='ij' each
    # grid in `rets` has shape (X, Y, Z, R).
    rets = np.meshgrid(x_centers, y_centers, z_centers, rotations, indexing="ij")
    tile_shape = [1] * 5
    tile_shape[-2] = int(sizes.shape[0])
    # Repeat every grid along a new "size" axis (-> (X, Y, Z, S, R)) so each
    # anchor size shares the same centers/rotations, then append a trailing
    # length-1 axis for the final concat.
    for i in range(len(rets)):
        rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
        rets[i] = rets[i][..., np.newaxis]  # for concat
    # Broadcast the (S, 3) size table to the same (X, Y, Z, S, R, 3) layout.
    sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
    tile_size_shape = list(rets[0].shape)
    tile_size_shape[3] = 1
    sizes = np.tile(sizes, tile_size_shape)
    # Insert sizes between the (x, y, z) grids and the rotation grid so the
    # last axis reads (x, y, z, x_size, y_size, z_size, rot).
    rets.insert(3, sizes)
    ret = np.concatenate(rets, axis=-1)
    # Reorder leading axes from (x, y, z, ...) to (z, y, x, ...) to match the
    # feature-map [D, H, W] layout promised in the docstring.
    return np.transpose(ret, [2, 1, 0, 3, 4, 5])
16,730 | import numba
import numpy as np
def limit_period(val, offset=0.5, period=np.pi):
    """Limit the value into a period for a periodic function.

    Args:
        val (np.ndarray): The value to be converted.
        offset (float, optional): Offset to set the value range.
            Defaults to 0.5.
        period (float, optional): Period of the value. Defaults to np.pi.

    Returns:
        np.ndarray: Value in the range of
            [-offset * period, (1 - offset) * period].
    """
    # Subtract the integer number of periods that shifts `val` into range.
    num_periods = np.floor(val / period + offset)
    return val - num_periods * period
def center_to_minmax_2d(centers, dims, origin=0.5):
    """Convert center-format boxes to minmax format.

    Args:
        centers (np.ndarray): Center points.
        dims (np.ndarray): Dimensions.
        origin (list or array or float, optional): Origin point relative
            to the smallest corner. Defaults to 0.5.

    Returns:
        np.ndarray: Minmax points (mins..., maxs...).
    """
    if origin == 0.5:
        # Centered boxes: min/max are just half-extents around the center.
        half = dims / 2
        return np.concatenate([centers - half, centers + half], axis=-1)
    # General origin: go through explicit corners and keep opposite ones.
    corners = center_to_corner_box2d(centers, dims, origin=origin)
    return corners[:, [0, 2]].reshape([-1, 4])
The provided code snippet includes necessary dependencies for implementing the `rbbox2d_to_near_bbox` function. Write a Python function `def rbbox2d_to_near_bbox(rbboxes)` to solve the following problem:
convert rotated bbox to nearest 'standing' or 'lying' bbox. Args: rbboxes (np.ndarray): Rotated bboxes with shape of \ (N, 5(x, y, xdim, ydim, rad)). Returns: np.ndarray: Bounding boxes with the shape of (N, 4(xmin, ymin, xmax, ymax)).
Here is the function:
def rbbox2d_to_near_bbox(rbboxes):
    """Convert rotated bboxes to their nearest 'standing' or 'lying' bboxes.

    Args:
        rbboxes (np.ndarray): Rotated bboxes with shape of
            (N, 5(x, y, xdim, ydim, rad)).

    Returns:
        np.ndarray: Bounding boxes with the shape of
            (N, 4(xmin, ymin, xmax, ymax)).
    """
    yaws = rbboxes[..., -1]
    # Fold the rotation into [-pi/2, pi/2); beyond pi/4 the box is closer to
    # its 90-degree-rotated orientation, so xdim/ydim are swapped.
    folded = np.abs(limit_period(yaws, 0.5, np.pi))
    swap = (folded > np.pi / 4)[..., np.newaxis]
    centered = np.where(swap, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
    return center_to_minmax_2d(centered[:, :2], centered[:, 2:])
16,731 | import numba
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `iou_jit` function. Write a Python function `def iou_jit(boxes, query_boxes, mode="iou", eps=0.0)` to solve the following problem:
Calculate box iou. Note that jit version runs ~10x faster than the box_overlaps function in mmdet3d.core.evaluation. Args: boxes (np.ndarray): Input bounding boxes with shape of (N, 4). query_boxes (np.ndarray): Query boxes with shape of (K, 4). mode (str, optional): IoU mode. Defaults to 'iou'. eps (float, optional): Value added to denominator. Defaults to 0. Returns: np.ndarray: Overlap between boxes and query_boxes with the shape of [N, K].
Here is the function:
def iou_jit(boxes, query_boxes, mode="iou", eps=0.0):
    """Calculate box iou. Note that jit version runs ~10x faster than the
    box_overlaps function in mmdet3d.core.evaluation.

    Args:
        boxes (np.ndarray): Input bounding boxes with shape of (N, 4).
        query_boxes (np.ndarray): Query boxes with shape of (K, 4).
        mode (str, optional): 'iou' for intersection-over-union, anything
            else for intersection-over-first-box. Defaults to 'iou'.
        eps (float, optional): Value added to each extent (and hence the
            denominator). Defaults to 0.
    Returns:
        np.ndarray: Overlap between boxes and query_boxes with the
            shape of [N, K].
    """
    num_boxes = boxes.shape[0]
    num_queries = query_boxes.shape[0]
    overlaps = np.zeros((num_boxes, num_queries), dtype=boxes.dtype)
    for q in range(num_queries):
        qx1, qy1 = query_boxes[q, 0], query_boxes[q, 1]
        qx2, qy2 = query_boxes[q, 2], query_boxes[q, 3]
        q_area = (qx2 - qx1 + eps) * (qy2 - qy1 + eps)
        for b in range(num_boxes):
            # Intersection extents; skip early when the boxes are disjoint.
            iw = min(boxes[b, 2], qx2) - max(boxes[b, 0], qx1) + eps
            if iw <= 0:
                continue
            ih = min(boxes[b, 3], qy2) - max(boxes[b, 1], qy1) + eps
            if ih <= 0:
                continue
            b_area = (boxes[b, 2] - boxes[b, 0] + eps) * (boxes[b, 3] - boxes[b, 1] + eps)
            if mode == "iou":
                denom = b_area + q_area - iw * ih
            else:
                denom = b_area
            overlaps[b, q] = iw * ih / denom
    return overlaps
16,732 | import numba
import numpy as np
def camera_to_lidar(points, r_rect, velo2cam):
    """Convert points in camera coordinate to lidar coordinate.

    Args:
        points (np.ndarray, shape=[N, 3]): Points in camera coordinate.
        r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in
            specific camera coordinate (e.g. CAM2) to CAM0.
        velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in
            camera coordinate to lidar coordinate.

    Returns:
        np.ndarray, shape=[N, 3]: Points in lidar coordinate.
    """
    if points.shape[-1] == 3:
        # Homogenize so the 4x4 inverse transform can be applied.
        pad_shape = list(points.shape[:-1]) + [1]
        points = np.concatenate([points, np.ones(pad_shape)], axis=-1)
    combined = (r_rect @ velo2cam).T
    return (points @ np.linalg.inv(combined))[..., :3]
def corner_to_surfaces_3d_jit(corners):
    """Convert 3d box corners from the corner functions in this module to
    surfaces whose normal vectors all point inward.

    Args:
        corners (np.ndarray): 3d box corners with the shape of (N, 8, 3).

    Returns:
        np.ndarray: Surfaces with the shape of (N, 6, 4, 3).
    """
    num_boxes = corners.shape[0]
    surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)
    # 6 faces x 4 vertices, expressed as indices into the 8 box corners.
    corner_idxes = np.array(
        [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7]
    ).reshape(6, 4)
    for box in range(num_boxes):
        for face in range(6):
            # Gather the 4 corner rows for this face in one indexing step.
            surfaces[box, face] = corners[box, corner_idxes[face]]
    return surfaces
def projection_matrix_to_CRT_kitti(proj):
    """Split a kitti projection matrix P = C @ [R|T].

    C is an upper triangular matrix, so we invert CR and use a QR
    decomposition, which is stable for all kitti camera projection matrices.

    Args:
        proj (np.array, shape=[4, 4]): Intrinsics of camera.

    Returns:
        tuple[np.ndarray]: Split matrices C (intrinsics), R (rotation) and
            T (translation).
    """
    cam_rot = proj[0:3, 0:3]   # CR
    cam_trans = proj[0:3, 3]   # CT
    # inv(CR) = inv(R) @ inv(C); QR splits it into orthogonal x triangular.
    rot_inv, intr_inv = np.linalg.qr(np.linalg.inv(cam_rot))
    C = np.linalg.inv(intr_inv)
    R = np.linalg.inv(rot_inv)
    T = intr_inv @ cam_trans
    return C, R, T
def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):
    """Get frustum corners in camera coordinates.

    Args:
        bbox_image (list[int]): Box in image coordinates
            (xmin, ymin, xmax, ymax).
        C (np.ndarray): Intrinsics.
        near_clip (float, optional): Nearest distance of frustum.
            Defaults to 0.001.
        far_clip (float, optional): Farthest distance of frustum.
            Defaults to 100.

    Returns:
        np.ndarray, shape=[8, 3]: Coordinates of frustum corners —
            4 near-plane corners followed by 4 far-plane corners.
    """
    fku = C[0, 0]
    fkv = -C[1, 1]
    principal = C[0:2, 2]
    b = bbox_image
    # Image-plane corners of the bbox, traversed (min,min)->(min,max)->...
    box_corners = np.array(
        [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], dtype=C.dtype
    )
    # Unproject the bbox corners onto the near and far clipping planes.
    near_corners = (box_corners - principal) / np.array(
        [fku / near_clip, -fkv / near_clip], dtype=C.dtype
    )
    far_corners = (box_corners - principal) / np.array(
        [fku / far_clip, -fkv / far_clip], dtype=C.dtype
    )
    z_points = np.array([near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]
    xy = np.concatenate([near_corners, far_corners], axis=0)  # [8, 2]
    return np.concatenate([xy, z_points], axis=1)
def points_in_convex_polygon_3d_jit(points, polygon_surfaces, num_surfaces=None):
    """Check whether points lie inside 3d convex polygons.

    Args:
        points (np.ndarray): Input points with shape of (num_points, 3).
        polygon_surfaces (np.ndarray): Polygon surfaces with shape of
            (num_polygon, max_num_surfaces, max_num_points_of_surface, 3).
            All surface normal vectors must point inward.
            max_num_points_of_surface must be at least 3.
        num_surfaces (np.ndarray, optional): Number of surfaces each polygon
            contains, shape (num_polygon). Defaults to None (treat every
            surface slot as valid).

    Returns:
        np.ndarray: Result matrix with the shape of [num_points, num_polygon].
    """
    num_polygons = polygon_surfaces.shape[0]
    if num_surfaces is None:
        # Sentinel "all surfaces valid" count per polygon.
        num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
    # A plane is fixed by its first three vertices: normal_vec has shape
    # [num_polygon, max_num_surfaces, 3] and d [num_polygon, max_num_surfaces].
    normal_vec, d = surface_equ_3d(polygon_surfaces[:, :, :3, :])
    return _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, num_surfaces)
The provided code snippet includes necessary dependencies for implementing the `remove_outside_points` function. Write a Python function `def remove_outside_points(points, rect, Trv2c, P2, image_shape)` to solve the following problem:
Remove points which are outside of image. Args: points (np.ndarray, shape=[N, 3+dims]): Total points. rect (np.ndarray, shape=[4, 4]): Matrix to project points in specific camera coordinate (e.g. CAM2) to CAM0. Trv2c (np.ndarray, shape=[4, 4]): Matrix to project points in camera coordinate to lidar coordinate. P2 (p.array, shape=[4, 4]): Intrinsics of Camera2. image_shape (list[int]): Shape of image. Returns: np.ndarray, shape=[N, 3+dims]: Filtered points.
Here is the function:
def remove_outside_points(points, rect, Trv2c, P2, image_shape):
    """Remove points which fall outside of the image frustum.

    Args:
        points (np.ndarray, shape=[N, 3+dims]): Total points.
        rect (np.ndarray, shape=[4, 4]): Matrix to project points in
            specific camera coordinate (e.g. CAM2) to CAM0.
        Trv2c (np.ndarray, shape=[4, 4]): Matrix to project points in
            camera coordinate to lidar coordinate.
        P2 (np.array, shape=[4, 4]): Intrinsics of Camera2.
        image_shape (list[int]): Shape of image (H, W).

    Returns:
        np.ndarray, shape=[M, 3+dims]: Filtered points.
    """
    # 5x faster than remove_outside_points_v1(2ms vs 10ms)
    C, R, T = projection_matrix_to_CRT_kitti(P2)
    image_bbox = [0, 0, image_shape[1], image_shape[0]]
    # Frustum corners in camera coords, undo extrinsics, then move to lidar.
    frustum = get_frustum(image_bbox, C) - T
    frustum = (np.linalg.inv(R) @ frustum.T).T
    frustum = camera_to_lidar(frustum, rect, Trv2c)
    surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])
    mask = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
    return points[mask.reshape([-1])]
16,733 | import numba
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `points_in_convex_polygon_jit` function. Write a Python function `def points_in_convex_polygon_jit(points, polygon, clockwise=True)` to solve the following problem:
Check points is in 2d convex polygons. True when point in polygon. Args: points (np.ndarray): Input points with the shape of [num_points, 2]. polygon (np.ndarray): Input polygon with the shape of [num_polygon, num_points_of_polygon, 2]. clockwise (bool, optional): Indicate polygon is clockwise. Defaults to True. Returns: np.ndarray: Result matrix with the shape of [num_points, num_polygon].
Here is the function:
def points_in_convex_polygon_jit(points, polygon, clockwise=True):
    """Check whether points lie inside 2d convex polygons.

    True when the point is strictly inside the polygon.

    Args:
        points (np.ndarray): Input points with the shape of [num_points, 2].
        polygon (np.ndarray): Input polygon with the shape of
            [num_polygon, num_points_of_polygon, 2].
        clockwise (bool, optional): Indicate polygon is clockwise. Defaults
            to True.

    Returns:
        np.ndarray: Result matrix with the shape of [num_points, num_polygon].
    """
    num_vertices = polygon.shape[1]
    num_points = points.shape[0]
    num_polygons = polygon.shape[0]
    ret = np.zeros((num_points, num_polygons), dtype=np.bool_)
    for p in range(num_points):
        px, py = points[p, 0], points[p, 1]
        for g in range(num_polygons):
            inside = True
            for v in range(num_vertices):
                # Edge from the previous vertex to vertex v (index -1 wraps
                # around to the last vertex, closing the polygon).
                if clockwise:
                    edge = polygon[g, v] - polygon[g, v - 1]
                else:
                    edge = polygon[g, v - 1] - polygon[g, v]
                # 2d cross product of the edge with (vertex - point); a
                # non-negative value means the point is on the wrong side.
                cross = edge[1] * (polygon[g, v, 0] - px)
                cross -= edge[0] * (polygon[g, v, 1] - py)
                if cross >= 0:
                    inside = False
                    break
            ret[p, g] = inside
    return ret
16,734 | import numba
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `boxes3d_to_corners3d_lidar` function. Write a Python function `def boxes3d_to_corners3d_lidar(boxes3d, bottom_center=True)` to solve the following problem:
Convert kitti center boxes to corners. 7 -------- 4 /| /| 6 -------- 5 . | | | | . 3 -------- 0 |/ |/ 2 -------- 1 Args: boxes3d (np.ndarray): Boxes with shape of (N, 7) [x, y, z, w, l, h, ry] in LiDAR coords, see the definition of ry in KITTI dataset. bottom_center (bool, optional): Whether z is on the bottom center of object. Defaults to True. Returns: np.ndarray: Box corners with the shape of [N, 8, 3].
Here is the function:
def boxes3d_to_corners3d_lidar(boxes3d, bottom_center=True):
    """Convert kitti center boxes to corners.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d (np.ndarray): Boxes with shape of (N, 7)
            [x, y, z, w, l, h, ry] in LiDAR coords, see the definition of ry
            in KITTI dataset.
        bottom_center (bool, optional): Whether z is on the bottom center
            of the object. Defaults to True.

    Returns:
        np.ndarray: Box corners with the shape of [N, 8, 3].
    """
    num_boxes = boxes3d.shape[0]
    w, l, h = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
    half_w, half_l = w / 2.0, l / 2.0
    # Per-corner offsets in the box frame; columns follow the diagram above.
    x_corners = np.array(
        [half_w, -half_w, -half_w, half_w, half_w, -half_w, -half_w, half_w],
        dtype=np.float32,
    ).T
    y_corners = np.array(
        [-half_l, -half_l, half_l, half_l, -half_l, -half_l, half_l, half_l],
        dtype=np.float32,
    ).T
    if bottom_center:
        # z spans [0, h]: bottom four corners at 0, top four at h.
        z_corners = np.zeros((num_boxes, 8), dtype=np.float32)
        z_corners[:, 4:8] = h.reshape(num_boxes, 1).repeat(4, axis=1)
    else:
        # z spans [-h/2, h/2] around the geometric center.
        half_h = h / 2.0
        z_corners = np.array(
            [-half_h] * 4 + [half_h] * 4, dtype=np.float32
        ).T
    # Build per-box yaw rotations about z; rot_list is (3, 3, N).
    ry = boxes3d[:, 6]
    zeros = np.zeros(ry.size, dtype=np.float32)
    ones = np.ones(ry.size, dtype=np.float32)
    rot_list = np.array(
        [[np.cos(ry), -np.sin(ry), zeros], [np.sin(ry), np.cos(ry), zeros], [zeros, zeros, ones]]
    )
    rot_mats = np.transpose(rot_list, (2, 0, 1))  # (N, 3, 3)
    box_frame = np.stack([x_corners, y_corners, z_corners], axis=2)  # (N, 8, 3)
    rotated = np.matmul(box_frame, rot_mats)  # (N, 8, 3)
    # Translate the rotated corners to each box location.
    shifted = rotated + boxes3d[:, np.newaxis, 0:3]
    return shifted.astype(np.float32)
16,735 | import numpy as np
import torch
from logging import warning
The provided code snippet includes necessary dependencies for implementing the `limit_period` function. Write a Python function `def limit_period(val, offset=0.5, period=np.pi)` to solve the following problem:
Limit the value into a period for periodic function. Args: val (torch.Tensor): The value to be converted. offset (float, optional): Offset to set the value range. \ Defaults to 0.5. period ([type], optional): Period of the value. Defaults to np.pi. Returns: torch.Tensor: Value in the range of \ [-offset * period, (1-offset) * period]
Here is the function:
def limit_period(val, offset=0.5, period=np.pi):
"""Limit the value into a period for periodic function.
Args:
val (torch.Tensor): The value to be converted.
offset (float, optional): Offset to set the value range. \
Defaults to 0.5.
period ([type], optional): Period of the value. Defaults to np.pi.
Returns:
torch.Tensor: Value in the range of \
[-offset * period, (1-offset) * period]
"""
return val - torch.floor(val / period + offset) * period | Limit the value into a period for periodic function. Args: val (torch.Tensor): The value to be converted. offset (float, optional): Offset to set the value range. \ Defaults to 0.5. period ([type], optional): Period of the value. Defaults to np.pi. Returns: torch.Tensor: Value in the range of \ [-offset * period, (1-offset) * period] |
16,736 | import numpy as np
import torch
from logging import warning
The provided code snippet includes necessary dependencies for implementing the `rotation_3d_in_axis` function. Write a Python function `def rotation_3d_in_axis(points, angles, axis=0)` to solve the following problem:
Rotate points by angles according to axis. Args: points (torch.Tensor): Points of shape (N, M, 3). angles (torch.Tensor): Vector of angles in shape (N,) axis (int, optional): The axis to be rotated. Defaults to 0. Raises: ValueError: when the axis is not in range [0, 1, 2], it will \ raise value error. Returns: torch.Tensor: Rotated points in shape (N, M, 3)
Here is the function:
def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate batched points around a single coordinate axis.

    Args:
        points (torch.Tensor): Points of shape (N, M, 3).
        angles (torch.Tensor): Rotation angles in shape (N,).
        axis (int, optional): The axis to rotate about; -1 is treated
            as axis 2. Defaults to 0.

    Raises:
        ValueError: If ``axis`` is not one of 0, 1, 2 or -1.

    Returns:
        torch.Tensor: Rotated points in shape (N, M, 3).
    """
    sin_a = torch.sin(angles)
    cos_a = torch.cos(angles)
    one = torch.ones_like(cos_a)
    zero = torch.zeros_like(cos_a)
    # Rows of the (transposed) rotation matrix for each supported axis.
    if axis == 0:
        rows = (
            (zero, cos_a, -sin_a),
            (zero, sin_a, cos_a),
            (one, zero, zero),
        )
    elif axis == 1:
        rows = (
            (cos_a, zero, -sin_a),
            (zero, one, zero),
            (sin_a, zero, cos_a),
        )
    elif axis in (2, -1):
        rows = (
            (cos_a, -sin_a, zero),
            (sin_a, cos_a, zero),
            (zero, zero, one),
        )
    else:
        raise ValueError(f"axis should in range [0, 1, 2], got {axis}")
    # Shape (3, 3, N): per-box transposed rotation matrices.
    rot_mat_T = torch.stack([torch.stack(row) for row in rows])
    return torch.einsum("aij,jka->aik", (points, rot_mat_T))
16,737 | import numpy as np
import torch
from logging import warning
The provided code snippet includes necessary dependencies for implementing the `xywhr2xyxyr` function. Write a Python function `def xywhr2xyxyr(boxes_xywhr)` to solve the following problem:
Convert a rotated boxes in XYWHR format to XYXYR format. Args: boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format. Returns: torch.Tensor: Converted boxes in XYXYR format.
Here is the function:
def xywhr2xyxyr(boxes_xywhr):
    """Convert rotated BEV boxes from XYWHR to XYXYR format.

    Args:
        boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.

    Returns:
        torch.Tensor: Converted boxes in XYXYR format.
    """
    converted = torch.zeros_like(boxes_xywhr)
    # Half extents along x (width) and y (height).
    half_extents = boxes_xywhr[:, 2:4] / 2
    converted[:, 0:2] = boxes_xywhr[:, 0:2] - half_extents
    converted[:, 2:4] = boxes_xywhr[:, 0:2] + half_extents
    converted[:, 4] = boxes_xywhr[:, 4]
    return converted
16,738 | import numpy as np
import torch
from logging import warning
class Box3DMode(IntEnum):
    r"""Enum of different ways to represent a box.

    Coordinates in LiDAR:

    .. code-block:: none

                    up z
                       ^   x front
                       |  /
                       | /
        left y <------ 0

    The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0),
    and the yaw is around the z axis, thus the rotation axis=2.

    Coordinates in camera:

    .. code-block:: none

                z front
               /
              /
             0 ------> x right
             |
             |
             v
        down y

    The relative coordinate of bottom center in a CAM box is [0.5, 1.0, 0.5],
    and the yaw is around the y axis, thus the rotation axis=1.

    Coordinates in Depth mode:

    .. code-block:: none

        up z
           ^   y front
           |  /
           | /
           0 ------> x right

    The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0),
    and the yaw is around the z axis, thus the rotation axis=2.
    """

    LIDAR = 0
    CAM = 1
    DEPTH = 2

    # NOTE(review): `convert` is defined without @staticmethod. Calling it as
    # `Box3DMode.convert(box, src, dst)` works in Python 3 (plain functions
    # accessed through the class are unbound), but @staticmethod would make
    # the intent explicit — confirm against callers before changing.
    def convert(box, src, dst, rt_mat=None):
        """Convert boxes from `src` mode to `dst` mode.

        Args:
            box (tuple | list | np.ndarray |
                torch.Tensor | BaseInstance3DBoxes):
                Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7.
            src (:obj:`Box3DMode`): The src Box mode.
            dst (:obj:`Box3DMode`): The target Box mode.
            rt_mat (np.ndarray | torch.Tensor): The rotation and translation
                matrix between different coordinates. Defaults to None.
                The conversion from `src` coordinates to `dst` coordinates
                usually comes along the change of sensors, e.g., from camera
                to LiDAR. This requires a transformation matrix.

        Returns:
            (tuple | list | np.ndarray | torch.Tensor | BaseInstance3DBoxes): \
                The converted box of the same type.
        """
        # No-op when the source and target modes already agree.
        if src == dst:
            return box

        # Remember the input container type so the result can be converted
        # back to the same kind of object at the end.
        is_numpy = isinstance(box, np.ndarray)
        is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes)
        single_box = isinstance(box, (list, tuple))
        if single_box:
            assert len(box) >= 7, (
                "Box3DMode.convert takes either a k-tuple/list or "
                "an Nxk array/tensor, where k >= 7"
            )
            # Promote the single box to a (1, k) tensor for uniform handling.
            arr = torch.tensor(box)[None, :]
        else:
            # avoid modifying the input box
            if is_numpy:
                arr = torch.from_numpy(np.asarray(box)).clone()
            elif is_Instance3DBoxes:
                arr = box.tensor.clone()
            else:
                arr = box.clone()

        # convert box from `src` mode to `dst` mode.
        # Columns 3:6 are the box sizes; each branch below permutes them to
        # match the target mode's axis convention and supplies a default
        # axis-permutation matrix when no rt_mat is given.
        x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6]
        if src == Box3DMode.LIDAR and dst == Box3DMode.CAM:
            if rt_mat is None:
                rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]])
            xyz_size = torch.cat([y_size, z_size, x_size], dim=-1)
        elif src == Box3DMode.CAM and dst == Box3DMode.LIDAR:
            if rt_mat is None:
                rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
            xyz_size = torch.cat([z_size, x_size, y_size], dim=-1)
        elif src == Box3DMode.DEPTH and dst == Box3DMode.CAM:
            if rt_mat is None:
                rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
            xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)
        elif src == Box3DMode.CAM and dst == Box3DMode.DEPTH:
            if rt_mat is None:
                rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
            xyz_size = torch.cat([x_size, z_size, y_size], dim=-1)
        elif src == Box3DMode.LIDAR and dst == Box3DMode.DEPTH:
            if rt_mat is None:
                rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
            xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)
        elif src == Box3DMode.DEPTH and dst == Box3DMode.LIDAR:
            if rt_mat is None:
                rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
            xyz_size = torch.cat([y_size, x_size, z_size], dim=-1)
        else:
            raise NotImplementedError(
                f"Conversion from Box3DMode {src} to {dst} " "is not supported yet"
            )

        if not isinstance(rt_mat, torch.Tensor):
            rt_mat = arr.new_tensor(rt_mat)
        # A matrix with 4 columns carries a translation as well: extend the
        # centers to homogeneous coordinates before applying it.
        if rt_mat.size(1) == 4:
            extended_xyz = torch.cat([arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1)
            xyz = extended_xyz @ rt_mat.t()
        else:
            xyz = arr[:, :3] @ rt_mat.t()

        # Columns 6: (yaw and any extra features) are carried over unchanged.
        remains = arr[..., 6:]
        arr = torch.cat([xyz[:, :3], xyz_size, remains], dim=-1)

        # convert arr to the original type
        original_type = type(box)
        if single_box:
            return original_type(arr.flatten().tolist())
        if is_numpy:
            return arr.numpy()
        elif is_Instance3DBoxes:
            if dst == Box3DMode.CAM:
                target_type = CameraInstance3DBoxes
            elif dst == Box3DMode.LIDAR:
                target_type = LiDARInstance3DBoxes
            elif dst == Box3DMode.DEPTH:
                target_type = DepthInstance3DBoxes
            else:
                raise NotImplementedError(
                    f"Conversion to {dst} through {original_type}" " is not supported yet"
                )
            return target_type(arr, box_dim=arr.size(-1), with_yaw=box.with_yaw)
        else:
            return arr
The provided code snippet includes necessary dependencies for implementing the `get_box_type` function. Write a Python function `def get_box_type(box_type)` to solve the following problem:
Get the type and mode of box structure. Args: box_type (str): The type of box structure. The valid value are "LiDAR", "Camera", or "Depth". Returns: tuple: Box type and box mode.
Here is the function:
def get_box_type(box_type):
    """Resolve a coordinate-system name into its box class and box mode.

    Args:
        box_type (str): The type of box structure.
            The valid value are "LiDAR", "Camera", or "Depth".

    Returns:
        tuple: Box type and box mode.
    """
    from .box_3d_mode import (
        Box3DMode,
        CameraInstance3DBoxes,
        DepthInstance3DBoxes,
        LiDARInstance3DBoxes,
    )

    # Case-insensitive lookup table: name -> (box class, box mode).
    registry = {
        "lidar": (LiDARInstance3DBoxes, Box3DMode.LIDAR),
        "camera": (CameraInstance3DBoxes, Box3DMode.CAM),
        "depth": (DepthInstance3DBoxes, Box3DMode.DEPTH),
    }
    key = box_type.lower()
    if key not in registry:
        raise ValueError(
            'Only "box_type" of "camera", "lidar", "depth"'
            f" are supported, got {box_type}"
        )
    return registry[key]
16,739 | import numpy as np
import torch
from logging import warning
The provided code snippet includes necessary dependencies for implementing the `points_cam2img` function. Write a Python function `def points_cam2img(points_3d, proj_mat, with_depth=False)` to solve the following problem:
Project points from camera coordinates to image coordinates. Args: points_3d (torch.Tensor): Points in shape (N, 3). proj_mat (torch.Tensor): Transformation matrix between coordinates. with_depth (bool, optional): Whether to keep depth in the output. Defaults to False. Returns: torch.Tensor: Points in image coordinates with shape [N, 2].
Here is the function:
def points_cam2img(points_3d, proj_mat, with_depth=False):
    """Project points in camera coordinates onto the image plane.

    Args:
        points_3d (torch.Tensor): Points in shape (N, 3); leading batch
            dimensions are allowed as long as the last dim is 3.
        proj_mat (torch.Tensor): Projection matrix of shape 3x3, 3x4 or 4x4.
        with_depth (bool, optional): Whether to keep depth in the output.
            Defaults to False.

    Returns:
        torch.Tensor: Points in image coordinates with shape [N, 2]
            (or [N, 3] when ``with_depth`` is True).
    """
    assert len(proj_mat.shape) == 2, (
        "The dimension of the projection"
        f" matrix should be 2 instead of {len(proj_mat.shape)}."
    )
    d1, d2 = proj_mat.shape[:2]
    assert (d1, d2) in ((3, 3), (3, 4), (4, 4)), (
        "The shape of the projection matrix" f" ({d1}*{d2}) is not supported."
    )
    if d1 == 3:
        # Embed a 3-row matrix into a homogeneous 4x4 identity.
        full_mat = torch.eye(4, device=proj_mat.device, dtype=proj_mat.dtype)
        full_mat[:d1, :d2] = proj_mat
        proj_mat = full_mat

    # Append a homogeneous 1 to every point, then apply the projection.
    ones_shape = list(points_3d.shape[:-1]) + [1]
    homo_points = torch.cat([points_3d, points_3d.new_ones(ones_shape)], dim=-1)
    projected = homo_points @ proj_mat.t()
    # Perspective divide by depth (third homogeneous component).
    uv = projected[..., :2] / projected[..., 2:3]
    if with_depth:
        return torch.cat([uv, projected[..., 2:3]], dim=-1)
    return uv
16,740 | import numpy as np
import torch
from logging import warning
The provided code snippet includes necessary dependencies for implementing the `mono_cam_box2vis` function. Write a Python function `def mono_cam_box2vis(cam_box)` to solve the following problem:
This is a post-processing function on the bboxes from Mono-3D task. If we want to perform projection visualization, we need to: 1. rotate the box along x-axis for np.pi / 2 (roll) 2. change orientation from local yaw to global yaw 3. convert yaw by (np.pi / 2 - yaw) After applying this function, we can project and draw it on 2D images. Args: cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate \ system before conversion. Could be gt bbox loaded from dataset or \ network prediction output. Returns: :obj:`CameraInstance3DBoxes`: Box after conversion.
Here is the function:
def mono_cam_box2vis(cam_box):
    """Post-process Mono-3D bboxes for projection visualization.

    If we want to perform projection visualization, we need to:
    1. rotate the box along x-axis for np.pi / 2 (roll)
    2. change orientation from local yaw to global yaw
    3. convert yaw by (np.pi / 2 - yaw)
    After applying this function, we can project and draw it on 2D images.

    Args:
        cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate \
            system before conversion. Could be gt bbox loaded from dataset or \
            network prediction output.

    Returns:
        :obj:`CameraInstance3DBoxes`: Box after conversion.
    """
    # BUG FIX: the module imports `warning` (the logging.warning *function*),
    # so the original `warning.warn(...)` raised AttributeError at runtime.
    # Use the stdlib `warnings` module instead.
    import warnings

    warnings.warn(
        "DeprecationWarning: The hack of yaw and dimension in the "
        "monocular 3D detection on nuScenes has been removed. The "
        "function mono_cam_box2vis will be deprecated."
    )
    from . import CameraInstance3DBoxes

    assert isinstance(
        cam_box, CameraInstance3DBoxes
    ), "input bbox should be CameraInstance3DBoxes!"

    loc = cam_box.gravity_center
    dim = cam_box.dims
    yaw = cam_box.yaw
    feats = cam_box.tensor[:, 7:]
    # Swap the last two size dims — equivalent to rotating the box by
    # np.pi / 2 around the camera x-axis (roll).
    # see also here: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L557 # noqa
    dim[:, [1, 2]] = dim[:, [2, 1]]
    # change local yaw to global yaw for visualization
    # refer to https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L164-L166 # noqa
    yaw += torch.atan2(loc[:, 0], loc[:, 2])
    # convert yaw by (-yaw - np.pi / 2)
    # this is because mono 3D box class such as `NuScenesBox` has different
    # definition of rotation with our `CameraInstance3DBoxes`
    yaw = -yaw - np.pi / 2
    cam_box = torch.cat([loc, dim, yaw[:, None], feats], dim=1)
    cam_box = CameraInstance3DBoxes(
        cam_box, box_dim=cam_box.shape[-1], origin=(0.5, 0.5, 0.5)
    )
    return cam_box
16,741 | import numpy as np
import torch
from logging import warning
The provided code snippet includes necessary dependencies for implementing the `get_proj_mat_by_coord_type` function. Write a Python function `def get_proj_mat_by_coord_type(img_meta, coord_type)` to solve the following problem:
Obtain image features using points. Args: img_meta (dict): Meta info. coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'. Can be case-insensitive. Returns: torch.Tensor: transformation matrix.
Here is the function:
def get_proj_mat_by_coord_type(img_meta, coord_type):
    """Fetch the projection matrix for a coordinate system from meta info.

    Args:
        img_meta (dict): Meta info.
        coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.
            Can be case-insensitive.

    Returns:
        torch.Tensor: transformation matrix.
    """
    # Coordinate-system name -> meta-dict key holding its projection matrix.
    key_by_type = {"LIDAR": "lidar2image", "DEPTH": "depth2img", "CAMERA": "cam2img"}
    normalized = coord_type.upper()
    assert normalized in key_by_type.keys()
    return img_meta[key_by_type[normalized]]
16,742 | import torch
def normalize_bbox(bboxes, pc_range):
    """Encode 3D boxes for regression targets.

    Output layout is (cx, cy, log w, log l, cz, log h, sin yaw, cos yaw
    [, vx, vy]); sizes are log-encoded and yaw is split into sin/cos.
    ``pc_range`` is unused but kept for API compatibility.
    """
    center_xy = bboxes[..., 0:2]
    center_z = bboxes[..., 2:3]
    # Log-encode the (w, l, h) sizes in one shot.
    log_dims = bboxes[..., 3:6].log()
    yaw = bboxes[..., 6:7]
    parts = [
        center_xy,
        log_dims[..., 0:2],
        center_z,
        log_dims[..., 2:3],
        yaw.sin(),
        yaw.cos(),
    ]
    # Boxes with more than 7 values also carry (vx, vy) velocity.
    if bboxes.size(-1) > 7:
        parts.append(bboxes[..., 7:9])
    return torch.cat(parts, dim=-1)
16,743 | import torch
def denormalize_bbox(normalized_bboxes, pc_range):
    """Decode regression-encoded boxes back to (cx, cy, cz, w, l, h, rot[, vx, vy]).

    Inverse of ``normalize_bbox``: recovers yaw from its sin/cos pair and
    exponentiates the log-encoded sizes. ``pc_range`` is unused but kept
    for API compatibility.
    """
    # Recover yaw from the (sin, cos) pair.
    yaw = torch.atan2(normalized_bboxes[..., 6:7], normalized_bboxes[..., 7:8])
    # Center: columns 0, 1 (bev) and 4 (height).
    center = torch.cat(
        (normalized_bboxes[..., 0:2], normalized_bboxes[..., 4:5]), dim=-1
    )
    # Sizes (w, l, h) were log-encoded at columns 2, 3, 5.
    dims = torch.cat(
        (normalized_bboxes[..., 2:4], normalized_bboxes[..., 5:6]), dim=-1
    ).exp()
    pieces = [center, dims, yaw]
    if normalized_bboxes.size(-1) > 8:
        # velocity (vx, vy)
        pieces.append(normalized_bboxes[:, 8:10])
    return torch.cat(pieces, dim=-1)
16,744 | import torch
from mmdet.core.bbox import bbox_overlaps
from mmdet.core.bbox.iou_calculators.builder import IOU_CALCULATORS
from ..structures import get_box_type
The provided code snippet includes necessary dependencies for implementing the `bbox_overlaps_nearest_3d` function. Write a Python function `def bbox_overlaps_nearest_3d( bboxes1, bboxes2, mode="iou", is_aligned=False, coordinate="lidar" )` to solve the following problem:
Calculate nearest 3D IoU. Note: This function first finds the nearest 2D boxes in bird eye view (BEV), and then calculates the 2D IoU using :meth:`bbox_overlaps`. This IoU calculator :class:`BboxOverlapsNearest3D` uses this function to calculate IoUs of boxes. If ``is_aligned`` is ``False``, then it calculates the ious between each bbox of bboxes1 and bboxes2, otherwise the ious between each aligned pair of bboxes1 and bboxes2. Args: bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry, v]. bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry, v]. mode (str): "iou" (intersection over union) or iof (intersection over foreground). is_aligned (bool): Whether the calculation is aligned. Return: torch.Tensor: If ``is_aligned`` is ``True``, return ious between \ bboxes1 and bboxes2 with shape (M, N). If ``is_aligned`` is \ ``False``, return shape is M.
Here is the function:
def bbox_overlaps_nearest_3d(
    bboxes1, bboxes2, mode="iou", is_aligned=False, coordinate="lidar"
):
    """Calculate nearest 3D IoU via axis-aligned bird-eye-view boxes.

    The boxes are first snapped to their nearest axis-aligned BEV
    representation, then 2D overlaps are computed with
    :meth:`bbox_overlaps`. This is the routine behind the
    :class:`BboxOverlapsNearest3D` IoU calculator.

    Args:
        bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry, v].
        bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry, v].
        mode (str): "iou" (intersection over union) or "iof"
            (intersection over foreground).
        is_aligned (bool): Whether to compute overlaps only between
            aligned pairs (requires N == M) instead of all N x M pairs.
        coordinate (str): Coordinate system of the boxes
            ("lidar", "camera" or "depth").

    Return:
        torch.Tensor: Pairwise overlaps of shape (N, M), or (N,) when
            ``is_aligned`` is True.
    """
    assert bboxes1.size(-1) == bboxes2.size(-1) >= 7
    box_cls, _ = get_box_type(coordinate)
    # Wrap raw tensors into box structures, then take the nearest
    # axis-aligned BEV projection of each; the torch/CUDA path here is
    # far faster than a numpy round-trip.
    bev1 = box_cls(bboxes1, box_dim=bboxes1.shape[-1]).nearest_bev
    bev2 = box_cls(bboxes2, box_dim=bboxes2.shape[-1]).nearest_bev
    return bbox_overlaps(bev1, bev2, mode=mode, is_aligned=is_aligned)
16,745 | import torch
from mmdet.core.bbox import bbox_overlaps
from mmdet.core.bbox.iou_calculators.builder import IOU_CALCULATORS
from ..structures import get_box_type
The provided code snippet includes necessary dependencies for implementing the `bbox_overlaps_3d` function. Write a Python function `def bbox_overlaps_3d(bboxes1, bboxes2, mode="iou", coordinate="camera")` to solve the following problem:
Calculate 3D IoU using cuda implementation. Note: This function calculates the IoU of 3D boxes based on their volumes. IoU calculator :class:`BboxOverlaps3D` uses this function to calculate the actual IoUs of boxes. Args: bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry]. bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry]. mode (str): "iou" (intersection over union) or iof (intersection over foreground). coordinate (str): 'camera' or 'lidar' coordinate system. Return: torch.Tensor: Bbox overlaps results of bboxes1 and bboxes2 \ with shape (M, N) (aligned mode is not supported currently).
Here is the function:
def bbox_overlaps_3d(bboxes1, bboxes2, mode="iou", coordinate="camera"):
    """Calculate volumetric 3D IoU (CUDA implementation).

    This is the routine behind the :class:`BboxOverlaps3D` IoU
    calculator; overlaps are based on actual box volumes.

    Args:
        bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry].
        bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry].
        mode (str): "iou" (intersection over union) or "iof"
            (intersection over foreground).
        coordinate (str): 'camera' or 'lidar' coordinate system.

    Return:
        torch.Tensor: Overlaps of bboxes1 and bboxes2 with shape (M, N)
            (aligned mode is not supported currently).
    """
    assert bboxes1.size(-1) == bboxes2.size(-1) >= 7
    box_cls, _ = get_box_type(coordinate)
    boxes_a = box_cls(bboxes1, box_dim=bboxes1.shape[-1])
    boxes_b = box_cls(bboxes2, box_dim=bboxes2.shape[-1])
    # `overlaps` is exposed on the box structure itself.
    return boxes_a.overlaps(boxes_a, boxes_b, mode=mode)
16,746 | import torch
from mmdet.core.bbox import bbox_overlaps
from mmdet.core.bbox.iou_calculators.builder import IOU_CALCULATORS
from ..structures import get_box_type
The provided code snippet includes necessary dependencies for implementing the `axis_aligned_bbox_overlaps_3d` function. Write a Python function `def axis_aligned_bbox_overlaps_3d( bboxes1, bboxes2, mode="iou", is_aligned=False, eps=1e-6 )` to solve the following problem:
Calculate overlap between two set of axis aligned 3D bboxes. If ``is_aligned`` is ``False``, then calculate the overlaps between each bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of bboxes1 and bboxes2. Args: bboxes1 (Tensor): shape (B, m, 6) in <x1, y1, z1, x2, y2, z2> format or empty. bboxes2 (Tensor): shape (B, n, 6) in <x1, y1, z1, x2, y2, z2> format or empty. B indicates the batch dim, in shape (B1, B2, ..., Bn). If ``is_aligned`` is ``True``, then m and n must be equal. mode (str): "iou" (intersection over union) or "giou" (generalized intersection over union). is_aligned (bool, optional): If True, then m and n must be equal. Default False. eps (float, optional): A value added to the denominator for numerical stability. Default 1e-6. Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) Example: >>> bboxes1 = torch.FloatTensor([ >>> [0, 0, 0, 10, 10, 10], >>> [10, 10, 10, 20, 20, 20], >>> [32, 32, 32, 38, 40, 42], >>> ]) >>> bboxes2 = torch.FloatTensor([ >>> [0, 0, 0, 10, 20, 20], >>> [0, 10, 10, 10, 19, 20], >>> [10, 10, 10, 20, 20, 20], >>> ]) >>> overlaps = axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2) >>> assert overlaps.shape == (3, 3) >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) >>> assert overlaps.shape == (3, ) Example: >>> empty = torch.empty(0, 6) >>> nonempty = torch.FloatTensor([[0, 0, 0, 10, 9, 10]]) >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
Here is the function:
def axis_aligned_bbox_overlaps_3d(
    bboxes1, bboxes2, mode="iou", is_aligned=False, eps=1e-6
):
    """Calculate IoU / GIoU between two sets of axis-aligned 3D boxes.

    When ``is_aligned`` is False the full cross-product of overlaps is
    computed; otherwise boxes are matched pairwise (m must equal n).

    Args:
        bboxes1 (Tensor): shape (B, m, 6) in <x1, y1, z1, x2, y2, z2>
            format or empty. B indicates optional leading batch dims.
        bboxes2 (Tensor): shape (B, n, 6) in the same format or empty.
        mode (str): "iou" (intersection over union) or "giou"
            (generalized intersection over union).
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.
        eps (float, optional): A value added to the denominator for
            numerical stability. Default 1e-6.

    Returns:
        Tensor: shape (..., m, n) if ``is_aligned`` is False else (..., m).

    Example:
        >>> b1 = torch.FloatTensor([[0, 0, 0, 10, 10, 10]])
        >>> b2 = torch.FloatTensor([[0, 0, 0, 10, 20, 20]])
        >>> axis_aligned_bbox_overlaps_3d(b1, b2)  # -> [[0.25]]
    """
    assert mode in ["iou", "giou"], f"Unsupported mode {mode}"
    # Either empty, or the last dimension holds the 6 corner coordinates.
    assert bboxes1.size(-1) == 6 or bboxes1.size(0) == 0
    assert bboxes2.size(-1) == 6 or bboxes2.size(0) == 0
    # Leading batch dims must agree.
    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
    batch_shape = bboxes1.shape[:-2]

    rows, cols = bboxes1.size(-2), bboxes2.size(-2)
    if is_aligned:
        assert rows == cols
    if rows * cols == 0:
        # Degenerate case: at least one side is empty.
        empty_shape = batch_shape + ((rows,) if is_aligned else (rows, cols))
        return bboxes1.new(empty_shape)

    def _volume(boxes):
        # Product of the three side lengths.
        sides = boxes[..., 3:] - boxes[..., :3]
        return sides[..., 0] * sides[..., 1] * sides[..., 2]

    vol1 = _volume(bboxes1)
    vol2 = _volume(bboxes2)

    if is_aligned:
        lt = torch.max(bboxes1[..., :3], bboxes2[..., :3])  # [B, rows, 3]
        rb = torch.min(bboxes1[..., 3:], bboxes2[..., 3:])  # [B, rows, 3]
        whd = (rb - lt).clamp(min=0)
        inter = whd[..., 0] * whd[..., 1] * whd[..., 2]
        union = vol1 + vol2 - inter
        if mode == "giou":
            enc_lt = torch.min(bboxes1[..., :3], bboxes2[..., :3])
            enc_rb = torch.max(bboxes1[..., 3:], bboxes2[..., 3:])
    else:
        # Broadcast to the full rows x cols grid of pairs.
        lt = torch.max(
            bboxes1[..., :, None, :3], bboxes2[..., None, :, :3]
        )  # [B, rows, cols, 3]
        rb = torch.min(
            bboxes1[..., :, None, 3:], bboxes2[..., None, :, 3:]
        )  # [B, rows, cols, 3]
        whd = (rb - lt).clamp(min=0)
        inter = whd[..., 0] * whd[..., 1] * whd[..., 2]
        union = vol1[..., None] + vol2[..., None, :] - inter
        if mode == "giou":
            enc_lt = torch.min(
                bboxes1[..., :, None, :3], bboxes2[..., None, :, :3]
            )
            enc_rb = torch.max(
                bboxes1[..., :, None, 3:], bboxes2[..., None, :, 3:]
            )

    # Guard against division by zero.
    eps = union.new_tensor([eps])
    union = torch.max(union, eps)
    ious = inter / union
    if mode == "iou":
        return ious
    # GIoU: penalize by the empty fraction of the enclosing box.
    enc_whd = (enc_rb - enc_lt).clamp(min=0)
    enc_vol = enc_whd[..., 0] * enc_whd[..., 1] * enc_whd[..., 2]
    enc_vol = torch.max(enc_vol, eps)
    return ious - (enc_vol - union) / enc_vol
16,747 | import numba
import numpy as np
def _points_to_voxel_reverse_kernel(
points,
voxel_size,
coors_range,
num_points_per_voxel,
coor_to_voxelidx,
voxels,
coors,
max_points=35,
max_voxels=20000,
):
"""convert kitti points(N, >=3) to voxels.
Args:
points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and \
points[:, 3:] contain other information such as reflectivity.
voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size \
coors_range (list[float | tuple[float] | ndarray]): Range of voxels. \
format: xyzxyz, minmax
num_points_per_voxel (int): Number of points per voxel.
coor_to_voxel_idx (np.ndarray): A voxel grid of shape (D, H, W), \
which has the same shape as the complete voxel map. It indicates \
the index of each corresponding voxel.
voxels (np.ndarray): Created empty voxels.
coors (np.ndarray): Created coordinates of each voxel.
max_points (int): Indicate maximum points contained in a voxel.
max_voxels (int): Maximum number of voxels this function create. \
for second, 20000 is a good choice. Points should be shuffled for \
randomness before this function because max_voxels drops points.
Returns:
tuple[np.ndarray]:
voxels: Shape [M, max_points, ndim], only contain points.
coordinates: Shape [M, 3].
num_points_per_voxel: Shape [M].
"""
# put all computations to one loop.
# we shouldn't create large array in main jit code, otherwise
# reduce performance
N = points.shape[0]
# ndim = points.shape[1] - 1
ndim = 3
ndim_minus_1 = ndim - 1
grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
# np.round(grid_size)
# grid_size = np.round(grid_size).astype(np.int64)(np.int32)
grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)
coor = np.zeros(shape=(3,), dtype=np.int32)
voxel_num = 0
failed = False
for i in range(N):
failed = False
for j in range(ndim):
c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j])
if c < 0 or c >= grid_size[j]:
failed = True
break
coor[ndim_minus_1 - j] = c
if failed:
continue
voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]]
if voxelidx == -1:
voxelidx = voxel_num
if voxel_num >= max_voxels:
continue
voxel_num += 1
coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx
coors[voxelidx] = coor
num = num_points_per_voxel[voxelidx]
if num < max_points:
voxels[voxelidx, num] = points[i]
num_points_per_voxel[voxelidx] += 1
return voxel_num
def _points_to_voxel_kernel(
points,
voxel_size,
coors_range,
num_points_per_voxel,
coor_to_voxelidx,
voxels,
coors,
max_points=35,
max_voxels=20000,
):
"""convert kitti points(N, >=3) to voxels.
Args:
points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and \
points[:, 3:] contain other information such as reflectivity.
voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size.
coors_range (list[float | tuple[float] | ndarray]): Range of voxels. \
format: xyzxyz, minmax
num_points_per_voxel (int): Number of points per voxel.
coor_to_voxel_idx (np.ndarray): A voxel grid of shape (D, H, W), \
which has the same shape as the complete voxel map. It indicates \
the index of each corresponding voxel.
voxels (np.ndarray): Created empty voxels.
coors (np.ndarray): Created coordinates of each voxel.
max_points (int): Indicate maximum points contained in a voxel.
max_voxels (int): Maximum number of voxels this function create. \
for second, 20000 is a good choice. Points should be shuffled for \
randomness before this function because max_voxels drops points.
Returns:
tuple[np.ndarray]:
voxels: Shape [M, max_points, ndim], only contain points.
coordinates: Shape [M, 3].
num_points_per_voxel: Shape [M].
"""
N = points.shape[0]
# ndim = points.shape[1] - 1
ndim = 3
grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size
# grid_size = np.round(grid_size).astype(np.int64)(np.int32)
grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)
# lower_bound = coors_range[:3]
# upper_bound = coors_range[3:]
coor = np.zeros(shape=(3,), dtype=np.int32)
voxel_num = 0
failed = False
for i in range(N):
failed = False
for j in range(ndim):
c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j])
if c < 0 or c >= grid_size[j]:
failed = True
break
coor[j] = c
if failed:
continue
voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]]
if voxelidx == -1:
voxelidx = voxel_num
if voxel_num >= max_voxels:
continue
voxel_num += 1
coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx
coors[voxelidx] = coor
num = num_points_per_voxel[voxelidx]
if num < max_points:
voxels[voxelidx, num] = points[i]
num_points_per_voxel[voxelidx] += 1
return voxel_num
The provided code snippet includes necessary dependencies for implementing the `points_to_voxel` function. Write a Python function `def points_to_voxel( points, voxel_size, coors_range, max_points=35, reverse_index=True, max_voxels=20000 )` to solve the following problem:
convert kitti points(N, >=3) to voxels. Args: points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and \ points[:, 3:] contain other information such as reflectivity. voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size coors_range (list[float | tuple[float] | ndarray]): Voxel range. \ format: xyzxyz, minmax max_points (int): Indicate maximum points contained in a voxel. reverse_index (bool): Whether return reversed coordinates. \ if points has xyz format and reverse_index is True, output \ coordinates will be zyx format, but points in features always \ xyz format. max_voxels (int): Maximum number of voxels this function creates. \ For second, 20000 is a good choice. Points should be shuffled for \ randomness before this function because max_voxels drops points. Returns: tuple[np.ndarray]: voxels: [M, max_points, ndim] float tensor. only contain points. coordinates: [M, 3] int32 tensor. num_points_per_voxel: [M] int32 tensor.
Here is the function:
def points_to_voxel(
    points, voxel_size, coors_range, max_points=35, reverse_index=True, max_voxels=20000
):
    """Convert kitti points (N, >=3) to voxels.

    Allocates the output buffers here (outside the numba-compiled kernels)
    and dispatches to the xyz- or zyx-ordered kernel.

    Args:
        points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and
            points[:, 3:] contain other information such as reflectivity.
        voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size.
        coors_range (list[float | tuple[float] | ndarray]): Voxel range,
            format: xyzxyz, minmax.
        max_points (int): Maximum points contained in a voxel.
        reverse_index (bool): Whether to return reversed coordinates. If
            points have xyz format and reverse_index is True, output
            coordinates are zyx, but points in features stay xyz.
        max_voxels (int): Maximum number of voxels this function creates.
            Points should be shuffled for randomness beforehand, because
            exceeding max_voxels drops points.

    Returns:
        tuple[np.ndarray]:
            voxels: [M, max_points, ndim] float tensor, only contains points.
            coordinates: [M, 3] int32 tensor.
            num_points_per_voxel: [M] int32 tensor.
    """
    if not isinstance(voxel_size, np.ndarray):
        voxel_size = np.array(voxel_size, dtype=points.dtype)
    if not isinstance(coors_range, np.ndarray):
        coors_range = np.array(coors_range, dtype=points.dtype)
    grid_shape = np.round(
        (coors_range[3:] - coors_range[:3]) / voxel_size
    ).astype(np.int32)
    voxelmap_shape = tuple(grid_shape.tolist())
    if reverse_index:
        # Kernel indexes the lookup grid in zyx order.
        voxelmap_shape = voxelmap_shape[::-1]
    # Pre-allocate everything here; large arrays must not be created inside
    # jit(nopython=True) code.
    num_points_per_voxel = np.zeros(shape=(max_voxels,), dtype=np.int32)
    coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32)
    voxels = np.zeros(
        shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype
    )
    coors = np.zeros(shape=(max_voxels, 3), dtype=np.int32)
    kernel = (
        _points_to_voxel_reverse_kernel if reverse_index else _points_to_voxel_kernel
    )
    voxel_num = kernel(
        points,
        voxel_size,
        coors_range,
        num_points_per_voxel,
        coor_to_voxelidx,
        voxels,
        coors,
        max_points,
        max_voxels,
    )
    # Trim buffers to the voxels that were actually produced.
    return voxels[:voxel_num], coors[:voxel_num], num_points_per_voxel[:voxel_num]
16,748 | import mmcv
from . import voxel_generator
The provided code snippet includes necessary dependencies for implementing the `build_voxel_generator` function. Write a Python function `def build_voxel_generator(cfg, **kwargs)` to solve the following problem:
Builder of voxel generator.
Here is the function:
def build_voxel_generator(cfg, **kwargs):
    """Builder of voxel generator.

    Args:
        cfg (voxel_generator.VoxelGenerator | dict): Either an already
            constructed ``VoxelGenerator`` (returned unchanged) or a config
            dict describing one.
        **kwargs: Default arguments forwarded to the constructor when
            building from a dict.

    Returns:
        voxel_generator.VoxelGenerator: The voxel generator instance.

    Raises:
        TypeError: If ``cfg`` is neither a ``VoxelGenerator`` nor a dict.
    """
    if isinstance(cfg, voxel_generator.VoxelGenerator):
        return cfg
    elif isinstance(cfg, dict):
        return mmcv.runner.obj_from_dict(cfg, voxel_generator, default_args=kwargs)
    else:
        # Fixed error message: this builder creates voxel generators, not
        # samplers (the old text was copy-pasted from a sampler builder).
        raise TypeError(
            "Invalid type {} for building a voxel generator".format(type(cfg))
        )
16,749 | import numba
import numpy as np
import torch
from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu, nms_normal_gpu
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, post_max_size=None):
    """Nms function with gpu implementation.
    Args:
        boxes (torch.Tensor): Input boxes with the shape of [N, 5]
            ([x1, y1, x2, y2, ry]).
        scores (torch.Tensor): Scores of boxes with the shape of [N].
        thresh (int): IoU threshold for suppression.
        pre_maxsize (int): Max size of boxes before nms. Default: None.
        post_max_size (int): Max size of boxes after nms. Default: None.
    Returns:
        torch.Tensor: Indexes (into the original ``boxes``) kept after nms,
            in descending score order.
    """
    # Sort boxes by score so higher-confidence boxes suppress lower ones.
    order = scores.sort(0, descending=True)[1]
    if pre_maxsize is not None:
        order = order[:pre_maxsize]
    boxes = boxes[order].contiguous()
    # `keep` is an output buffer filled by the CUDA kernel with indices
    # (relative to the sorted boxes) of the survivors.
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    # NOTE(review): `iou3d_cuda` is the compiled CUDA extension; presumably
    # imported at module level in the full file — not visible in this snippet.
    num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh, boxes.device.index)
    # Map sorted-order indices back to original indices on the boxes' device.
    keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
    if post_max_size is not None:
        keep = keep[:post_max_size]
    return keep
def nms_normal_gpu(boxes, scores, thresh):
    """Normal (non-rotated) non maximum suppression on GPU.
    Args:
        boxes (torch.Tensor): Input boxes with shape (N, 5).
        scores (torch.Tensor): Scores of predicted boxes with shape (N).
        thresh (torch.Tensor): Threshold of non maximum suppression.
    Returns:
        torch.Tensor: Remaining indices with scores in descending order.
    """
    # Sort boxes by score so higher-confidence boxes suppress lower ones.
    order = scores.sort(0, descending=True)[1]
    boxes = boxes[order].contiguous()
    # `keep` is an output buffer filled by the CUDA kernel with indices
    # (relative to the sorted boxes) of the survivors.
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    # NOTE(review): `iou3d_cuda` is the compiled CUDA extension; presumably
    # imported at module level in the full file — not visible in this snippet.
    num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh, boxes.device.index)
    # Map sorted-order indices back to original indices on the boxes' device.
    return order[keep[:num_out].cuda(boxes.device)].contiguous()
The provided code snippet includes necessary dependencies for implementing the `box3d_multiclass_nms` function. Write a Python function `def box3d_multiclass_nms( mlvl_bboxes, mlvl_bboxes_for_nms, mlvl_scores, score_thr, max_num, cfg, mlvl_dir_scores=None, mlvl_attr_scores=None, mlvl_bboxes2d=None, )` to solve the following problem:
Multi-class nms for 3D boxes. Args: mlvl_bboxes (torch.Tensor): Multi-level boxes with shape (N, M). M is the dimensions of boxes. mlvl_bboxes_for_nms (torch.Tensor): Multi-level boxes with shape (N, 5) ([x1, y1, x2, y2, ry]). N is the number of boxes. mlvl_scores (torch.Tensor): Multi-level boxes with shape (N, C + 1). N is the number of boxes. C is the number of classes. score_thr (float): Score thredhold to filter boxes with low confidence. max_num (int): Maximum number of boxes will be kept. cfg (dict): Configuration dict of NMS. mlvl_dir_scores (torch.Tensor, optional): Multi-level scores of direction classifier. Defaults to None. mlvl_attr_scores (torch.Tensor, optional): Multi-level scores of attribute classifier. Defaults to None. mlvl_bboxes2d (torch.Tensor, optional): Multi-level 2D bounding boxes. Defaults to None. Returns: tuple[torch.Tensor]: Return results after nms, including 3D \ bounding boxes, scores, labels, direction scores, attribute \ scores (optional) and 2D bounding boxes (optional).
Here is the function:
def box3d_multiclass_nms(
    mlvl_bboxes,
    mlvl_bboxes_for_nms,
    mlvl_scores,
    score_thr,
    max_num,
    cfg,
    mlvl_dir_scores=None,
    mlvl_attr_scores=None,
    mlvl_bboxes2d=None,
):
    """Multi-class nms for 3D boxes.
    Args:
        mlvl_bboxes (torch.Tensor): Multi-level boxes with shape (N, M).
            M is the dimensions of boxes.
        mlvl_bboxes_for_nms (torch.Tensor): Multi-level boxes with shape
            (N, 5) ([x1, y1, x2, y2, ry]). N is the number of boxes.
        mlvl_scores (torch.Tensor): Multi-level boxes with shape
            (N, C + 1). N is the number of boxes. C is the number of classes.
        score_thr (float): Score threshold to filter boxes with low
            confidence.
        max_num (int): Maximum number of boxes will be kept.
        cfg (dict): Configuration dict of NMS.
        mlvl_dir_scores (torch.Tensor, optional): Multi-level scores
            of direction classifier. Defaults to None.
        mlvl_attr_scores (torch.Tensor, optional): Multi-level scores
            of attribute classifier. Defaults to None.
        mlvl_bboxes2d (torch.Tensor, optional): Multi-level 2D bounding
            boxes. Defaults to None.
    Returns:
        tuple[torch.Tensor]: Return results after nms, including 3D \
            bounding boxes, scores, labels, direction scores, attribute \
            scores (optional) and 2D bounding boxes (optional).
    """
    # do multi class nms
    # the fg class id range: [0, num_classes-1]
    # last column of mlvl_scores is the background class, hence the -1
    num_classes = mlvl_scores.shape[1] - 1
    bboxes = []
    scores = []
    labels = []
    dir_scores = []
    attr_scores = []
    bboxes2d = []
    for i in range(0, num_classes):
        # get bboxes and scores of this class
        cls_inds = mlvl_scores[:, i] > score_thr
        if not cls_inds.any():
            continue
        _scores = mlvl_scores[cls_inds, i]
        _bboxes_for_nms = mlvl_bboxes_for_nms[cls_inds, :]
        # NMS is run independently per class on the BEV (x1,y1,x2,y2,ry) boxes
        if cfg.use_rotate_nms:
            nms_func = nms_gpu
        else:
            nms_func = nms_normal_gpu
        selected = nms_func(_bboxes_for_nms, _scores, cfg.nms_thr)
        _mlvl_bboxes = mlvl_bboxes[cls_inds, :]
        bboxes.append(_mlvl_bboxes[selected])
        scores.append(_scores[selected])
        # every survivor of this round gets label i
        cls_label = mlvl_bboxes.new_full((len(selected),), i, dtype=torch.long)
        labels.append(cls_label)
        if mlvl_dir_scores is not None:
            _mlvl_dir_scores = mlvl_dir_scores[cls_inds]
            dir_scores.append(_mlvl_dir_scores[selected])
        if mlvl_attr_scores is not None:
            _mlvl_attr_scores = mlvl_attr_scores[cls_inds]
            attr_scores.append(_mlvl_attr_scores[selected])
        if mlvl_bboxes2d is not None:
            _mlvl_bboxes2d = mlvl_bboxes2d[cls_inds]
            bboxes2d.append(_mlvl_bboxes2d[selected])
    if bboxes:
        bboxes = torch.cat(bboxes, dim=0)
        scores = torch.cat(scores, dim=0)
        labels = torch.cat(labels, dim=0)
        if mlvl_dir_scores is not None:
            dir_scores = torch.cat(dir_scores, dim=0)
        if mlvl_attr_scores is not None:
            attr_scores = torch.cat(attr_scores, dim=0)
        if mlvl_bboxes2d is not None:
            bboxes2d = torch.cat(bboxes2d, dim=0)
        # keep only the top max_num detections across all classes
        if bboxes.shape[0] > max_num:
            _, inds = scores.sort(descending=True)
            inds = inds[:max_num]
            bboxes = bboxes[inds, :]
            labels = labels[inds]
            scores = scores[inds]
            if mlvl_dir_scores is not None:
                dir_scores = dir_scores[inds]
            if mlvl_attr_scores is not None:
                attr_scores = attr_scores[inds]
            if mlvl_bboxes2d is not None:
                bboxes2d = bboxes2d[inds]
    else:
        # nothing survived the score threshold: return empty tensors with
        # the expected shapes/dtypes
        bboxes = mlvl_scores.new_zeros((0, mlvl_bboxes.size(-1)))
        scores = mlvl_scores.new_zeros((0,))
        labels = mlvl_scores.new_zeros((0,), dtype=torch.long)
        if mlvl_dir_scores is not None:
            dir_scores = mlvl_scores.new_zeros((0,))
        if mlvl_attr_scores is not None:
            attr_scores = mlvl_scores.new_zeros((0,))
        if mlvl_bboxes2d is not None:
            bboxes2d = mlvl_scores.new_zeros((0, 4))
    results = (bboxes, scores, labels)
    # optional outputs are appended in a fixed order: dir, attr, 2d
    if mlvl_dir_scores is not None:
        results = results + (dir_scores,)
    if mlvl_attr_scores is not None:
        results = results + (attr_scores,)
    if mlvl_bboxes2d is not None:
        results = results + (bboxes2d,)
    return results
16,750 | import numba
import numpy as np
import torch
from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu, nms_normal_gpu
The provided code snippet includes necessary dependencies for implementing the `aligned_3d_nms` function. Write a Python function `def aligned_3d_nms(boxes, scores, classes, thresh)` to solve the following problem:
3d nms for aligned boxes. Args: boxes (torch.Tensor): Aligned box with shape [n, 6]. scores (torch.Tensor): Scores of each box. classes (torch.Tensor): Class of each box. thresh (float): Iou threshold for nms. Returns: torch.Tensor: Indices of selected boxes.
Here is the function:
def aligned_3d_nms(boxes, scores, classes, thresh):
    """Class-aware 3D NMS for axis-aligned boxes.

    Args:
        boxes (torch.Tensor): Aligned boxes with shape [n, 6]
            (x1, y1, z1, x2, y2, z2).
        scores (torch.Tensor): Scores of each box.
        classes (torch.Tensor): Class of each box.
        thresh (float): IoU threshold for nms.

    Returns:
        torch.Tensor: Indices of selected boxes.
    """
    x1, y1, z1, x2, y2, z2 = (boxes[:, k] for k in range(6))
    volume = (x2 - x1) * (y2 - y1) * (z2 - z1)
    zero = boxes.new_zeros(
        1,
    )

    # ascending sort: the best remaining box is always at the end
    order = torch.argsort(scores)
    picked = []
    while order.shape[0] != 0:
        best = order[-1]
        picked.append(best)
        rest = order[: order.shape[0] - 1]

        # intersection of the best box with all remaining boxes
        ix1 = torch.max(x1[best], x1[rest])
        iy1 = torch.max(y1[best], y1[rest])
        iz1 = torch.max(z1[best], z1[rest])
        ix2 = torch.min(x2[best], x2[rest])
        iy2 = torch.min(y2[best], y2[rest])
        iz2 = torch.min(z2[best], z2[rest])
        inter = (
            torch.max(zero, ix2 - ix1)
            * torch.max(zero, iy2 - iy1)
            * torch.max(zero, iz2 - iz1)
        )
        iou = inter / (volume[best] + volume[rest] - inter)
        # boxes of a different class never suppress each other
        iou = iou * (classes[best] == classes[rest]).float()

        order = rest[torch.nonzero(iou <= thresh, as_tuple=False).flatten()]
    return boxes.new_tensor(picked, dtype=torch.long)
16,751 | import numba
import numpy as np
import torch
from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu, nms_normal_gpu
The provided code snippet includes necessary dependencies for implementing the `circle_nms` function. Write a Python function `def circle_nms(dets, thresh, post_max_size=83)` to solve the following problem:
Circular NMS. An object is only counted as positive if no other center with a higher confidence exists within a radius r using a bird-eye view distance metric. Args: dets (torch.Tensor): Detection results with the shape of [N, 3]. thresh (float): Value of threshold. post_max_size (int): Max number of prediction to be kept. Defaults to 83 Returns: torch.Tensor: Indexes of the detections to be kept.
Here is the function:
def circle_nms(dets, thresh, post_max_size=83):
    """Circular NMS on bird-eye-view centers.

    A detection survives only if no higher-scoring center lies within the
    squared-distance threshold ``thresh``.

    Args:
        dets (np.ndarray): Detection results with the shape of [N, 3],
            each row being (x, y, score).
        thresh (float): Squared center-distance threshold.
        post_max_size (int): Max number of predictions to be kept. Defaults
            to 83.

    Returns:
        list: Indexes of the detections to be kept.
    """
    xs = dets[:, 0]
    ys = dets[:, 1]
    confidences = dets[:, 2]
    # visit detections from highest to lowest score
    order = confidences.argsort()[::-1].astype(np.int32)
    total = dets.shape[0]
    suppressed = np.zeros((total), dtype=np.int32)
    keep = []
    for a in range(total):
        i = order[a]
        if suppressed[i] == 1:  # already knocked out by a better detection
            continue
        keep.append(i)
        for b in range(a + 1, total):
            j = order[b]
            if suppressed[j] == 1:
                continue
            # squared BEV distance between the two centers
            dist = (xs[i] - xs[j]) ** 2 + (ys[i] - ys[j]) ** 2
            if dist <= thresh:
                suppressed[j] = 1
    return keep[:post_max_size]
16,752 | import copy
import os
from typing import List, Optional, Tuple
import cv2
import mmcv
import numpy as np
from matplotlib import pyplot as plt
from ..bbox import LiDARInstance3DBoxes
OBJECT_PALETTE = {
"car": (255, 158, 0),
"truck": (255, 99, 71),
"construction_vehicle": (233, 150, 70),
"bus": (255, 69, 0),
"trailer": (255, 140, 0),
"barrier": (112, 128, 144),
"motorcycle": (255, 61, 99),
"bicycle": (220, 20, 60),
"pedestrian": (0, 0, 230),
"traffic_cone": (47, 79, 79),
}
def visualize_camera(
    fpath: str,
    image: np.ndarray,
    *,
    bboxes: Optional[LiDARInstance3DBoxes] = None,
    labels: Optional[np.ndarray] = None,
    transform: Optional[np.ndarray] = None,
    classes: Optional[List[str]] = None,
    color: Optional[Tuple[int, int, int]] = None,
    thickness: float = 4,
) -> None:
    """Project 3D boxes onto a camera image and save the drawing to `fpath`.

    Args:
        fpath: Output image path (parent directories are created).
        image: RGB image as an (H, W, 3) array.
        bboxes: 3D boxes to draw; skipped when None or empty.
        labels: Per-box class indices into ``classes``; required when
            ``bboxes`` are given (indexed alongside them below).
        transform: 4x4 lidar-to-image projection matrix (flattened or
            shaped); required when ``bboxes`` are given.
        classes: Class names used to look up colors in ``OBJECT_PALETTE``.
        color: Fixed BGR color overriding the per-class palette.
        thickness: Line thickness for box edges.
    """
    canvas = image.copy()
    canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)

    if bboxes is not None and len(bboxes) > 0:
        corners = bboxes.corners
        num_bboxes = corners.shape[0]

        # homogeneous coordinates for all 8 corners of every box
        coords = np.concatenate(
            [corners.reshape(-1, 3), np.ones((num_bboxes * 8, 1))], axis=-1
        )
        transform = copy.deepcopy(transform).reshape(4, 4)
        coords = coords @ transform.T
        coords = coords.reshape(-1, 8, 4)

        # keep only boxes fully in front of the camera (positive depth)
        indices = np.all(coords[..., 2] > 0, axis=1)
        coords = coords[indices]
        labels = labels[indices]

        # draw far boxes first so near boxes paint over them
        indices = np.argsort(-np.min(coords[..., 2], axis=1))
        coords = coords[indices]
        labels = labels[indices]

        # perspective divide (depth clamped for numerical safety)
        coords = coords.reshape(-1, 4)
        coords[:, 2] = np.clip(coords[:, 2], a_min=1e-5, a_max=1e5)
        coords[:, 0] /= coords[:, 2]
        coords[:, 1] /= coords[:, 2]

        coords = coords[..., :2].reshape(-1, 8, 2)
        for index in range(coords.shape[0]):
            name = classes[labels[index]]
            for start, end in [
                (0, 1),
                (0, 3),
                (0, 4),
                (1, 2),
                (1, 5),
                (3, 2),
                (3, 7),
                (4, 5),
                (4, 7),
                (2, 6),
                (5, 6),
                (6, 7),
            ]:
                cv2.line(
                    canvas,
                    # `np.int` was removed in NumPy 1.24; the builtin `int`
                    # dtype is the drop-in equivalent.
                    coords[index, start].astype(int),
                    coords[index, end].astype(int),
                    color or OBJECT_PALETTE[name],
                    thickness,
                    cv2.LINE_AA,
                )
    canvas = canvas.astype(np.uint8)
    canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)

    mmcv.mkdir_or_exist(os.path.dirname(fpath))
    mmcv.imwrite(canvas, fpath)
16,753 | import copy
import os
from typing import List, Optional, Tuple
import cv2
import mmcv
import numpy as np
from matplotlib import pyplot as plt
from ..bbox import LiDARInstance3DBoxes
OBJECT_PALETTE = {
"car": (255, 158, 0),
"truck": (255, 99, 71),
"construction_vehicle": (233, 150, 70),
"bus": (255, 69, 0),
"trailer": (255, 140, 0),
"barrier": (112, 128, 144),
"motorcycle": (255, 61, 99),
"bicycle": (220, 20, 60),
"pedestrian": (0, 0, 230),
"traffic_cone": (47, 79, 79),
}
def visualize_lidar(
    fpath: str,
    lidar: Optional[np.ndarray] = None,
    *,
    bboxes: Optional[LiDARInstance3DBoxes] = None,
    labels: Optional[np.ndarray] = None,
    classes: Optional[List[str]] = None,
    xlim: Tuple[float, float] = (-50, 50),
    ylim: Tuple[float, float] = (-50, 50),
    color: Optional[Tuple[int, int, int]] = None,
    radius: float = 15,
    thickness: float = 25,
) -> None:
    """Render a bird's-eye view of a LiDAR sweep (and optional 3D boxes)
    onto a black background and save the figure to ``fpath``.

    Args:
        fpath: Output image path (parent directories are created).
        lidar: Point cloud as an (N, >=2) array; only x/y are used.
        bboxes: 3D boxes whose bottom-face outlines are drawn.
        labels: Per-box class indices into ``classes``.
        classes: Class names used to look up colors in ``OBJECT_PALETTE``.
        xlim / ylim: Plot extents in meters.
        color: Fixed RGB color overriding the per-class palette.
        radius: Scatter point size for the LiDAR points.
        thickness: Line width for box outlines.
    """
    fig = plt.figure(figsize=(xlim[1] - xlim[0], ylim[1] - ylim[0]))

    axes = plt.gca()
    axes.set_xlim(*xlim)
    axes.set_ylim(*ylim)
    axes.set_aspect(1)
    axes.set_axis_off()

    if lidar is not None:
        # raw points as white dots
        plt.scatter(
            lidar[:, 0],
            lidar[:, 1],
            s=radius,
            c="white",
        )

    if bboxes is not None and len(bboxes) > 0:
        # bottom face of each box as a closed polyline (corners 0-3-7-4-0)
        outlines = bboxes.corners[:, [0, 3, 7, 4, 0], :2]
        for idx in range(outlines.shape[0]):
            name = classes[labels[idx]]
            plt.plot(
                outlines[idx, :, 0],
                outlines[idx, :, 1],
                linewidth=thickness,
                color=np.array(color or OBJECT_PALETTE[name]) / 255,
            )

    mmcv.mkdir_or_exist(os.path.dirname(fpath))
    fig.savefig(
        fpath,
        dpi=10,
        facecolor="black",
        format="png",
        bbox_inches="tight",
        pad_inches=0,
    )
    plt.close()
16,754 | import copy
import os
from typing import List, Optional, Tuple
import cv2
import mmcv
import numpy as np
from matplotlib import pyplot as plt
from ..bbox import LiDARInstance3DBoxes
MAP_PALETTE = {
"drivable_area": (166, 206, 227),
"road_segment": (31, 120, 180),
"road_block": (178, 223, 138),
"lane": (51, 160, 44),
"ped_crossing": (251, 154, 153),
"walkway": (227, 26, 28),
"stop_line": (253, 191, 111),
"carpark_area": (255, 127, 0),
"road_divider": (202, 178, 214),
"lane_divider": (106, 61, 154),
"divider": (106, 61, 154),
}
def visualize_map(
    fpath: str,
    masks: np.ndarray,
    *,
    classes: List[str],
    background: Tuple[int, int, int] = (240, 240, 240),
) -> None:
    """Render per-class boolean map masks as a colored image saved to `fpath`.

    Args:
        fpath: Output image path (parent directories are created).
        masks: Boolean array of shape (num_classes, H, W); mask ``k``
            corresponds to ``classes[k]``.
        classes: Class names; only those present in ``MAP_PALETTE`` are drawn.
        background: RGB fill color for uncovered pixels.
    """
    # `np.bool` was removed in NumPy 1.24; compare against the builtin bool,
    # which numpy dtypes accept in equality checks.
    assert masks.dtype == bool, masks.dtype

    canvas = np.zeros((*masks.shape[-2:], 3), dtype=np.uint8)
    canvas[:] = background

    # later classes overwrite earlier ones where masks overlap
    for k, name in enumerate(classes):
        if name in MAP_PALETTE:
            canvas[masks[k], :] = MAP_PALETTE[name]
    canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)

    mmcv.mkdir_or_exist(os.path.dirname(fpath))
    mmcv.imwrite(canvas, fpath)
16,755 | import numpy as np
import torch
def gaussian_2d(shape, sigma=1):
    """Generate a 2D Gaussian map.

    Args:
        shape (list[int]): (rows, cols) of the output map.
        sigma (float): Standard deviation of the Gaussian. Defaults to 1.

    Returns:
        np.ndarray: Gaussian map of the requested shape with peak value 1
            at the center; negligible tail values are zeroed.
    """
    half_rows, half_cols = [(edge - 1.0) / 2.0 for edge in shape]
    ys, xs = np.ogrid[-half_rows : half_rows + 1, -half_cols : half_cols + 1]

    kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    # suppress values below numerical precision of the peak
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel
The provided code snippet includes necessary dependencies for implementing the `draw_heatmap_gaussian` function. Write a Python function `def draw_heatmap_gaussian(heatmap, center, radius, k=1)` to solve the following problem:
Get gaussian masked heatmap. Args: heatmap (torch.Tensor): Heatmap to be masked. center (torch.Tensor): Center coord of the heatmap. radius (int): Radius of gausian. K (int): Multiple of masked_gaussian. Defaults to 1. Returns: torch.Tensor: Masked heatmap.
Here is the function:
def draw_heatmap_gaussian(heatmap, center, radius, k=1):
    """Get gaussian masked heatmap.
    Args:
        heatmap (torch.Tensor): Heatmap to be masked, modified in place.
        center (torch.Tensor): Center coord (x, y) of the gaussian.
        radius (int): Radius of gaussian.
        k (int): Multiple of masked_gaussian. Defaults to 1.
    Returns:
        torch.Tensor: Masked heatmap (same tensor as the input).
    """
    diameter = 2 * radius + 1
    gaussian = gaussian_2d((diameter, diameter), sigma=diameter / 6)

    x, y = int(center[0]), int(center[1])

    height, width = heatmap.shape[0:2]

    # clip the gaussian window against the heatmap borders
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)

    # view into the heatmap; writing through it updates `heatmap` in place
    masked_heatmap = heatmap[y - top : y + bottom, x - left : x + right]
    masked_gaussian = torch.from_numpy(
        gaussian[radius - top : radius + bottom, radius - left : radius + right]
    ).to(heatmap.device, torch.float32)
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
        # element-wise max keeps the stronger of existing and new gaussian;
        # out= writes through the view, mutating `heatmap`
        torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
    return heatmap
16,756 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `gaussian_radius` function. Write a Python function `def gaussian_radius(det_size, min_overlap=0.5)` to solve the following problem:
Get radius of gaussian. Args: det_size (tuple[torch.Tensor]): Size of the detection result. min_overlap (float): Gaussian_overlap. Defaults to 0.5. Returns: torch.Tensor: Computed radius.
Here is the function:
def gaussian_radius(det_size, min_overlap=0.5):
    """Get the gaussian radius for a detection box.

    Solves three quadratic boundary cases relating box size and the
    required overlap, and returns the tightest (smallest) radius.

    Args:
        det_size (tuple[torch.Tensor]): (height, width) of the detection.
        min_overlap (float): Minimum required overlap. Defaults to 0.5.

    Returns:
        torch.Tensor: Computed radius.
    """
    height, width = det_size

    # case 1
    coeff_b = height + width
    coeff_c = width * height * (1 - min_overlap) / (1 + min_overlap)
    disc = torch.sqrt(coeff_b**2 - 4 * coeff_c)
    r1 = (coeff_b + disc) / 2

    # case 2
    coeff_b = 2 * (height + width)
    coeff_c = (1 - min_overlap) * width * height
    disc = torch.sqrt(coeff_b**2 - 16 * coeff_c)
    r2 = (coeff_b + disc) / 2

    # case 3
    coeff_a = 4 * min_overlap
    coeff_b = -2 * min_overlap * (height + width)
    coeff_c = (min_overlap - 1) * width * height
    disc = torch.sqrt(coeff_b**2 - 4 * coeff_a * coeff_c)
    r3 = (coeff_b + disc) / 2

    return min(r1, r2, r3)
16,757 | from mmcv.cnn import build_conv_layer, build_norm_layer
from torch import nn
from mmdet3d.ops import spconv
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
The provided code snippet includes necessary dependencies for implementing the `make_sparse_convmodule` function. Write a Python function `def make_sparse_convmodule( in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0, conv_type="SubMConv3d", norm_cfg=None, order=("conv", "norm", "act"), )` to solve the following problem:
Make sparse convolution module. Args: in_channels (int): the number of input channels out_channels (int): the number of out channels kernel_size (int|tuple(int)): kernel size of convolution indice_key (str): the indice key used for sparse tensor stride (int|tuple(int)): the stride of convolution padding (int or list[int]): the padding number of input conv_type (str): sparse conv type in spconv norm_cfg (dict[str]): config of normalization layer order (tuple[str]): The order of conv/norm/activation layers. It is a sequence of "conv", "norm" and "act". Common examples are ("conv", "norm", "act") and ("act", "conv", "norm"). Returns: spconv.SparseSequential: sparse convolution module.
Here is the function:
def make_sparse_convmodule(
    in_channels,
    out_channels,
    kernel_size,
    indice_key,
    stride=1,
    padding=0,
    conv_type="SubMConv3d",
    norm_cfg=None,
    order=("conv", "norm", "act"),
):
    """Assemble a sparse convolution module as ``spconv.SparseSequential``.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        kernel_size (int | tuple(int)): Kernel size of the convolution.
        indice_key (str): Indice key used for the sparse tensor.
        stride (int | tuple(int)): Stride of the convolution.
        padding (int | list[int]): Padding of the input.
        conv_type (str): Sparse conv type registered in spconv.
        norm_cfg (dict[str]): Config of the normalization layer.
        order (tuple[str]): Order of conv/norm/activation layers; a
            sequence drawn from {"conv", "norm", "act"}, e.g.
            ("conv", "norm", "act") or ("act", "conv", "norm").

    Returns:
        spconv.SparseSequential: The assembled sparse convolution module.
    """
    assert isinstance(order, tuple) and len(order) <= 3
    assert set(order) | {"conv", "norm", "act"} == {"conv", "norm", "act"}
    conv_cfg = dict(type=conv_type, indice_key=indice_key)
    # Inverse convolutions recover their geometry from the paired forward
    # conv through the indice key, so they take no stride/padding args.
    inverse_conv_types = (
        "SparseInverseConv3d",
        "SparseInverseConv2d",
        "SparseInverseConv1d",
    )
    modules = []
    for name in order:
        if name == "conv":
            if conv_type in inverse_conv_types:
                conv = build_conv_layer(
                    conv_cfg, in_channels, out_channels, kernel_size, bias=False
                )
            else:
                conv = build_conv_layer(
                    conv_cfg,
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride=stride,
                    padding=padding,
                    bias=False,
                )
            modules.append(conv)
        elif name == "norm":
            modules.append(build_norm_layer(norm_cfg, out_channels)[1])
        elif name == "act":
            modules.append(nn.ReLU(inplace=True))
    return spconv.SparseSequential(*modules)
16,758 | import torch
from . import bev_pool_ext
class QuickCumsumCuda(torch.autograd.Function):
    """Autograd wrapper around the CUDA BEV pooling kernel.

    Sums consecutive rows of ``x`` that share the same ``rank`` (i.e.
    that fall into the same BEV cell) via ``bev_pool_ext``. ``ranks``
    must be sorted so that equal values are adjacent.
    """

    # Fix: new-style torch.autograd.Function requires forward/backward to
    # be static methods; calling .apply on a legacy (instance-method)
    # Function raises a RuntimeError in modern PyTorch.
    @staticmethod
    def forward(ctx, x, geom_feats, ranks, B, D, H, W):
        """Pool sorted point features into a (B, D, H, W, C) BEV grid.

        Args:
            x (torch.Tensor): (N, C) point features, sorted by ``ranks``.
            geom_feats (torch.Tensor): (N, 4) grid coordinates per point.
            ranks (torch.Tensor): (N,) sorted linearized cell indices.
            B, D, H, W (int): Output grid dimensions.

        Returns:
            torch.Tensor: Pooled features.
        """
        # Mark the first element of every run of equal ranks; runs become
        # the intervals summed by the CUDA kernel.
        kept = torch.ones(x.shape[0], device=x.device, dtype=torch.bool)
        kept[1:] = ranks[1:] != ranks[:-1]
        interval_starts = torch.where(kept)[0].int()
        interval_lengths = torch.zeros_like(interval_starts)
        interval_lengths[:-1] = interval_starts[1:] - interval_starts[:-1]
        interval_lengths[-1] = x.shape[0] - interval_starts[-1]
        geom_feats = geom_feats.int()
        out = bev_pool_ext.bev_pool_forward(
            x,
            geom_feats,
            interval_lengths,
            interval_starts,
            B,
            D,
            H,
            W,
        )
        ctx.save_for_backward(interval_starts, interval_lengths, geom_feats)
        ctx.saved_shapes = B, D, H, W
        return out

    @staticmethod
    def backward(ctx, out_grad):
        """Scatter the output gradient back to the input points."""
        interval_starts, interval_lengths, geom_feats = ctx.saved_tensors
        B, D, H, W = ctx.saved_shapes
        out_grad = out_grad.contiguous()
        x_grad = bev_pool_ext.bev_pool_backward(
            out_grad,
            geom_feats,
            interval_lengths,
            interval_starts,
            B,
            D,
            H,
            W,
        )
        # One gradient per forward input; only x is differentiable.
        return x_grad, None, None, None, None, None, None
def bev_pool(feats, coords, B, D, H, W):
    """Pool per-point features into a dense BEV grid.

    Args:
        feats (torch.Tensor): (N, C) point features.
        coords (torch.Tensor): (N, 4) grid coordinates per point.
        B, D, H, W (int): Output grid dimensions.

    Returns:
        torch.Tensor: Pooled grid with channels moved to dim 1.
    """
    assert feats.shape[0] == coords.shape[0]
    # Linearize the grid coordinates so points landing in the same BEV
    # cell receive identical rank values.
    rank = (
        coords[:, 0] * (W * D * B)
        + coords[:, 1] * (D * B)
        + coords[:, 2] * B
        + coords[:, 3]
    )
    order = rank.argsort()
    sorted_feats = feats[order]
    sorted_coords = coords[order]
    sorted_rank = rank[order]
    pooled = QuickCumsumCuda.apply(sorted_feats, sorted_coords, sorted_rank, B, D, H, W)
    # Move the channel axis in front of the spatial axes.
    return pooled.permute(0, 4, 1, 2, 3).contiguous()
16,759 | import torch
The provided code snippet includes necessary dependencies for implementing the `calc_square_dist` function. Write a Python function `def calc_square_dist(point_feat_a, point_feat_b, norm=True)` to solve the following problem:
Calculating square distance between a and b. Args: point_feat_a (Tensor): (B, N, C) Feature vector of each point. point_feat_b (Tensor): (B, M, C) Feature vector of each point. norm (Bool): Whether to normalize the distance. Default: True. Returns: Tensor: (B, N, M) Distance between each pair points.
Here is the function:
def calc_square_dist(point_feat_a, point_feat_b, norm=True):
    """Compute pairwise (optionally normalized) squared distances.

    Uses the expansion ``||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b``.

    Args:
        point_feat_a (Tensor): (B, N, C) Feature vector of each point.
        point_feat_b (Tensor): (B, M, C) Feature vector of each point.
        norm (Bool): Whether to normalize the distance.
            Default: True.

    Returns:
        Tensor: (B, N, M) Distance between each pair points.
    """
    num_a = point_feat_a.shape[1]
    num_b = point_feat_b.shape[1]
    num_channel = point_feat_a.shape[-1]
    sq_a = point_feat_a.unsqueeze(2).pow(2).sum(dim=-1)  # (B, N, 1)
    sq_b = point_feat_b.unsqueeze(1).pow(2).sum(dim=-1)  # (B, 1, M)
    sq_a = sq_a.repeat((1, 1, num_b))  # (B, N, M)
    sq_b = sq_b.repeat((1, num_a, 1))  # (B, N, M)
    cross = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2))
    dist = sq_a + sq_b - 2 * cross
    if norm:
        # Normalize: sqrt to a Euclidean distance, then divide by C.
        dist = torch.sqrt(dist) / num_channel
    return dist
16,760 | import torch
from mmcv.runner import force_fp32
from torch import nn as nn
from typing import List
from .furthest_point_sample import furthest_point_sample, furthest_point_sample_with_dist
from .utils import calc_square_dist
class DFPS_Sampler(nn.Module):
    """D-FPS sampler: furthest point sampling on Euclidean coordinates."""

    def __init__(self):
        super(DFPS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Sample ``npoint`` indices with D-FPS; ``features`` is unused."""
        return furthest_point_sample(points.contiguous(), npoint)
class FFPS_Sampler(nn.Module):
    """F-FPS sampler: furthest point sampling in joint coord+feature space."""

    def __init__(self):
        super(FFPS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Sample ``npoint`` indices with F-FPS."""
        assert features is not None, "feature input to FFPS_Sampler should not be None"
        # Fuse coordinates with (transposed) features, then run FPS on the
        # pairwise squared distances in that fused space.
        fused = torch.cat([points, features.transpose(1, 2)], dim=2)
        pairwise = calc_square_dist(fused, fused, norm=False)
        return furthest_point_sample_with_dist(pairwise, npoint)
class FS_Sampler(nn.Module):
    """FS sampler: concatenation of F-FPS and D-FPS results."""

    def __init__(self):
        super(FS_Sampler, self).__init__()

    def forward(self, points, features, npoint):
        """Sample ``2 * npoint`` indices: F-FPS indices first, then D-FPS."""
        assert features is not None, "feature input to FS_Sampler should not be None"
        fused = torch.cat([points, features.transpose(1, 2)], dim=2)
        pairwise = calc_square_dist(fused, fused, norm=False)
        idx_ffps = furthest_point_sample_with_dist(pairwise, npoint)
        idx_dfps = furthest_point_sample(points, npoint)
        return torch.cat([idx_ffps, idx_dfps], dim=1)
The provided code snippet includes necessary dependencies for implementing the `get_sampler_type` function. Write a Python function `def get_sampler_type(sampler_type)` to solve the following problem:
Get the type and mode of points sampler. Args: sampler_type (str): The type of points sampler. The valid value are "D-FPS", "F-FPS", or "FS". Returns: class: Points sampler type.
Here is the function:
def get_sampler_type(sampler_type):
    """Map a sampler-type name to its points-sampler class.

    Args:
        sampler_type (str): The type of points sampler.
            The valid value are "D-FPS", "F-FPS", or "FS".

    Returns:
        class: Points sampler type.

    Raises:
        ValueError: If ``sampler_type`` is not a supported name.
    """
    registry = {
        "D-FPS": DFPS_Sampler,
        "F-FPS": FFPS_Sampler,
        "FS": FS_Sampler,
    }
    if sampler_type not in registry:
        raise ValueError(
            'Only "sampler_type" of "D-FPS", "F-FPS", or "FS"' f" are supported, got {sampler_type}"
        )
    return registry[sampler_type]
16,761 | import torch
The provided code snippet includes necessary dependencies for implementing the `calc_euclidian_dist` function. Write a Python function `def calc_euclidian_dist(xyz1, xyz2)` to solve the following problem:
Calculate the Euclidean distance between two sets of points. Args: xyz1 (torch.Tensor): (N, 3), the first set of points. xyz2 (torch.Tensor): (N, 3), the second set of points. Returns: torch.Tensor: (N, ), the Euclidean distance between each point pair.
Here is the function:
def calc_euclidian_dist(xyz1, xyz2):
    """Compute the Euclidean distance between matched pairs of points.

    Args:
        xyz1 (torch.Tensor): (N, 3), the first set of points.
        xyz2 (torch.Tensor): (N, 3), the second set of points.

    Returns:
        torch.Tensor: (N, ), distance between each corresponding pair.
    """
    assert xyz1.shape[0] == xyz2.shape[0], "number of points are not the same"
    assert xyz1.shape[1] == xyz2.shape[1] == 3, "points coordinates dimension is not 3"
    return (xyz1 - xyz2).norm(dim=-1)
16,762 | import torch
The provided code snippet includes necessary dependencies for implementing the `assign_score` function. Write a Python function `def assign_score(scores, point_features)` to solve the following problem:
Perform a weighted sum to aggregate output features according to scores. This function is used in the non-CUDA version of PAConv. Compared to the cuda op assign_score_withk, this pytorch implementation pre-computes output features for the neighbors of all centers, and then performs aggregation. It consumes more GPU memory. Args: scores (torch.Tensor): (B, npoint, K, M), predicted scores to aggregate weight matrices in the weight bank. `npoint` is the number of sampled centers. `K` is the number of queried neighbors. `M` is the number of weight matrices in the weight bank. point_features (torch.Tensor): (B, npoint, K, M, out_dim) Pre-computed point features to be aggregated. Returns: torch.Tensor: (B, npoint, K, out_dim), the aggregated features.
Here is the function:
def assign_score(scores, point_features):
    """Aggregate pre-computed neighbor features with predicted scores.

    Non-CUDA counterpart of the ``assign_score_withk`` op in PAConv: the
    per-weight-matrix outputs are already materialized, so aggregation is
    a single batched matmul (at the cost of extra GPU memory).

    Args:
        scores (torch.Tensor): (B, npoint, K, M), predicted scores to
            aggregate weight matrices in the weight bank.
            `npoint` is the number of sampled centers.
            `K` is the number of queried neighbors.
            `M` is the number of weight matrices in the weight bank.
        point_features (torch.Tensor): (B, npoint, K, M, out_dim)
            Pre-computed point features to be aggregated.

    Returns:
        torch.Tensor: (B, npoint, K, out_dim), the aggregated features.
    """
    B, npoint, K, M = scores.size()
    weights = scores.unsqueeze(3)  # (B, npoint, K, 1, M)
    aggregated = torch.matmul(weights, point_features)  # (B, npoint, K, 1, out_dim)
    return aggregated.view(B, npoint, K, -1)
16,763 | import torch
The provided code snippet includes necessary dependencies for implementing the `assign_kernel_withoutk` function. Write a Python function `def assign_kernel_withoutk(features, kernels, M)` to solve the following problem:
Pre-compute features with weight matrices in weight bank. This function is used before cuda op assign_score_withk in CUDA version PAConv. Args: features (torch.Tensor): (B, in_dim, N), input features of all points. `N` is the number of points in current point cloud. kernels (torch.Tensor): (2 * in_dim, M * out_dim), weight matrices in the weight bank, transformed from (M, 2 * in_dim, out_dim). `2 * in_dim` is because the input features are concatenation of (point_features - center_features, point_features). M (int): Number of weight matrices in the weight bank. Returns: Tuple[torch.Tensor]: both of shape (B, N, M, out_dim): - point_features: Pre-computed features for points. - center_features: Pre-computed features for centers.
Here is the function:
def assign_kernel_withoutk(features, kernels, M):
    """Pre-compute point/center features against the PAConv weight bank.

    Used before the cuda op ``assign_score_withk`` in CUDA-version PAConv.

    Args:
        features (torch.Tensor): (B, in_dim, N), input features of all
            points. `N` is the number of points in current point cloud.
        kernels (torch.Tensor): (2 * in_dim, M * out_dim), weight
            matrices in the weight bank, transformed from
            (M, 2 * in_dim, out_dim). `2 * in_dim` is because the input
            features are concatenation of
            (point_features - center_features, point_features).
        M (int): Number of weight matrices in the weight bank.

    Returns:
        Tuple[torch.Tensor]: both of shape (B, N, M, out_dim):
            - point_features: Pre-computed features for points.
            - center_features: Pre-computed features for centers.
    """
    B, in_dim, N = features.size()
    pts = features.permute(0, 2, 1)  # (B, N, in_dim)
    half1 = torch.matmul(pts, kernels[:in_dim]).view(B, N, M, -1)  # (B, N, M, out_dim)
    half2 = torch.matmul(pts, kernels[in_dim:]).view(B, N, M, -1)  # (B, N, M, out_dim)
    # TODO: why this hard-coded if condition?
    # When the network input is only xyz (no extra features), xyz itself is
    # the feature, so features.size(1) == 3 is odd. The center features are
    # then compensated with the xyz part of the second kernel half because
    # otherwise `point_features - center_features` would be all zeros.
    if in_dim % 2 != 0:
        half_coord = torch.matmul(
            pts[:, :, :3], kernels[in_dim : in_dim + 3]  # (B, N, 3)
        ).view(B, N, M, -1)  # (B, N, M, out_dim)
    else:
        half_coord = torch.zeros_like(half2)
    point_features = half1 + half2
    center_features = half1 + half_coord
    return point_features, center_features
16,764 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `scatter_nd` function. Write a Python function `def scatter_nd(indices, updates, shape)` to solve the following problem:
PyTorch edition of TensorFlow's scatter_nd. This function contains no exception-handling code, so use it carefully when indices repeat; it does not support the repeated-index accumulation ("repeat add") that TensorFlow supports.
Here is the function:
def scatter_nd(indices, updates, shape):
    """pytorch edition of tensorflow scatter_nd.

    Scatters ``updates`` into a zero tensor of ``shape``; the last axis
    of ``indices`` addresses the leading ``ndim`` axes of the output.

    Note:
        Contains no duplicate-index handling: behaviour for repeated
        indices is undefined, and TensorFlow-style accumulation
        (repeat add) is NOT supported. Use carefully.

    Args:
        indices (torch.Tensor): (..., ndim) integer index tensor.
        updates (torch.Tensor): Values to scatter; reshaped to match the
            indexed region.
        shape (list[int]): Shape of the output tensor.

    Returns:
        torch.Tensor: Tensor of ``shape`` with ``updates`` written in.
    """
    ret = torch.zeros(*shape, dtype=updates.dtype, device=updates.device)
    ndim = indices.shape[-1]
    output_shape = list(indices.shape[:-1]) + shape[indices.shape[-1] :]
    flatted_indices = indices.view(-1, ndim)
    # Fix: advanced indexing with a plain Python list of tensors is
    # deprecated in PyTorch (and will become an error); index with a tuple.
    slices = tuple(flatted_indices[:, i] for i in range(ndim)) + (Ellipsis,)
    ret[slices] = updates.view(*output_shape)
    return ret
16,765 | import math
import numpy as np
import torch
from mmcv.cnn import CONV_LAYERS
from torch.nn import init
from torch.nn.parameter import Parameter
from . import functional as Fsp
from . import ops
from .modules import SparseModule
from .structure import SparseConvTensor
def _calculate_fan_in_and_fan_out_hwio(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError(
"fan in and fan out can not be computed for tensor" "with fewer than 2 dimensions"
)
if dimensions == 2: # Linear
fan_in = tensor.size(-2)
fan_out = tensor.size(-1)
else:
num_input_fmaps = tensor.size(-2)
num_output_fmaps = tensor.size(-1)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[..., 0, 0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out | null |
16,766 | import sys
import torch
from collections import OrderedDict
from torch import nn
from .structure import SparseConvTensor
class SparseModule(nn.Module):
    """Marker base class for sparse modules.

    ``SparseSequential`` feeds a sparse tensor (rather than a dense one)
    to any child module that subclasses this placeholder.
    """
    pass
def is_spconv_module(module):
    """Return True if *module* should receive a sparse tensor in SparseSequential."""
    return isinstance(module, (SparseModule,))
16,767 | import sys
import torch
from collections import OrderedDict
from torch import nn
from .structure import SparseConvTensor
class SparseConvolution(SparseModule):
    """N-dimensional sparse convolution layer operating on ``SparseConvTensor``.

    Supports regular, submanifold (``subm=True``), transposed
    (``transposed=True``) and inverse (``inverse=True``) sparse
    convolutions, with an optional fused conv+bias kernel
    (``fused_bn=True``). Indice pairs are cached on the input tensor
    under ``indice_key`` so paired layers can share them.
    """

    def __init__(
        self,
        ndim,
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=True,
        subm=False,
        output_padding=0,
        transposed=False,
        inverse=False,
        indice_key=None,
        fused_bn=False,
    ):
        """Initialize the sparse convolution.

        Args:
            ndim (int): Number of spatial dimensions.
            in_channels (int): Number of input feature channels.
            out_channels (int): Number of output feature channels.
            kernel_size (int | list[int]): Kernel size; a scalar is
                broadcast to all spatial dims.
            stride (int | list[int]): Convolution stride.
            padding (int | list[int]): Input padding.
            dilation (int | list[int]): Kernel dilation.
            groups (int): Must be 1 (grouped conv unsupported).
            bias (bool): Whether to learn an additive bias.
            subm (bool): Submanifold convolution (output indices equal
                input indices).
            output_padding (int | list[int]): Extra output size for the
                transposed case.
            transposed (bool): Transposed (de)convolution.
            inverse (bool): Inverse convolution reusing the indice pairs
                of a paired forward conv with the same ``indice_key``.
            indice_key (str | None): Key for caching/sharing indice
                pairs between layers.
            fused_bn (bool): Use the fused conv+bias extension kernel.
        """
        super(SparseConvolution, self).__init__()
        assert groups == 1
        # Broadcast scalar geometry arguments to one entry per spatial dim.
        if not isinstance(kernel_size, (list, tuple)):
            kernel_size = [kernel_size] * ndim
        if not isinstance(stride, (list, tuple)):
            stride = [stride] * ndim
        if not isinstance(padding, (list, tuple)):
            padding = [padding] * ndim
        if not isinstance(dilation, (list, tuple)):
            dilation = [dilation] * ndim
        if not isinstance(output_padding, (list, tuple)):
            output_padding = [output_padding] * ndim
        # Per dimension, stride and dilation may not both differ from 1.
        for d, s in zip(dilation, stride):
            assert any([s == 1, d == 1]), "don't support this."
        self.ndim = ndim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # 1x1 kernels take a dense-matmul fast path in forward().
        self.conv1x1 = np.prod(kernel_size) == 1
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.inverse = inverse
        self.output_padding = output_padding
        self.groups = groups
        self.subm = subm
        self.indice_key = indice_key
        self.fused_bn = fused_bn
        # Weight layout is HWIO-style: (*kernel_size, in, out).
        self.weight = Parameter(torch.Tensor(*kernel_size, in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()
    def reset_parameters(self):
        """Kaiming-initialize the weight; uniform-initialize the bias."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            # Bias bound derives from fan_in of the HWIO-layout weight.
            fan_in, _ = _calculate_fan_in_and_fan_out_hwio(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def forward(self, input):
        """Run the sparse convolution on a ``SparseConvTensor``.

        Computes (or reuses, via ``indice_key``) input/output indice
        pairs, then performs the gather-GEMM-scatter convolution through
        the spconv functional ops. Returns a new ``SparseConvTensor``
        that shares the input's indice cache and grid.
        """
        assert isinstance(input, SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        # Derive the output spatial shape; submanifold convs keep it.
        if not self.subm:
            if self.transposed:
                out_spatial_shape = ops.get_deconv_output_size(
                    spatial_shape,
                    self.kernel_size,
                    self.stride,
                    self.padding,
                    self.dilation,
                    self.output_padding,
                )
            else:
                out_spatial_shape = ops.get_conv_output_size(
                    spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation
                )
        else:
            out_spatial_shape = spatial_shape
        # input.update_grid(out_spatial_shape)
        # t = time.time()
        # Fast path: a 1x1 conv is just a dense matmul on the features;
        # indices are unchanged, so no indice pairs are needed.
        if self.conv1x1:
            features = torch.mm(
                input.features, self.weight.view(self.in_channels, self.out_channels)
            )
            if self.bias is not None:
                features += self.bias
            out_tensor = SparseConvTensor(
                features, input.indices, input.spatial_shape, input.batch_size
            )
            out_tensor.indice_dict = input.indice_dict
            out_tensor.grid = input.grid
            return out_tensor
        # Look up cached indice pairs for this key, if any.
        datas = input.find_indice_pair(self.indice_key)
        if self.inverse:
            # Inverse conv must reuse the indice pairs of its paired
            # forward conv (roles of in/out indices are swapped).
            assert datas is not None and self.indice_key is not None
            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = datas
            assert indice_pairs.shape[0] == np.prod(
                self.kernel_size
            ), "inverse conv must have same kernel size as its couple conv"
        else:
            if self.indice_key is not None and datas is not None:
                outids, _, indice_pairs, indice_pair_num, _ = datas
            else:
                # No cache hit: compute indice pairs and store them for
                # later layers sharing the same key.
                outids, indice_pairs, indice_pair_num = ops.get_indice_pairs(
                    indices,
                    batch_size,
                    spatial_shape,
                    self.kernel_size,
                    self.stride,
                    self.padding,
                    self.dilation,
                    self.output_padding,
                    self.subm,
                    self.transposed,
                    grid=input.grid,
                )
                input.indice_dict[self.indice_key] = (
                    outids,
                    indices,
                    indice_pairs,
                    indice_pair_num,
                    spatial_shape,
                )
        if self.fused_bn:
            # Fused kernel applies conv and bias in one pass.
            assert self.bias is not None
            out_features = ops.fused_indice_conv(
                features,
                self.weight,
                self.bias,
                indice_pairs.to(device),
                indice_pair_num,
                outids.shape[0],
                self.inverse,
                self.subm,
            )
        else:
            if self.subm:
                out_features = Fsp.indice_subm_conv(
                    features, self.weight, indice_pairs.to(device), indice_pair_num, outids.shape[0]
                )
            else:
                if self.inverse:
                    out_features = Fsp.indice_inverse_conv(
                        features,
                        self.weight,
                        indice_pairs.to(device),
                        indice_pair_num,
                        outids.shape[0],
                    )
                else:
                    out_features = Fsp.indice_conv(
                        features,
                        self.weight,
                        indice_pairs.to(device),
                        indice_pair_num,
                        outids.shape[0],
                    )
            if self.bias is not None:
                out_features += self.bias
        out_tensor = SparseConvTensor(out_features, outids, out_spatial_shape, batch_size)
        # Propagate the indice cache and grid to the output tensor.
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
def is_sparse_conv(module):
    """Return True if *module* is a SparseConvolution layer."""
    # Imported lazily to avoid a circular import with .conv.
    from .conv import SparseConvolution as _SparseConvolution

    return isinstance(module, _SparseConvolution)
16,768 | import sys
import torch
from collections import OrderedDict
from torch import nn
from .structure import SparseConvTensor
def _mean_update(vals, m_vals, t):
outputs = []
if not isinstance(vals, list):
vals = [vals]
if not isinstance(m_vals, list):
m_vals = [m_vals]
for val, m_val in zip(vals, m_vals):
output = t / float(t + 1) * m_val + 1 / float(t + 1) * val
outputs.append(output)
if len(outputs) == 1:
outputs = outputs[0]
return outputs | null |
16,769 | import torch
from . import sparse_conv_ext
def get_conv_output_size(input_size, kernel_size, stride, padding, dilation):
    """Compute the per-dimension output size of a (sparse) convolution.

    A kernel size of -1 collapses that dimension to 1 ("global" kernel).

    Args:
        input_size (list[int]): Input spatial shape.
        kernel_size (list[int]): Kernel size per dimension (-1 allowed).
        stride (list[int]): Stride per dimension.
        padding (list[int]): Padding per dimension.
        dilation (list[int]): Dilation per dimension.

    Returns:
        list[int]: Output spatial shape.
    """
    output_size = []
    for size, k, s, p, d in zip(input_size, kernel_size, stride, padding, dilation):
        if k == -1:
            output_size.append(1)
        else:
            output_size.append((size + 2 * p - d * (k - 1) - 1) // s + 1)
    return output_size
def get_deconv_output_size(input_size, kernel_size, stride, padding, dilation, output_padding):
    """Compute the per-dimension output size of a transposed convolution.

    Note: ``dilation`` is accepted for signature symmetry but unused.

    Raises:
        ValueError: If any kernel_size entry is -1.
    """
    output_size = []
    for i, size in enumerate(input_size):
        if kernel_size[i] == -1:
            raise ValueError("deconv don't support kernel_size < 0")
        output_size.append(
            (size - 1) * stride[i] - 2 * padding[i] + kernel_size[i] + output_padding[i]
        )
    return output_size
def get_indice_pairs(
    indices,
    batch_size,
    spatial_shape,
    ksize=3,
    stride=1,
    padding=0,
    dilation=1,
    out_padding=0,
    subm=False,
    transpose=False,
    grid=None,
):
    """Compute input/output indice pairs for a sparse convolution.

    Scalar geometry parameters are broadcast to one entry per spatial
    dimension, the output spatial shape is derived, and the call is
    dispatched to the matching C++/CUDA extension kernel (grid-based
    variants are used when ``grid`` is given).
    """
    ndim = indices.shape[1] - 1

    def _expand(value):
        # Broadcast a scalar parameter to one entry per spatial dim.
        return value if isinstance(value, (list, tuple)) else [value] * ndim

    ksize = _expand(ksize)
    stride = _expand(stride)
    padding = _expand(padding)
    dilation = _expand(dilation)
    out_padding = _expand(out_padding)
    for d, s in zip(dilation, stride):
        assert any([s == 1, d == 1]), "don't support this."
    if subm:
        # Submanifold convs keep the input spatial shape.
        out_shape = spatial_shape
    elif transpose:
        out_shape = get_deconv_output_size(
            spatial_shape, ksize, stride, padding, dilation, out_padding
        )
    else:
        out_shape = get_conv_output_size(spatial_shape, ksize, stride, padding, dilation)
    if grid is None:
        if ndim == 2:
            pair_fn = sparse_conv_ext.get_indice_pairs_2d
        elif ndim == 3:
            pair_fn = sparse_conv_ext.get_indice_pairs_3d
        elif ndim == 4:
            pair_fn = sparse_conv_ext.get_indice_pairs_4d
        else:
            raise NotImplementedError
        return pair_fn(
            indices,
            batch_size,
            out_shape,
            spatial_shape,
            ksize,
            stride,
            padding,
            dilation,
            out_padding,
            int(subm),
            int(transpose),
        )
    if ndim == 2:
        pair_fn = sparse_conv_ext.get_indice_pairs_grid_2d
    elif ndim == 3:
        pair_fn = sparse_conv_ext.get_indice_pairs_grid_3d
    else:
        raise NotImplementedError
    return pair_fn(
        indices,
        grid,
        batch_size,
        out_shape,
        spatial_shape,
        ksize,
        stride,
        padding,
        dilation,
        out_padding,
        int(subm),
        int(transpose),
    )
16,770 | import torch
from . import sparse_conv_ext
def indice_conv(
    features, filters, indice_pairs, indice_pair_num, num_activate_out, inverse=False, subm=False
):
    """Sparse gather-GEMM-scatter convolution, dispatched on filter dtype."""
    if filters.dtype == torch.float32:
        impl = sparse_conv_ext.indice_conv_fp32
    elif filters.dtype == torch.half:
        impl = sparse_conv_ext.indice_conv_half
    else:
        raise NotImplementedError
    return impl(
        features,
        filters,
        indice_pairs,
        indice_pair_num,
        num_activate_out,
        int(inverse),
        int(subm),
    )
16,771 | import torch
from . import sparse_conv_ext
def fused_indice_conv(
    features, filters, bias, indice_pairs, indice_pair_num, num_activate_out, inverse, subm
):
    """Fused sparse convolution + bias, dispatched on dtype.

    NOTE(review): the half branch tests ``features.dtype`` while the
    fp32 branch tests ``filters.dtype`` — presumably both tensors are
    expected to share one dtype; confirm against the extension's
    contract before changing this asymmetry.
    """
    if features.dtype == torch.half:
        func = sparse_conv_ext.fused_indice_conv_half
    elif filters.dtype == torch.float32:
        func = sparse_conv_ext.fused_indice_conv_fp32
    else:
        raise NotImplementedError
    return func(
        features,
        filters,
        bias,
        indice_pairs,
        indice_pair_num,
        num_activate_out,
        int(inverse),
        int(subm),
    )
16,772 | import torch
from . import sparse_conv_ext
def indice_conv_backward(
    features, filters, out_bp, indice_pairs, indice_pair_num, inverse=False, subm=False
):
    """Backward pass of the sparse convolution, dispatched on filter dtype."""
    if filters.dtype == torch.float32:
        backward_fn = sparse_conv_ext.indice_conv_backward_fp32
    elif filters.dtype == torch.half:
        backward_fn = sparse_conv_ext.indice_conv_backward_half
    else:
        raise NotImplementedError
    return backward_fn(
        features, filters, out_bp, indice_pairs, indice_pair_num, int(inverse), int(subm)
    )
16,773 | import torch
from . import sparse_conv_ext
def indice_maxpool(features, indice_pairs, indice_pair_num, num_activate_out):
    """Sparse max pooling, dispatched on feature dtype."""
    if features.dtype == torch.float32:
        pool_fn = sparse_conv_ext.indice_maxpool_fp32
    elif features.dtype == torch.half:
        pool_fn = sparse_conv_ext.indice_maxpool_half
    else:
        raise NotImplementedError
    return pool_fn(features, indice_pairs, indice_pair_num, num_activate_out)
16,774 | import torch
from . import sparse_conv_ext
def indice_maxpool_backward(features, out_features, out_bp, indice_pairs, indice_pair_num):
    """Backward pass of sparse max pooling, dispatched on feature dtype."""
    if features.dtype == torch.float32:
        backward_fn = sparse_conv_ext.indice_maxpool_backward_fp32
    elif features.dtype == torch.half:
        backward_fn = sparse_conv_ext.indice_maxpool_backward_half
    else:
        raise NotImplementedError
    return backward_fn(features, out_features, out_bp, indice_pairs, indice_pair_num)
16,775 | import torch
from . import roiaware_pool3d_ext
The provided code snippet includes necessary dependencies for implementing the `points_in_boxes_gpu` function. Write a Python function `def points_in_boxes_gpu(points, boxes)` to solve the following problem:
Find points that are in boxes (CUDA) Args: points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR coordinate boxes (torch.Tensor): [B, T, 7], num_valid_boxes <= T, [x, y, z, w, l, h, ry] in LiDAR coordinate, (x, y, z) is the bottom center Returns: box_idxs_of_pts (torch.Tensor): (B, M), default background = -1
Here is the function:
def points_in_boxes_gpu(points, boxes):
    """Find points that are in boxes (CUDA)

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, w, l, h, ry] in LiDAR coordinate,
            (x, y, z) is the bottom center

    Returns:
        box_idxs_of_pts (torch.Tensor): (B, M), default background = -1
    """
    # Fix: the message printed boxes.shape[0] twice; report both sizes.
    assert boxes.shape[0] == points.shape[0], (
        f"Points and boxes should have the same batch size, "
        f"got {points.shape[0]} and {boxes.shape[0]}"
    )
    assert boxes.shape[2] == 7, (
        f"boxes dimension should be 7, " f"got unexpected shape {boxes.shape[2]}"
    )
    assert points.shape[2] == 3, (
        f"points dimension should be 3, " f"got unexpected shape {points.shape[2]}"
    )
    batch_size, num_points, _ = points.shape
    # -1 marks background (point not inside any box).
    box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1)
    # If manually put the tensor 'points' or 'boxes' on a device
    # which is not the current device, some temporary variables
    # will be created on the current device in the cuda op,
    # and the output will be incorrect.
    # Therefore, we force the current device to be the same
    # as the device of the tensors if it was not.
    # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305
    # for the incorrect output before the fix.
    points_device = points.get_device()
    assert points_device == boxes.get_device(), "Points and boxes should be put on the same device"
    if torch.cuda.current_device() != points_device:
        torch.cuda.set_device(points_device)
    roiaware_pool3d_ext.points_in_boxes_gpu(
        boxes.contiguous(), points.contiguous(), box_idxs_of_pts
    )
    return box_idxs_of_pts
16,776 | import torch
from . import roiaware_pool3d_ext
The provided code snippet includes necessary dependencies for implementing the `points_in_boxes_cpu` function. Write a Python function `def points_in_boxes_cpu(points, boxes)` to solve the following problem:
Find points that are in boxes (CPU) Note: Currently, the output of this function is different from that of points_in_boxes_gpu. Args: points (torch.Tensor): [npoints, 3] boxes (torch.Tensor): [N, 7], in LiDAR coordinate, (x, y, z) is the bottom center Returns: point_indices (torch.Tensor): (N, npoints)
Here is the function:
def points_in_boxes_cpu(points, boxes):
    """Find points that are in boxes (CPU)

    Note:
        Currently, the output of this function is different from that of
        points_in_boxes_gpu.

    Args:
        points (torch.Tensor): [npoints, 3]
        boxes (torch.Tensor): [N, 7], in LiDAR coordinate,
            (x, y, z) is the bottom center

    Returns:
        point_indices (torch.Tensor): (N, npoints)
    """
    # TODO: Refactor this function as a CPU version of points_in_boxes_gpu
    # Fix: the messages indexed shape[2] on 2-D tensors, which raised an
    # IndexError while formatting and masked the real AssertionError.
    assert boxes.shape[1] == 7, (
        f"boxes dimension should be 7, " f"got unexpected shape {boxes.shape[1]}"
    )
    assert points.shape[1] == 3, (
        f"points dimension should be 3, " f"got unexpected shape {points.shape[1]}"
    )
    point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
    roiaware_pool3d_ext.points_in_boxes_cpu(
        boxes.float().contiguous(), points.float().contiguous(), point_indices
    )
    return point_indices
16,777 | import torch
from . import roiaware_pool3d_ext
The provided code snippet includes necessary dependencies for implementing the `points_in_boxes_batch` function. Write a Python function `def points_in_boxes_batch(points, boxes)` to solve the following problem:
Find points that are in boxes (CUDA) Args: points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR coordinate boxes (torch.Tensor): [B, T, 7], num_valid_boxes <= T, [x, y, z, w, l, h, ry] in LiDAR coordinate, (x, y, z) is the bottom center. Returns: box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0
Here is the function:
def points_in_boxes_batch(points, boxes):
    """Find points that are in boxes (CUDA)
    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, w, l, h, ry] in LiDAR coordinate,
            (x, y, z) is the bottom center.
    Returns:
        box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0
    """
    assert boxes.shape[0] == points.shape[0], (
        f"Points and boxes should have the same batch size, "
        # BUGFIX: the second value previously printed boxes.shape[0] again.
        f"got {boxes.shape[0]} and {points.shape[0]}"
    )
    assert boxes.shape[2] == 7, (
        f"boxes dimension should be 7, " f"got unexpected shape {boxes.shape[2]}"
    )
    assert points.shape[2] == 3, (
        f"points dimension should be 3, " f"got unexpected shape {points.shape[2]}"
    )
    batch_size, num_points, _ = points.shape
    num_boxes = boxes.shape[1]
    # Zero-initialized output; the redundant .fill_(0) was dropped since
    # new_zeros already guarantees zeros. The extension fills it in place.
    box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes), dtype=torch.int)
    # The CUDA kernel launches on the current device, so make sure it
    # matches the device the inputs live on.
    points_device = points.get_device()
    assert points_device == boxes.get_device(), "Points and boxes should be put on the same device"
    if torch.cuda.current_device() != points_device:
        torch.cuda.set_device(points_device)
    roiaware_pool3d_ext.points_in_boxes_batch(
        boxes.contiguous(), points.contiguous(), box_idxs_of_pts
    )
    return box_idxs_of_pts
16,778 | import torch
from . import feature_decorator_ext
def feature_decorator(features, num_voxels, coords, vx, vy, x_offset, y_offset, normalize_coords, use_cluster, use_center):
    """Thin wrapper around the custom ``feature_decorator_forward`` op.

    Note: the underlying op takes ``coords`` before ``num_voxels``, while
    this wrapper accepts them in the opposite order.
    """
    decorated = torch.ops.feature_decorator_ext.feature_decorator_forward(
        features,
        coords,
        num_voxels,
        vx,
        vy,
        x_offset,
        y_offset,
        normalize_coords,
        use_cluster,
        use_center,
    )
    return decorated
16,779 | import torch
from . import iou3d_cuda
The provided code snippet includes necessary dependencies for implementing the `boxes_iou_bev` function. Write a Python function `def boxes_iou_bev(boxes_a, boxes_b)` to solve the following problem:
Calculate boxes IoU in the bird view. Args: boxes_a (torch.Tensor): Input boxes a with shape (M, 5). boxes_b (torch.Tensor): Input boxes b with shape (N, 5). Returns: ans_iou (torch.Tensor): IoU result with shape (M, N).
Here is the function:
def boxes_iou_bev(boxes_a, boxes_b):
    """Compute pairwise IoU between two box sets in the bird's-eye view.

    Args:
        boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
        boxes_b (torch.Tensor): Input boxes b with shape (N, 5).

    Returns:
        torch.Tensor: IoU matrix with shape (M, N).
    """
    num_a = boxes_a.shape[0]
    num_b = boxes_b.shape[0]
    # Output buffer is filled in-place by the CUDA kernel.
    ans_iou = boxes_a.new_zeros(torch.Size((num_a, num_b)))
    iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)
    return ans_iou
16,780 | from mmcv.utils import Registry
SA_MODULES = Registry("point_sa_module")
The provided code snippet includes necessary dependencies for implementing the `build_sa_module` function. Write a Python function `def build_sa_module(cfg, *args, **kwargs)` to solve the following problem:
Build PointNet2 set abstraction (SA) module. Args: cfg (None or dict): The SA module config, which should contain: - type (str): Module type. - module args: Args needed to instantiate an SA module. args (argument list): Arguments passed to the `__init__` method of the corresponding module. kwargs (keyword arguments): Keyword arguments passed to the `__init__` method of the corresponding SA module . Returns: nn.Module: Created SA module.
Here is the function:
def build_sa_module(cfg, *args, **kwargs):
    """Build PointNet2 set abstraction (SA) module.

    Args:
        cfg (None or dict): The SA module config, which should contain:
            - type (str): Module type.
            - module args: Args needed to instantiate an SA module.
            Falls back to ``PointSAModule`` when None.
        args (argument list): Arguments passed to the `__init__`
            method of the corresponding module.
        kwargs (keyword arguments): Keyword arguments passed to the
            `__init__` method of the corresponding SA module.

    Returns:
        nn.Module: Created SA module.
    """
    if cfg is None:
        cfg_ = dict(type="PointSAModule")
    elif not isinstance(cfg, dict):
        raise TypeError("cfg must be a dict")
    elif "type" not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    else:
        # Copy before popping so the caller's config is left untouched.
        cfg_ = cfg.copy()
    module_type = cfg_.pop("type")
    if module_type not in SA_MODULES:
        raise KeyError(f"Unrecognized module type {module_type}")
    sa_module = SA_MODULES.get(module_type)
    return sa_module(*args, **kwargs, **cfg_)
16,781 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `extract_result_dict` function. Write a Python function `def extract_result_dict(results, key)` to solve the following problem:
Extract and return the data corresponding to key in result dict. ``results`` is a dict output from `pipeline(input_dict)`, which is the loaded data from ``Dataset`` class. The data terms inside may be wrapped in list, tuple and DataContainer, so this function essentially extracts data from these wrappers. Args: results (dict): Data loaded using pipeline. key (str): Key of the desired data. Returns: np.ndarray | torch.Tensor | None: Data term.
Here is the function:
def extract_result_dict(results, key):
    """Extract and return the data corresponding to key in result dict.

    ``results`` is a dict output from `pipeline(input_dict)`, which is the
    loaded data from ``Dataset`` class. The data terms inside may be wrapped
    in list, tuple and DataContainer, so this function essentially unwraps
    them before returning the raw data.

    Args:
        results (dict): Data loaded using pipeline.
        key (str): Key of the desired data.

    Returns:
        np.ndarray | torch.Tensor | None: Data term, or None when the key
        is absent.
    """
    if key not in results:
        return None
    wrapped = results[key]
    # Unwrap a single-element list/tuple wrapper first.
    if isinstance(wrapped, (list, tuple)):
        wrapped = wrapped[0]
    # Then unwrap a DataContainer, if any.
    if isinstance(wrapped, mmcv.parallel.DataContainer):
        wrapped = wrapped._data
    return wrapped
16,782 | import warnings
import numba
import numpy as np
from numba import errors
from mmdet3d.core.bbox import box_np_ops
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
    """Add noise to every box (only on the horizontal plane).
    Args:
        boxes (np.ndarray): Input boxes with shape (N, 5),
            [x, y, w, l, ry] (see the caller noise_per_object_v3_).
        valid_mask (np.ndarray): Mask to indicate which boxes are valid
            with shape (N).
        loc_noises (np.ndarray): Location noises with shape (N, M, 3).
        rot_noises (np.ndarray): Rotation noises with shape (N, M).
    Returns:
        np.ndarray: Per-box index of the first noise candidate that
            passed the collision test, or -1 if none did.
    """
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
    # Scratch buffers reused across iterations to avoid reallocations.
    current_corners = np.zeros((4, 2), dtype=boxes.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    # -1 marks "no noise candidate accepted" for a box.
    success_mask = -np.ones((num_boxes,), dtype=np.int64)
    # print(valid_mask)
    for i in range(num_boxes):
        if valid_mask[i]:
            for j in range(num_tests):
                # Rotate the corners around the box center, then shift by
                # the location noise.
                current_corners[:] = box_corners[i]
                current_corners -= boxes[i, :2]
                _rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
                current_corners += boxes[i, :2] + loc_noises[i, j, :2]
                coll_mat = box_collision_test(current_corners.reshape(1, 4, 2), box_corners)
                # Ignore self-collision.
                coll_mat[0, i] = False
                # print(coll_mat)
                if not coll_mat.any():
                    # First collision-free candidate wins; update the corner
                    # cache so later boxes collide against the moved box.
                    success_mask[i] = j
                    box_corners[i] = current_corners
                    break
    return success_mask
def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises, global_rot_noises):
    """Add noise to every box (only on the horizontal plane). Version 2 used
    when enable global rotations.
    Args:
        boxes (np.ndarray): Input boxes with shape (N, 5),
            [x, y, w, l, ry] (see the caller noise_per_object_v3_).
        valid_mask (np.ndarray): Mask to indicate which boxes are valid
            with shape (N).
        loc_noises (np.ndarray): Location noises with shape (N, M, 3).
        rot_noises (np.ndarray): Rotation noises with shape (N, M).
        global_rot_noises (np.ndarray): Global rotation noises with shape
            (N, M), applied around the sensor origin.
    Returns:
        np.ndarray: Per-box index of the first noise candidate that
            passed the collision test, or -1 if none did.
    """
    num_boxes = boxes.shape[0]
    num_tests = loc_noises.shape[1]
    box_corners = box_np_ops.box2d_to_corner_jit(boxes)
    # Scratch buffers reused across all candidate evaluations.
    current_corners = np.zeros((4, 2), dtype=boxes.dtype)
    current_box = np.zeros((1, 5), dtype=boxes.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    dst_pos = np.zeros((2,), dtype=boxes.dtype)
    # -1 marks "no noise candidate accepted" for a box.
    success_mask = -np.ones((num_boxes,), dtype=np.int64)
    # Unit-square corner template centered at the origin; scaled by the box
    # size below to rebuild the rotated corner set.
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    corners_norm = corners_norm.reshape(4, 2)
    for i in range(num_boxes):
        if valid_mask[i]:
            for j in range(num_tests):
                current_box[0, :] = boxes[i]
                # Rotate the box center around the sensor origin by the
                # global rotation noise, keeping its radius fixed.
                current_radius = np.sqrt(boxes[i, 0] ** 2 + boxes[i, 1] ** 2)
                current_grot = np.arctan2(boxes[i, 0], boxes[i, 1])
                dst_grot = current_grot + global_rot_noises[i, j]
                dst_pos[0] = current_radius * np.sin(dst_grot)
                dst_pos[1] = current_radius * np.cos(dst_grot)
                current_box[0, :2] = dst_pos
                # Keep the box heading consistent with the global rotation.
                current_box[0, -1] += dst_grot - current_grot
                rot_sin = np.sin(current_box[0, -1])
                rot_cos = np.cos(current_box[0, -1])
                rot_mat_T[0, 0] = rot_cos
                rot_mat_T[0, 1] = -rot_sin
                rot_mat_T[1, 0] = rot_sin
                rot_mat_T[1, 1] = rot_cos
                # Rebuild corners at the new pose, then apply the per-box
                # rotation noise about the new center plus the location noise.
                current_corners[:] = (
                    current_box[0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2]
                )
                current_corners -= current_box[0, :2]
                _rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
                current_corners += current_box[0, :2] + loc_noises[i, j, :2]
                coll_mat = box_collision_test(current_corners.reshape(1, 4, 2), box_corners)
                # Ignore self-collision.
                coll_mat[0, i] = False
                if not coll_mat.any():
                    success_mask[i] = j
                    box_corners[i] = current_corners
                    # Fold the global-rotation displacement back into the
                    # selected noises so callers can apply a single
                    # combined transform.
                    loc_noises[i, j, :2] += dst_pos - boxes[i, :2]
                    rot_noises[i, j] += dst_grot - current_grot
                    break
    return success_mask
def _select_transform(transform, indices):
"""Select transform.
Args:
transform (np.ndarray): Transforms to select from.
indices (np.ndarray): Mask to indicate which transform to select.
Returns:
np.ndarray: Selected transforms.
"""
result = np.zeros((transform.shape[0], *transform.shape[2:]), dtype=transform.dtype)
for i in range(transform.shape[0]):
if indices[i] != -1:
result[i] = transform[i, indices[i]]
return result
def points_transform_(points, centers, point_masks, loc_transform, rot_transform, valid_mask):
    """Apply transforms to points and box centers.
    Args:
        points (np.ndarray): Input points; the first 3 columns (x, y, z)
            are modified in place.
        centers (np.ndarray): Input box centers.
        point_masks (np.ndarray): Mask to indicate which points need
            to be transformed, shape (num_points, num_box).
        loc_transform (np.ndarray): Location transform to be applied.
        rot_transform (np.ndarray): Rotation transform to be applied.
        valid_mask (np.ndarray): Mask to indicate which boxes are valid.
    """
    num_box = centers.shape[0]
    num_points = points.shape[0]
    # Precompute one rotation matrix per box (rotation about axis 2 == z).
    rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
    for i in range(num_box):
        _rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2)
    for i in range(num_points):
        for j in range(num_box):
            if valid_mask[j]:
                if point_masks[i, j] == 1:
                    # Rotate the point about the box center, then translate
                    # it by the box's location noise.
                    points[i, :3] -= centers[j, :3]
                    points[i : i + 1, :3] = points[i : i + 1, :3] @ rot_mat_T[j]
                    points[i, :3] += centers[j, :3]
                    points[i, :3] += loc_transform[j]
                    break  # only apply first box's transform
def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask):
    """Apply per-box location/rotation noise to 3D boxes, in place.

    Args:
        boxes (np.ndarray): 3D boxes to be transformed; columns 0:3 (center)
            and 6 (yaw) are updated in place.
        loc_transform (np.ndarray): Location transform to be applied.
        rot_transform (np.ndarray): Rotation transform to be applied.
        valid_mask (np.ndarray | None): Mask to indicate which boxes are
            valid; invalid boxes are left untouched.
    """
    for idx in range(boxes.shape[0]):
        if not valid_mask[idx]:
            continue
        boxes[idx, :3] += loc_transform[idx]
        boxes[idx, 6] += rot_transform[idx]
The provided code snippet includes necessary dependencies for implementing the `noise_per_object_v3_` function. Write a Python function `def noise_per_object_v3_( gt_boxes, points=None, valid_mask=None, rotation_perturb=np.pi / 4, center_noise_std=1.0, global_random_rot_range=np.pi / 4, num_try=100, )` to solve the following problem:
Random rotate or remove each groundtruth independently. use kitti viewer to test this function points_transform_ Args: gt_boxes (np.ndarray): Ground truth boxes with shape (N, 7). points (np.ndarray | None): Input point cloud with shape (M, 4). Default: None. valid_mask (np.ndarray | None): Mask to indicate which boxes are valid. Default: None. rotation_perturb (float): Rotation perturbation. Default: pi / 4. center_noise_std (float): Center noise standard deviation. Default: 1.0. global_random_rot_range (float): Global random rotation range. Default: pi/4. num_try (int): Number of try. Default: 100.
Here is the function:
def noise_per_object_v3_(
    gt_boxes,
    points=None,
    valid_mask=None,
    rotation_perturb=np.pi / 4,
    center_noise_std=1.0,
    global_random_rot_range=np.pi / 4,
    num_try=100,
):
    """Random rotate or remove each groundtruth independently. use kitti viewer
    to test this function points_transform_
    Args:
        gt_boxes (np.ndarray): Ground truth boxes with shape (N, 7),
            [x, y, z, w, l, h, ry]; modified in place.
        points (np.ndarray | None): Input point cloud with shape (M, 4);
            modified in place. Default: None.
        valid_mask (np.ndarray | None): Mask to indicate which boxes are valid.
            Default: None.
        rotation_perturb (float): Rotation perturbation. Default: pi / 4.
        center_noise_std (float): Center noise standard deviation.
            Default: 1.0.
        global_random_rot_range (float): Global random rotation range.
            Default: pi/4.
        num_try (int): Number of noise candidates drawn per box. Default: 100.
    """
    num_boxes = gt_boxes.shape[0]
    # Normalize scalar arguments into symmetric ranges / per-axis stds.
    if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
        rotation_perturb = [-rotation_perturb, rotation_perturb]
    if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
        global_random_rot_range = [-global_random_rot_range, global_random_rot_range]
    # Global rotation is only enabled when the requested range is
    # non-degenerate.
    enable_grot = np.abs(global_random_rot_range[0] - global_random_rot_range[1]) >= 1e-3
    if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
        center_noise_std = [center_noise_std, center_noise_std, center_noise_std]
    if valid_mask is None:
        valid_mask = np.ones((num_boxes,), dtype=np.bool_)
    center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
    # Draw num_try candidate location/rotation noises per box.
    loc_noises = np.random.normal(scale=center_noise_std, size=[num_boxes, num_try, 3])
    rot_noises = np.random.uniform(
        rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try]
    )
    # Global rotation noise is expressed relative to each box's current
    # azimuth about the sensor origin.
    gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
    grot_lowers = global_random_rot_range[0] - gt_grots
    grot_uppers = global_random_rot_range[1] - gt_grots
    global_rot_noises = np.random.uniform(
        grot_lowers[..., np.newaxis], grot_uppers[..., np.newaxis], size=[num_boxes, num_try]
    )
    # origin (0.5, 0.5, 0): x/y centered, z measured from the bottom face.
    origin = (0.5, 0.5, 0)
    gt_box_corners = box_np_ops.center_to_corner_box3d(
        gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=origin, axis=2
    )
    # TODO: rewrite this noise box function?
    # Pick, per box, the first candidate noise passing the BEV collision
    # test; columns [0, 1, 3, 4, 6] are the BEV box [x, y, w, l, ry].
    if not enable_grot:
        selected_noise = noise_per_box(
            gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises, rot_noises
        )
    else:
        selected_noise = noise_per_box_v2_(
            gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises, rot_noises, global_rot_noises
        )
    loc_transforms = _select_transform(loc_noises, selected_noise)
    rot_transforms = _select_transform(rot_noises, selected_noise)
    surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
    if points is not None:
        # TODO: replace this points_in_convex function by my tools?
        point_masks = box_np_ops.points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
        points_transform_(
            points, gt_boxes[:, :3], point_masks, loc_transforms, rot_transforms, valid_mask
        )
    box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
16,783 | import os
import numpy as np
import torch
def load_augmented_point_cloud(path, virtual=False, reduce_beams=32):
    """Load a nuScenes LiDAR sweep merged with precomputed virtual points.

    Args:
        path (str): Path to the raw ``.bin`` LiDAR file; the matching
            virtual-point file path is derived from it.
        virtual (bool): If True, append the virtual points (tagged with a
            trailing -1 indicator column) to the returned cloud.
        reduce_beams (int): Beam count used to pick the virtual-point
            directory suffix.

    Returns:
        np.ndarray: Augmented point array. Raw points are padded with ones,
        replaced foreground real points end with 0, virtual points with -1.
    """
    # NOTE: following Tianwei's implementation, it is hard coded for nuScenes
    raw = np.fromfile(path, dtype=np.float32).reshape(-1, 5)
    # NOTE: path definition different from Tianwei's implementation.
    tokens = path.split("/")
    vp_dir = "_VIRTUAL" if reduce_beams == 32 else f"_VIRTUAL_{reduce_beams}BEAMS"
    seg_path = os.path.join(
        *tokens[:-3],
        "virtual_points",
        tokens[-3],
        tokens[-2] + vp_dir,
        tokens[-1] + ".pkl.npy",
    )
    assert os.path.exists(seg_path)
    data_dict = np.load(seg_path, allow_pickle=True).item()

    real_fg = data_dict["real_points"]
    # NOTE: add zero reflectance to virtual points instead of removing them from real points
    virt = np.concatenate(
        [
            data_dict["virtual_points"][:, :3],
            np.zeros([data_dict["virtual_points"].shape[0], 1]),
            data_dict["virtual_points"][:, 3:],
        ],
        axis=-1,
    )
    # Pad the raw points with ones so each row matches the augmented
    # feature width and ends with an indicator column.
    pad_width = real_fg.shape[1] - raw.shape[1] + 1
    points = np.concatenate([raw, np.ones([raw.shape[0], pad_width])], axis=1)
    real_fg = np.concatenate([real_fg, np.zeros([real_fg.shape[0], 1])], axis=1)
    # note: this part is different from Tianwei's implementation, we don't have duplicate foreground real points.
    if len(data_dict["real_points_indice"]) > 0:
        points[data_dict["real_points_indice"]] = real_fg
    if virtual:
        virt = np.concatenate([virt, -1 * np.ones([virt.shape[0], 1])], axis=1)
        points = np.concatenate([points, virt], axis=0).astype(np.float32)
    return points
16,784 | import os
import numpy as np
import torch
def reduce_LiDAR_beams(pts, reduce_beams_to=32):
    """Subsample a 32-beam LiDAR point cloud down to fewer beams.

    Points are assigned to beams by their elevation angle and only the
    points falling into the kept beams' elevation bands are returned.

    Args:
        pts (np.ndarray | torch.Tensor): Point cloud with xyz in the first
            three columns.
        reduce_beams_to (int): Target beam count; one of 16, 4 or 1.

    Returns:
        np.ndarray: The subset of points belonging to the kept beams.
    """
    if isinstance(pts, np.ndarray):
        pts = torch.from_numpy(pts)
    # Elevation angle of every point, in [-pi/2, pi/2].
    dist = torch.sqrt(pts[:, 0].pow(2) + pts[:, 1].pow(2) + pts[:, 2].pow(2))
    theta = torch.asin(pts[:, 2] / dist)
    phi = torch.atan2(pts[:, 1], pts[:, 0])  # azimuth; kept for parity, unused
    # Per-beam elevation boundaries of the 32-beam sensor: top and bottom
    # are fixed, intermediate beams are evenly spaced.
    top_ang = 0.1862
    down_ang = -0.5353
    beam_range = torch.zeros(32)
    beam_range[0] = top_ang
    beam_range[31] = down_ang
    for beam_id in range(1, 31):
        beam_range[beam_id] = beam_range[beam_id - 1] - 0.023275
    # beam_range = [1, 0.18, 0.15, 0.13, 0.11, 0.085, 0.065, 0.03, 0.01, -0.01, -0.03, -0.055, -0.08, -0.105, -0.13, -0.155, -0.18, -0.205, -0.228, -0.251, -0.275,
    #                -0.295, -0.32, -0.34, -0.36, -0.38, -0.40, -0.425, -0.45, -0.47, -0.49, -0.52, -0.54]
    if reduce_beams_to == 16:
        keep_ids = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]
    elif reduce_beams_to == 4:
        keep_ids = [7, 9, 11, 13]
    # [?] pick the 14th beam
    elif reduce_beams_to == 1:
        keep_ids = [9]
    else:
        raise NotImplementedError
    num_pts, _ = pts.size()
    mask = torch.zeros(num_pts, dtype=torch.bool)
    for beam_id in keep_ids:
        # Band between two consecutive beam boundaries, shifted by the
        # same -0.012 offset the original implementation uses.
        upper = beam_range[beam_id - 1] - 0.012
        lower = beam_range[beam_id] - 0.012
        mask |= (theta < upper) & (theta > lower)
    return pts[mask].numpy()
16,785 | import tempfile
from os import path as osp
from typing import Any, Dict
import mmcv
import numpy as np
import pyquaternion
import torch
from nuscenes.utils.data_classes import Box as NuScenesBox
from pyquaternion import Quaternion
from mmdet.datasets import DATASETS
from ..core.bbox import LiDARInstance3DBoxes
from .custom_3d import Custom3DDataset
The provided code snippet includes necessary dependencies for implementing the `output_to_nusc_box` function. Write a Python function `def output_to_nusc_box(detection)` to solve the following problem:
Convert the output to the box class in the nuScenes. Args: detection (dict): Detection results. - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. - scores_3d (torch.Tensor): Detection scores. - labels_3d (torch.Tensor): Predicted box labels. Returns: list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
Here is the function:
def output_to_nusc_box(detection):
    """Convert the output to the box class in the nuScenes.

    Args:
        detection (dict): Detection results.
            - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
            - scores_3d (torch.Tensor): Detection scores.
            - labels_3d (torch.Tensor): Predicted box labels.

    Returns:
        list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
    """
    box3d = detection["boxes_3d"]
    scores = detection["scores_3d"].numpy()
    labels = detection["labels_3d"].numpy()
    centers = box3d.gravity_center.numpy()
    dims = box3d.dims.numpy()
    # TODO: check whether this yaw conversion is necessary
    # with dir_offset & dir_limit in the head
    yaws = -box3d.yaw.numpy() - np.pi / 2
    nusc_boxes = []
    for idx in range(len(box3d)):
        quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=yaws[idx])
        # vx, vy come from tensor columns 7:9; vz is always zero.
        velocity = (*box3d.tensor[idx, 7:9], 0.0)
        nusc_boxes.append(
            NuScenesBox(
                centers[idx],
                dims[idx],
                quat,
                label=labels[idx],
                score=scores[idx],
                velocity=velocity,
            )
        )
    return nusc_boxes
16,786 | import tempfile
from os import path as osp
from typing import Any, Dict
import mmcv
import numpy as np
import pyquaternion
import torch
from nuscenes.utils.data_classes import Box as NuScenesBox
from pyquaternion import Quaternion
from mmdet.datasets import DATASETS
from ..core.bbox import LiDARInstance3DBoxes
from .custom_3d import Custom3DDataset
The provided code snippet includes necessary dependencies for implementing the `lidar_nusc_box_to_global` function. Write a Python function `def lidar_nusc_box_to_global( info, boxes, classes, eval_configs, eval_version="detection_cvpr_2019" )` to solve the following problem:
Convert the box from ego to global coordinate. Args: info (dict): Info for a specific sample data, including the calibration information. boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. classes (list[str]): Mapped classes in the evaluation. eval_configs : Evaluation configuration object. eval_version (str): Evaluation version. Default: 'detection_cvpr_2019' Returns: list: List of standard NuScenesBoxes in the global coordinate.
Here is the function:
def lidar_nusc_box_to_global(
    info, boxes, classes, eval_configs, eval_version="detection_cvpr_2019"
):
    """Convert boxes from the LiDAR frame to the global frame.

    Boxes are first moved into the ego-vehicle frame, where detections
    outside the per-class evaluation range are dropped, and then into the
    global frame.

    Args:
        info (dict): Info for a specific sample data, including the
            calibration information.
        boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
        classes (list[str]): Mapped classes in the evaluation.
        eval_configs: Evaluation configuration object.
        eval_version (str): Evaluation version.
            Default: 'detection_cvpr_2019'

    Returns:
        list: List of standard NuScenesBoxes in the global coordinate.
    """
    cls_range_map = eval_configs.class_range
    kept = []
    for box in boxes:
        # lidar -> ego
        box.rotate(pyquaternion.Quaternion(info["lidar2ego_rotation"]))
        box.translate(np.array(info["lidar2ego_translation"]))
        # Drop detections beyond the class-specific evaluation radius,
        # measured in the xy plane of the ego frame.
        radius = np.linalg.norm(box.center[:2], 2)
        if radius > cls_range_map[classes[box.label]]:
            continue
        # ego -> global
        box.rotate(pyquaternion.Quaternion(info["ego2global_rotation"]))
        box.translate(np.array(info["ego2global_translation"]))
        kept.append(box)
    return kept
import platform

import numpy as np
from mmcv.utils import Registry, build_from_cfg
from mmdet.datasets import DATASETS
from mmdet.datasets.builder import _concat_dataset
class CBGSDataset:
    """A wrapper of class sampled dataset with ann_file path. Implementation of
    paper `Class-balanced Grouping and Sampling for Point Cloud 3D Object
    Detection <https://arxiv.org/abs/1908.09492.>`_.

    Balance the number of scenes under different classes.

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be class sampled.
    """

    def __init__(self, dataset):
        self.dataset = dataset
        self.CLASSES = dataset.CLASSES
        self.cat2id = {name: i for i, name in enumerate(self.CLASSES)}
        self.sample_indices = self._get_sample_indices()
        # self.dataset.data_infos = self.data_infos
        # Mirror the wrapped dataset's group flags for the resampled order.
        if hasattr(self.dataset, "flag"):
            self.flag = np.array(
                [self.dataset.flag[ind] for ind in self.sample_indices], dtype=np.uint8
            )

    def set_epoch(self, epoch):
        # Delegate epoch bookkeeping to the wrapped dataset.
        self.dataset.set_epoch(epoch)

    def _get_sample_indices(self):
        """Compute class-balanced sample indices over the wrapped dataset.

        Returns:
            list[int]: Resampled indices so every class is roughly
            equally represented.
        """
        # Bucket every sample index under each category it contains.
        class_sample_idxs = {cat_id: [] for cat_id in self.cat2id.values()}
        for sample_idx in range(len(self.dataset)):
            for cat_id in self.dataset.get_cat_ids(sample_idx):
                class_sample_idxs[cat_id].append(sample_idx)
        duplicated_samples = sum(len(v) for v in class_sample_idxs.values())
        class_distribution = {
            k: len(v) / duplicated_samples for k, v in class_sample_idxs.items()
        }
        # Resample each bucket so all classes approach a uniform share.
        target_frac = 1.0 / len(self.CLASSES)
        sample_indices = []
        for cat_id, cls_inds in class_sample_idxs.items():
            ratio = target_frac / class_distribution[cat_id]
            sample_indices += np.random.choice(
                cls_inds, int(len(cls_inds) * ratio)
            ).tolist()
        return sample_indices

    def __getitem__(self, idx):
        """Get item from infos according to the given index.

        Returns:
            dict: Data dictionary of the corresponding index.
        """
        return self.dataset[self.sample_indices[idx]]

    def __len__(self):
        """Return the length of data infos.

        Returns:
            int: Length of data infos.
        """
        return len(self.sample_indices)
def build_dataset(cfg, default_args=None):
    """Build a dataset (possibly wrapped) from a config.

    Supports list configs (concatenated), the ConcatDataset / RepeatDataset /
    ClassBalancedDataset / CBGSDataset wrapper types, multi-annotation-file
    configs, and plain registry-built datasets.

    Args:
        cfg (dict | list | tuple): Dataset config(s).
        default_args (dict | None): Default args forwarded to the builder.

    Returns:
        The constructed dataset object.
    """
    # Imported lazily to avoid circular imports at module load time.
    from mmdet3d.datasets.dataset_wrappers import CBGSDataset
    from mmdet.datasets.dataset_wrappers import ClassBalancedDataset, ConcatDataset, RepeatDataset

    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    dataset_type = cfg["type"]
    if dataset_type == "ConcatDataset":
        return ConcatDataset(
            [build_dataset(c, default_args) for c in cfg["datasets"]],
            cfg.get("separate_eval", True),
        )
    if dataset_type == "RepeatDataset":
        return RepeatDataset(build_dataset(cfg["dataset"], default_args), cfg["times"])
    if dataset_type == "ClassBalancedDataset":
        return ClassBalancedDataset(
            build_dataset(cfg["dataset"], default_args), cfg["oversample_thr"]
        )
    if dataset_type == "CBGSDataset":
        return CBGSDataset(build_dataset(cfg["dataset"], default_args))
    if isinstance(cfg.get("ann_file"), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
16,788 | from collections import OrderedDict
from mmcv.runner import BaseModule, force_fp32
from mmdet.models.builder import BACKBONES
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
The provided code snippet includes necessary dependencies for implementing the `dw_conv3x3` function. Write a Python function `def dw_conv3x3(in_channels, out_channels, module_name, postfix, stride=1, kernel_size=3, padding=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def dw_conv3x3(in_channels, out_channels, module_name, postfix, stride=1, kernel_size=3, padding=1):
    """Depthwise-separable 3x3 conv block as (name, module) pairs.

    Returns a depthwise conv, a pointwise 1x1 conv, a BatchNorm and a ReLU,
    each paired with a unique layer name built from ``module_name`` and
    ``postfix``.

    NOTE(review): the depthwise conv uses ``groups=out_channels`` with
    ``in_channels`` inputs, which requires in_channels == out_channels —
    presumably guaranteed by callers; confirm before reusing elsewhere.
    """
    prefix = '{}_{}'.format(module_name, postfix)
    depthwise = nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=out_channels,
        bias=False,
    )
    pointwise = nn.Conv2d(
        in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, bias=False
    )
    return [
        (prefix + '/dw_conv3x3', depthwise),
        (prefix + '/pw_conv1x1', pointwise),
        (prefix + '/pw_norm', nn.BatchNorm2d(out_channels)),
        (prefix + '/pw_relu', nn.ReLU(inplace=True)),
    ]
16,789 | from collections import OrderedDict
from mmcv.runner import BaseModule, force_fp32
from mmdet.models.builder import BACKBONES
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=3, padding=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=3, padding=1):
    """3x3 conv + BatchNorm + ReLU as named (layer_name, module) pairs."""
    base = f"{module_name}_{postfix}"
    conv = nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=False,
    )
    return [
        (base + "/conv", conv),
        (base + "/norm", nn.BatchNorm2d(out_channels)),
        (base + "/relu", nn.ReLU(inplace=True)),
    ]
16,790 | from collections import OrderedDict
from mmcv.runner import BaseModule, force_fp32
from mmdet.models.builder import BACKBONES
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=1, padding=0)` to solve the following problem:
1x1 convolution with padding
Here is the function:
def conv1x1(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=1, padding=0):
    """Build (name, module) pairs for a 1x1 Conv-BN-ReLU block.

    Returns a list of three ``(layer_name, nn.Module)`` tuples, named
    ``{module_name}_{postfix}/{conv,norm,relu}``, suitable for feeding an
    ``OrderedDict``-based ``nn.Sequential``.
    """
    prefix = f"{module_name}_{postfix}"
    conv = nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=False,  # BatchNorm follows immediately, so a conv bias is redundant
    )
    return [
        (f"{prefix}/conv", conv),
        (f"{prefix}/norm", nn.BatchNorm2d(out_channels)),
        (f"{prefix}/relu", nn.ReLU(inplace=True)),
    ]
16,791 | from torch import nn
from typing import Any, Dict
from functools import cached_property
import torch
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.cnn.resnet import make_res_layer, BasicBlock
from torch import nn
from torch.nn import functional as F
from mmdet3d.models.builder import build_backbone
from mmdet.models import BACKBONES
from torchvision.utils import save_image
from mmdet3d.ops import feature_decorator
from mmcv.cnn.bricks.non_local import NonLocal2d
from flash_attn.flash_attention import FlashMHA
The provided code snippet includes necessary dependencies for implementing the `get_paddings_indicator` function. Write a Python function `def get_paddings_indicator(actual_num, max_num, axis=0)` to solve the following problem:
Create boolean mask by the actual number of entries in a padded tensor. Args: actual_num ([type]): [description] max_num ([type]): [description] Returns: [type]: [description]
Here is the function:
def get_paddings_indicator(actual_num, max_num, axis=0):
    """Build a boolean mask marking the valid (non-padded) entries.

    Args:
        actual_num (torch.Tensor): per-sample count of real entries.
        max_num (int): padded length along the masked dimension.
        axis (int): position in ``actual_num`` after which the new
            mask dimension is inserted.

    Returns:
        torch.Tensor: bool tensor; True where index < actual_num.
        E.g. counts [3, 4, 2] with max_num 5 give rows of 3/4/2 Trues.
    """
    counts = torch.unsqueeze(actual_num, axis + 1)
    # Broadcast a running index [0, 1, ..., max_num-1] along the new axis.
    view_shape = [1] * counts.dim()
    view_shape[axis + 1] = -1
    positions = torch.arange(
        max_num, dtype=torch.int, device=counts.device
    ).view(view_shape)
    # True wherever the index is still below the sample's entry count.
    return counts.int() > positions
16,792 | from typing import Any, Dict
import torch
from mmcv.cnn import build_norm_layer
from torch import nn
from torch.nn import functional as F
from mmdet3d.models.builder import build_backbone
from mmdet.models import BACKBONES
The provided code snippet includes necessary dependencies for implementing the `get_paddings_indicator` function. Write a Python function `def get_paddings_indicator(actual_num, max_num, axis=0)` to solve the following problem:
Create boolean mask by the actual number of entries in a padded tensor. Args: actual_num ([type]): [description] max_num ([type]): [description] Returns: [type]: [description]
Here is the function:
def get_paddings_indicator(actual_num, max_num, axis=0):
    """Build a boolean mask distinguishing real entries from padding.

    Args:
        actual_num (torch.Tensor): number of real entries per sample.
        max_num (int): padded length along the masked dimension.
        axis (int): position in ``actual_num`` after which the new
            mask dimension is inserted.

    Returns:
        torch.Tensor: bool tensor; True at positions holding real data.
    """
    counts = actual_num.unsqueeze(axis + 1)
    # Shape like counts but with -1 on the new axis so arange broadcasts there.
    arange_shape = [1] * counts.dim()
    arange_shape[axis + 1] = -1
    index_row = torch.arange(
        max_num, dtype=torch.int, device=counts.device
    ).view(arange_shape)
    # e.g. counts [[3],[4],[2]] vs indices [0..4] -> rows of 3/4/2 Trues.
    return counts.int() > index_row
16,793 | from typing import Tuple
import torch
from mmcv.runner import force_fp32
from torch import nn
from mmdet3d.ops import bev_pool
def boolmask2idx(mask):
    """Convert a 1-D boolean mask to a Python list of its True indices.

    Kept as an explicit nonzero + squeeze because ONNX export does not
    support 'nonzero' directly (workaround, per the original author's note).
    """
    hits = torch.nonzero(mask)  # (k, 1) for a 1-D mask
    return hits.squeeze(1).tolist()
16,794 | from typing import Tuple
import torch
from mmcv.runner import force_fp32
from torch import nn
from mmdet3d.ops import bev_pool
def gen_dx_bx(xbound, ybound, zbound):
    """Derive grid geometry from per-axis [lower, upper, step] bounds.

    Returns:
        dx (torch.Tensor): voxel size per axis (the step values).
        bx (torch.Tensor): center of the first voxel per axis (lower + step/2).
        nx (torch.LongTensor): number of voxels per axis ((upper - lower) / step,
            truncated toward zero by the LongTensor conversion).
    """
    sizes, origins, counts = [], [], []
    for bound in (xbound, ybound, zbound):
        lo, hi, step = bound[0], bound[1], bound[2]
        sizes.append(step)
        origins.append(lo + step / 2.0)
        counts.append((hi - lo) / step)
    return torch.Tensor(sizes), torch.Tensor(origins), torch.LongTensor(counts)
16,795 | from mmcv.utils import Registry
from mmdet.models.builder import BACKBONES, HEADS, LOSSES, NECKS
def build_backbone(cfg):
    """Instantiate a backbone module from its registry config dict."""
    builder = BACKBONES.build
    return builder(cfg)
16,796 | from mmcv.utils import Registry
from mmdet.models.builder import BACKBONES, HEADS, LOSSES, NECKS
def build_neck(cfg):
    """Instantiate a neck module from its registry config dict."""
    builder = NECKS.build
    return builder(cfg)
16,797 | from mmcv.utils import Registry
from mmdet.models.builder import BACKBONES, HEADS, LOSSES, NECKS
VTRANSFORMS = Registry("vtransforms")
def build_vtransform(cfg):
    """Instantiate a view-transform module from its registry config dict."""
    builder = VTRANSFORMS.build
    return builder(cfg)
16,798 | from mmcv.utils import Registry
from mmdet.models.builder import BACKBONES, HEADS, LOSSES, NECKS
FUSERS = Registry("fusers")
def build_fuser(cfg):
    """Instantiate a fuser module from its registry config dict."""
    builder = FUSERS.build
    return builder(cfg)
16,799 | from mmcv.utils import Registry
from mmdet.models.builder import BACKBONES, HEADS, LOSSES, NECKS
def build_head(cfg):
    """Instantiate a head module from its registry config dict."""
    builder = HEADS.build
    return builder(cfg)
16,800 | from mmcv.utils import Registry
from mmdet.models.builder import BACKBONES, HEADS, LOSSES, NECKS
def build_loss(cfg):
    """Instantiate a loss module from its registry config dict."""
    builder = LOSSES.build
    return builder(cfg)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.