code stringlengths 66 870k | docstring stringlengths 19 26.7k | func_name stringlengths 1 138 | language stringclasses 1
value | repo stringlengths 7 68 | path stringlengths 5 324 | url stringlengths 46 389 | license stringclasses 7
values |
|---|---|---|---|---|---|---|---|
def morpher(imgpaths, width=500, height=600, num_frames=20, fps=10,
out_frames=None, out_video=None, plot=False, background='black'):
"""
Create a morph sequence from multiple images in imgpaths
:param imgpaths: array or generator of image paths
"""
video = videoer.Video(out_video, fps, width, he... |
Create a morph sequence from multiple images in imgpaths
:param imgpaths: array or generator of image paths
| morpher | python | OpenTalker/video-retalking | third_part/GPEN/face_morpher/facemorpher/morpher.py | https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/morpher.py | Apache-2.0 |
def bilinear_interpolate(img, coords):
""" Interpolates over every image channel
http://en.wikipedia.org/wiki/Bilinear_interpolation
:param img: max 3 channel image
:param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
:returns: array of interpolated pixels with same shape as coords
"""
int_... | Interpolates over every image channel
http://en.wikipedia.org/wiki/Bilinear_interpolation
:param img: max 3 channel image
:param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
:returns: array of interpolated pixels with same shape as coords
| bilinear_interpolate | python | OpenTalker/video-retalking | third_part/GPEN/face_morpher/facemorpher/warper.py | https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py | Apache-2.0 |
def grid_coordinates(points):
""" x,y grid coordinates within the ROI of supplied points
:param points: points to generate grid coordinates
:returns: array of (x, y) coordinates
"""
xmin = np.min(points[:, 0])
xmax = np.max(points[:, 0]) + 1
ymin = np.min(points[:, 1])
ymax = np.max(points[:, 1]) + 1
... | x,y grid coordinates within the ROI of supplied points
:param points: points to generate grid coordinates
:returns: array of (x, y) coordinates
| grid_coordinates | python | OpenTalker/video-retalking | third_part/GPEN/face_morpher/facemorpher/warper.py | https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py | Apache-2.0 |
def process_warp(src_img, result_img, tri_affines, dst_points, delaunay):
"""
Warp each triangle from the src_image only within the
ROI of the destination image (points in dst_points).
"""
roi_coords = grid_coordinates(dst_points)
# indices to vertices. -1 if pixel is not in any triangle
roi_tri_indices =... |
Warp each triangle from the src_image only within the
ROI of the destination image (points in dst_points).
| process_warp | python | OpenTalker/video-retalking | third_part/GPEN/face_morpher/facemorpher/warper.py | https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py | Apache-2.0 |
def triangular_affine_matrices(vertices, src_points, dest_points):
"""
Calculate the affine transformation matrix for each
triangle (x,y) vertex from dest_points to src_points
:param vertices: array of triplet indices to corners of triangle
:param src_points: array of [x, y] points to landmarks for source im... |
Calculate the affine transformation matrix for each
triangle (x,y) vertex from dest_points to src_points
:param vertices: array of triplet indices to corners of triangle
:param src_points: array of [x, y] points to landmarks for source image
:param dest_points: array of [x, y] points to landmarks for destin... | triangular_affine_matrices | python | OpenTalker/video-retalking | third_part/GPEN/face_morpher/facemorpher/warper.py | https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/warper.py | Apache-2.0 |
def get_landmark(filepath, predictor, detector=None, fa=None):
"""get landmark with dlib
:return: np.array shape=(68, 2)
"""
if fa is not None:
image = io.imread(filepath)
lms, _, bboxes = fa.get_landmarks(image, return_bboxes=True)
if len(lms) == 0:
return None
... | get landmark with dlib
:return: np.array shape=(68, 2)
| get_landmark | python | OpenTalker/video-retalking | utils/alignment_stit.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/alignment_stit.py | Apache-2.0 |
def align_face(filepath_or_image, predictor, output_size, detector=None,
enable_padding=False, scale=1.0):
"""
:param filepath: str
:return: PIL Image
"""
c, x, y = compute_transform(filepath_or_image, predictor, detector=detector,
scale=scale)
qua... |
:param filepath: str
:return: PIL Image
| align_face | python | OpenTalker/video-retalking | utils/alignment_stit.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/alignment_stit.py | Apache-2.0 |
def num_frames(length, fsize, fshift):
"""Compute number of time frames of spectrogram
"""
pad = (fsize - fshift)
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M | Compute number of time frames of spectrogram
| num_frames | python | OpenTalker/video-retalking | utils/audio.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/audio.py | Apache-2.0 |
def get_landmark(self, img_np):
"""get landmark with dlib
:return: np.array shape=(68, 2)
"""
detector = dlib.get_frontal_face_detector()
dets = detector(img_np, 1)
if len(dets) == 0:
return None
d = dets[0]
# Get the landmarks/parts for the fa... | get landmark with dlib
:return: np.array shape=(68, 2)
| get_landmark | python | OpenTalker/video-retalking | utils/ffhq_preprocess.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/ffhq_preprocess.py | Apache-2.0 |
def align_face(self, img, lm, output_size=1024):
"""
:param filepath: str
:return: PIL Image
"""
lm_chin = lm[0: 17] # left-right
lm_eyebrow_left = lm[17: 22] # left-right
lm_eyebrow_right = lm[22: 27] # left-right
lm_nose = lm[27: 31] # top-down
... |
:param filepath: str
:return: PIL Image
| align_face | python | OpenTalker/video-retalking | utils/ffhq_preprocess.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/ffhq_preprocess.py | Apache-2.0 |
def convert_flow_to_deformation(flow):
r"""convert flow fields to deformations.
Args:
flow (tensor): Flow field obtained by the model
Returns:
deformation (tensor): The deformation used for warping
"""
b,c,h,w = flow.shape
flow_norm = 2 * torch.cat([flow[:,:1,...]/(w-1),flow[:,1... | convert flow fields to deformations.
Args:
flow (tensor): Flow field obtained by the model
Returns:
deformation (tensor): The deformation used for warping
| convert_flow_to_deformation | python | OpenTalker/video-retalking | utils/flow_util.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/flow_util.py | Apache-2.0 |
def make_coordinate_grid(flow):
r"""obtain coordinate grid with the same size as the flow filed.
Args:
flow (tensor): Flow field obtained by the model
Returns:
grid (tensor): The grid with the same size as the input flow
"""
b,c,h,w = flow.shape
x = torch.arange(w).to(flow)... | obtain coordinate grid with the same size as the flow filed.
Args:
flow (tensor): Flow field obtained by the model
Returns:
grid (tensor): The grid with the same size as the input flow
| make_coordinate_grid | python | OpenTalker/video-retalking | utils/flow_util.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/flow_util.py | Apache-2.0 |
def warp_image(source_image, deformation):
r"""warp the input image according to the deformation
Args:
source_image (tensor): source images to be warped
deformation (tensor): deformations used to warp the images; value in range (-1, 1)
Returns:
output (tensor): the warped images
... | warp the input image according to the deformation
Args:
source_image (tensor): source images to be warped
deformation (tensor): deformations used to warp the images; value in range (-1, 1)
Returns:
output (tensor): the warped images
| warp_image | python | OpenTalker/video-retalking | utils/flow_util.py | https://github.com/OpenTalker/video-retalking/blob/master/utils/flow_util.py | Apache-2.0 |
def compute_density_for_timestep_sampling(
weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None
):
"""Compute the density for sampling the timesteps when doing SD3 training.
Courtesy: This was contributed by Rafie Walker in https://github.com/h... | Compute the density for sampling the timesteps when doing SD3 training.
Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.
SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
| compute_density_for_timestep_sampling | python | memoavatar/memo | finetune.py | https://github.com/memoavatar/memo/blob/master/finetune.py | Apache-2.0 |
def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None):
"""Computes loss weighting scheme for SD3 training.
Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.
SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
"""
if weightin... | Computes loss weighting scheme for SD3 training.
Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.
SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
| compute_loss_weighting_for_sd3 | python | memoavatar/memo | finetune.py | https://github.com/memoavatar/memo/blob/master/finetune.py | Apache-2.0 |
def set_use_npu_flash_attention(self, use_npu_flash_attention: bool) -> None:
r"""
Set whether to use npu flash attention from `torch_npu` or not.
"""
if use_npu_flash_attention:
processor = AttnProcessorNPU()
else:
# set attention processor
#... |
Set whether to use npu flash attention from `torch_npu` or not.
| set_use_npu_flash_attention | python | memoavatar/memo | memo/models/attention_processor.py | https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py | Apache-2.0 |
def set_use_memory_efficient_attention_xformers(
self,
use_memory_efficient_attention_xformers: bool,
attention_op: Optional[Callable] = None,
) -> None:
r"""
Set whether to use memory efficient attention from `xformers` or not.
Args:
use_memory_efficient... |
Set whether to use memory efficient attention from `xformers` or not.
Args:
use_memory_efficient_attention_xformers (`bool`):
Whether to use memory efficient attention from `xformers` or not.
attention_op (`Callable`, *optional*):
The attention o... | set_use_memory_efficient_attention_xformers | python | memoavatar/memo | memo/models/attention_processor.py | https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py | Apache-2.0 |
def set_attention_slice(self, slice_size: int) -> None:
r"""
Set the slice size for attention computation.
Args:
slice_size (`int`):
The slice size for attention computation.
"""
if slice_size is not None and slice_size > self.sliceable_head_dim:
... |
Set the slice size for attention computation.
Args:
slice_size (`int`):
The slice size for attention computation.
| set_attention_slice | python | memoavatar/memo | memo/models/attention_processor.py | https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py | Apache-2.0 |
def set_processor(self, processor: "AttnProcessor") -> None:
r"""
Set the attention processor to use.
Args:
processor (`AttnProcessor`):
The attention processor to use.
"""
# if current processor is in `self._modules` and if passed `processor` is not,... |
Set the attention processor to use.
Args:
processor (`AttnProcessor`):
The attention processor to use.
| set_processor | python | memoavatar/memo | memo/models/attention_processor.py | https://github.com/memoavatar/memo/blob/master/memo/models/attention_processor.py | Apache-2.0 |
def preprocess_audio(
wav_path: str,
fps: int,
wav2vec_model: str,
vocal_separator_model: str = None,
cache_dir: str = "",
device: str = "cuda",
sample_rate: int = 16000,
num_generated_frames_per_clip: int = -1,
):
"""
Preprocess the audio file and extract audio embeddings.
... |
Preprocess the audio file and extract audio embeddings.
Args:
wav_path (str): Path to the input audio file.
fps (int): Frames per second for the audio processing.
wav2vec_model (str): Path to the pretrained Wav2Vec model.
vocal_separator_model (str, optional): Path to the vocal... | preprocess_audio | python | memoavatar/memo | memo/utils/audio_utils.py | https://github.com/memoavatar/memo/blob/master/memo/utils/audio_utils.py | Apache-2.0 |
def extract_audio_emotion_labels(
model: str,
wav_path: str,
emotion2vec_model: str,
audio_length: int,
sample_rate: int = 16000,
device: str = "cuda",
):
"""
Extract audio emotion labels from an audio file.
Args:
model (str): Path to the MEMO model.
wav_path (str): ... |
Extract audio emotion labels from an audio file.
Args:
model (str): Path to the MEMO model.
wav_path (str): Path to the input audio file.
emotion2vec_model (str): Path to the Emotion2vec model.
audio_length (int): Target length for interpolated emotion labels.
sample_ra... | extract_audio_emotion_labels | python | memoavatar/memo | memo/utils/audio_utils.py | https://github.com/memoavatar/memo/blob/master/memo/utils/audio_utils.py | Apache-2.0 |
def extract_emotion(x):
"""
Extract emotion for a given audio segment.
"""
x = x.to(device=device)
x = F.layer_norm(x, x.shape).view(1, -1)
feats = emotion_model.extract_features(x)
x = feats["x"].mean(dim=1) # average across frames
x = classifier(x)
... |
Extract emotion for a given audio segment.
| extract_emotion | python | memoavatar/memo | memo/utils/audio_utils.py | https://github.com/memoavatar/memo/blob/master/memo/utils/audio_utils.py | Apache-2.0 |
def tensor_to_video(tensor, output_video_path, input_audio_path, fps=30):
"""
Converts a Tensor with shape [c, f, h, w] into a video and adds an audio track from the specified audio file.
Args:
tensor (Tensor): The Tensor to be converted, shaped [c, f, h, w].
output_video_path (str): The fi... |
Converts a Tensor with shape [c, f, h, w] into a video and adds an audio track from the specified audio file.
Args:
tensor (Tensor): The Tensor to be converted, shaped [c, f, h, w].
output_video_path (str): The file path where the output video will be saved.
input_audio_path (str): The... | tensor_to_video | python | memoavatar/memo | memo/utils/vision_utils.py | https://github.com/memoavatar/memo/blob/master/memo/utils/vision_utils.py | Apache-2.0 |
def preprocess_image(face_analysis_model: str, image_path: str, image_size: int = 512):
"""
Preprocess the image and extract face embedding.
Args:
face_analysis_model (str): Path to the FaceAnalysis model directory.
image_path (str): Path to the image file.
image_size (int, optional... |
Preprocess the image and extract face embedding.
Args:
face_analysis_model (str): Path to the FaceAnalysis model directory.
image_path (str): Path to the image file.
image_size (int, optional): Target size for resizing the image. Default is 512.
Returns:
tuple: A tuple con... | preprocess_image | python | memoavatar/memo | memo/utils/vision_utils.py | https://github.com/memoavatar/memo/blob/master/memo/utils/vision_utils.py | Apache-2.0 |
def get_video_duration(file_path):
"""Use ffmpeg to get the video duration in seconds."""
global global_counter
result = subprocess.run(["ffmpeg", "-i", file_path], stderr=subprocess.PIPE, text=True)
for line in result.stderr.split("\n"):
if "Duration" in line:
duration = line.split(... | Use ffmpeg to get the video duration in seconds. | get_video_duration | python | memoavatar/memo | scripts/calculate_durations.py | https://github.com/memoavatar/memo/blob/master/scripts/calculate_durations.py | Apache-2.0 |
def update_progress(duration):
"""Update the progress bar and count."""
nonlocal progress_count
with progress_lock:
progress_count += 1
percent = int((100 * progress_count) / total)
bar = "#" * (percent // 2)
sys.stdout.write(f"\r[{bar:<50}] {perce... | Update the progress bar and count. | update_progress | python | memoavatar/memo | scripts/calculate_durations.py | https://github.com/memoavatar/memo/blob/master/scripts/calculate_durations.py | Apache-2.0 |
def convert_audio_emb_to_vocals_path(audio_emb_path):
"""
Convert audio embedding path to the corresponding original vocals path.
"""
path_parts = Path(audio_emb_path).parts
filename = path_parts[-1]
filename_base = filename.replace(".pt", "")
new_filename = f"{filename_base}-raw_(Vocals)_Ki... |
Convert audio embedding path to the corresponding original vocals path.
| convert_audio_emb_to_vocals_path | python | memoavatar/memo | scripts/prepare_data.py | https://github.com/memoavatar/memo/blob/master/scripts/prepare_data.py | Apache-2.0 |
def extract_emotion(x):
"""
Extract emotion for a given audio segment.
"""
x = x.to(device=args.device)
x = F.layer_norm(x, x.shape).view(1, -1)
feats = emotion_model.extract_features(x)
x = feats["x"].mean(d... |
Extract emotion for a given audio segment.
| extract_emotion | python | memoavatar/memo | scripts/prepare_data.py | https://github.com/memoavatar/memo/blob/master/scripts/prepare_data.py | Apache-2.0 |
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2... |
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "Attribut... | make_closing | python | hankcs/pyhanlp | pyhanlp/util.py | https://github.com/hankcs/pyhanlp/blob/master/pyhanlp/util.py | Apache-2.0 |
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode), to unicode."""
    # Already-decoded text passes through untouched; anything else is
    # decoded from `encoding` using the requested error policy.
    if not isinstance(text, unicode):
        return unicode(text, encoding, errors=errors)
    return text
def newline(p1, p2, color=None, marker=None):
"""
https://stackoverflow.com/questions/36470343/how-to-draw-a-line-with-matplotlib
:param p1:
:param p2:
:return:
"""
ax = plt.gca()
xmin, xmax = ax.get_xbound()
if (p2[0] == p1[0]):
xmin = xmax = p1[0]
ymin, ymax = ax.g... | ERROR: type should be string, got "\n https://stackoverflow.com/questions/36470343/how-to-draw-a-line-with-matplotlib\n :param p1:\n :param p2:\n :return:\n " | newline | python | hankcs/pyhanlp | tests/book/ch05/plot_name.py | https://github.com/hankcs/pyhanlp/blob/master/tests/book/ch05/plot_name.py | Apache-2.0 |
def estimate_mfu(self, fwdbwd_per_iter, dt):
""" estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
# first estimate the number of flops we do per iteration.
# see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
N = sum(p.numel() for p in self.... | estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS | estimate_mfu | python | DLLXW/baby-llama2-chinese | model.py | https://github.com/DLLXW/baby-llama2-chinese/blob/master/model.py | MIT |
def generate(self, idx, eos, max_new_tokens, temperature=1.0, top_k=None):
"""
Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
the sequence max_new_tokens times, feeding the predictions back into the model each time.
Most likely you'll want to make su... |
Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
the sequence max_new_tokens times, feeding the predictions back into the model each time.
Most likely you'll want to make sure to be in model.eval() mode of operation for this.
Also note this is a super... | generate | python | DLLXW/baby-llama2-chinese | model.py | https://github.com/DLLXW/baby-llama2-chinese/blob/master/model.py | MIT |
def export(self, filepath='model.bin'):
"""export the model weights in fp32 into .bin file to be read from C"""
f = open(filepath, 'wb')
def serialize(t):
d = t.detach().cpu().view(-1).numpy().astype(np.float32)
b = struct.pack(f'{len(d)}f', *d)
f.write(b)
... | export the model weights in fp32 into .bin file to be read from C | export | python | DLLXW/baby-llama2-chinese | model.py | https://github.com/DLLXW/baby-llama2-chinese/blob/master/model.py | MIT |
def convert_token_to_id(self, token):
    """ Converts a token (str) in an id using the vocab. """
    # Special tokens bypass the SentencePiece model entirely.
    special = self.special_tokens
    if token in special:
        return special[token]
    return self.sp_model.PieceToId(token)
def convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.index_special_tokens or index in [self.eos_id, self.bos_id, self.pad_id] or index < 0:
return ""
return self.sp_model.IdToPiece(index) | Converts an index (integer) in a token (str) using the vocab. | convert_id_to_token | python | DLLXW/baby-llama2-chinese | chatglm_tokenizer/tokenization_chatglm.py | https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py | MIT |
def save_vocabulary(self, save_directory, filename_prefix=None):
"""
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
... |
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the named of the saved files.
Retu... | save_vocabulary | python | DLLXW/baby-llama2-chinese | chatglm_tokenizer/tokenization_chatglm.py | https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py | MIT |
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence h... |
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
to... | build_inputs_with_special_tokens | python | DLLXW/baby-llama2-chinese | chatglm_tokenizer/tokenization_chatglm.py | https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py | MIT |
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Option... |
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and opt... | _pad | python | DLLXW/baby-llama2-chinese | chatglm_tokenizer/tokenization_chatglm.py | https://github.com/DLLXW/baby-llama2-chinese/blob/master/chatglm_tokenizer/tokenization_chatglm.py | MIT |
def fill_homoglyphs():
"""
Use http://dev.networkerror.org/utf8/?start=0&end=255&cols=10&show_uni_hex=on
with the stupid table width forced to auto.
This dataset is for ASCII characters mapped to UTF-8 homoglyphs (some approximate).
Some of the entries are also selected from the results of search(),... |
Use http://dev.networkerror.org/utf8/?start=0&end=255&cols=10&show_uni_hex=on
with the stupid table width forced to auto.
This dataset is for ASCII characters mapped to UTF-8 homoglyphs (some approximate).
Some of the entries are also selected from the results of search(), below.
Forward entries s... | fill_homoglyphs | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def get_writer():
    """
    :return: A codec writer for stdout. Necessary for output piping to work.
    """
    from sys import stdout
    # Python 3's stdout already performs encoding; only Python 2 needs an
    # explicit codec writer wrapped around the raw stream.
    if version_info >= (3,):
        return stdout
    from codecs import getwriter
    return getwriter(stdout.encoding or 'utf-8')(stdout)
:return: A codec writer for stdout. Necessary for output piping to work.
| get_writer | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def listing():
"""
Show a list of all known homoglyphs
"""
out = get_writer()
for hgs in all_hgs:
out.write(hgs.ascii + ':')
if hgs.fwd:
out.write(' fwd ')
for c in hgs.fwd:
out.write(field + c)
out.write(field)
if hgs.rev:
... |
Show a list of all known homoglyphs
| listing | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def explain(char):
"""
Show an explanation of all known homoglyphs for the given ASCII char
:param char: An ASCII char to explain
"""
if char not in hg_index:
print('No homoglyphs.')
return
try:
import unicodedata
except ImportError:
print('Install docutils.'... |
Show an explanation of all known homoglyphs for the given ASCII char
:param char: An ASCII char to explain
| explain | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def search():
"""
(Not useful to the user) Troll the unicode DB for normalization matches, which are potentially homoglyphs.
"""
try:
import unicodedata
except ImportError:
print('Install docutils.')
return
out = get_writer()
for point in xrange(ord('~') + 1, 0x1000... |
(Not useful to the user) Troll the unicode DB for normalization matches, which are potentially homoglyphs.
| search | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def pipe(replace):
"""
Pipe from input to output
End with ctrl+C or EOF
:param replace: A function to replace each char
"""
out = get_writer()
# "for line in stdin" works for piped input but not keyboard input
while True:
try:
line = read_line()
except EOFE... |
Pipe from input to output
End with ctrl+C or EOF
:param replace: A function to replace each char
| pipe | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def pipe_mimic(hardness):
"""
Pipe from input to output, replacing chars with homoglyphs
:param hardness: Percent probability to replace a char
"""
from itertools import chain
from random import random, randrange
def replace(c):
if random() > hardness / 100. or c not in hg_index:
... |
Pipe from input to output, replacing chars with homoglyphs
:param hardness: Percent probability to replace a char
| pipe_mimic | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def replace_check(c):
    """
    Replace non-ASCII chars with their code point
    """
    point = ord(c)
    if point > ord('~'):
        # Tag the character with its Unicode code point for inspection.
        return '<%(orig)c:U+%(point)04X>' % {'orig': c, 'point': point}
    return c
Replace non-ASCII chars with their code point
| replace_check | python | reinderien/mimic | mimic/__init__.py | https://github.com/reinderien/mimic/blob/master/mimic/__init__.py | MIT |
def build(class_cfg):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
ag_type = class_cfg.WhichOneof('anchor_generator')
... | Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
| build | python | traveller59/second.pytorch | second/builder/anchor_generator_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/builder/anchor_generator_builder.py | MIT |
def build(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner,
multi_gpu=False):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
... | Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are speci... | build | python | traveller59/second.pytorch | second/builder/dataset_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/builder/dataset_builder.py | MIT |
def build(similarity_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
similarity_type = similar... | Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
| build | python | traveller59/second.pytorch | second/builder/similarity_calculator_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/builder/similarity_calculator_builder.py | MIT |
def second_box_encode(boxes,
anchors,
encode_angle_to_vector=False,
smooth_dim=False,
cylindrical=False):
"""box encode for VoxelNet in lidar
Args:
boxes ([N, 7 + ?] Tensor): normal boxes: x, y, z, w, l, h, r, custom... | box encode for VoxelNet in lidar
Args:
boxes ([N, 7 + ?] Tensor): normal boxes: x, y, z, w, l, h, r, custom values
anchors ([N, 7] Tensor): anchors
| second_box_encode | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def second_box_decode(box_encodings,
anchors,
encode_angle_to_vector=False,
smooth_dim=False):
"""box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
... | box decode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
| second_box_decode | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def bev_box_encode(boxes,
anchors,
encode_angle_to_vector=False,
smooth_dim=False):
"""box encode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
encode_angle_to... | box encode for VoxelNet in lidar
Args:
boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
anchors ([N, 7] Tensor): anchors
encode_angle_to_vector: bool. increase aos performance,
decrease other performance.
| bev_box_encode | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def corners_nd(dims, origin=0.5):
"""generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relate to smallest point.
Returns:
float array,... | generate relative box corners based on length per dim and
origin point.
Args:
dims (float array, shape=[N, ndim]): array of length per dim
origin (list or array or float): origin point relate to smallest point.
Returns:
float array, shape=[N, 2 ** ndim, ndim]: returned cor... | corners_nd | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def rbbox2d_to_near_bbox(rbboxes):
"""convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
"""
rots = rbboxes[..., -1]
rots_0_pi_div_2 = np.abs(limit_period(r... | convert rotated bbox to nearest 'standing' or 'lying' bbox.
Args:
rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes
Returns:
bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes
| rbbox2d_to_near_bbox | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def rotation_2d(points, angles):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
... | rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angles (float array, shape=[N]): rotation angle.
Returns:
float array: same shape as points
| rotation_2d | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def rotation_box(box_corners, angle):
"""rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
"""
rot... | rotation 2d points based on origin point clockwise when angle positive.
Args:
points (float array, shape=[N, point_size, 2]): points to be rotated.
angle (float): rotation angle.
Returns:
float array: same shape as points
| rotation_box | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def center_to_corner_box3d(centers,
dims,
angles=None,
origin=(0.5, 0.5, 0.5),
axis=2):
"""convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]):... | convert kitti locations, dimensions and angles to corners
Args:
centers (float array, shape=[N, 3]): locations in kitti label file.
dims (float array, shape=[N, 3]): dimensions in kitti label file.
angles (float array, shape=[N]): rotation_y in kitti label file.
origin (list or ... | center_to_corner_box3d | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def center_to_corner_box2d(centers, dims, angles=None, origin=0.5):
"""convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, sh... | convert kitti locations, dimensions and angles to corners.
format: center(xy), dims(xy), angles(clockwise when positive)
Args:
centers (float array, shape=[N, 2]): locations in kitti label file.
dims (float array, shape=[N, 2]): dimensions in kitti label file.
angles (float array, s... | center_to_corner_box2d | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def create_anchors_3d_stride(feature_size,
sizes=[1.6, 3.9, 1.56],
anchor_strides=[0.4, 0.4, 0.0],
anchor_offsets=[0.2, -39.8, -1.78],
rotations=[0, np.pi / 2],
dtype=np.float... |
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
| create_anchors_3d_stride | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def iou_jit(boxes, query_boxes, eps=1.0):
"""calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes... | calculate box iou. note that jit version runs 2x faster than cython in
my machine!
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
| iou_jit | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def corner_to_surfaces_3d(corners):
"""convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
"""
# box_corners: [N, 8, 3],... | convert 3d box corners from corner function above
to surfaces that normal vectors all direct to internal.
Args:
corners (float array, [N, 8, 3]): 3d box corners.
Returns:
surfaces (float array, [N, 6, 4, 3]):
| corner_to_surfaces_3d | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_... | assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
| assign_label_to_voxel | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def image_box_region_area(img_cumsum, bbox):
"""check a 2d voxel is contained by a box. used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
| | |
|------A---------B
| | |
| | |
|----- C---------D
Iabcd = I... | check a 2d voxel is contained by a box. used to filter empty
anchors.
Summed-area table algorithm:
==> W
------------------
| | |
|------A---------B
| | |
| | |
|----- C---------D
Iabcd = ID-IB-IC+IA
Args:
img_cumsum: [M, H, W](y... | image_box_region_area | python | traveller59/second.pytorch | second/core/box_np_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/box_np_ops.py | MIT |
def is_line_segment_intersection_jit(lines1, lines2):
"""check if line segments1 and line segments2 have cross point
Args:
lines1 (float, [N, 2, 2]): [description]
lines2 (float, [M, 2, 2]): [description]
Returns:
[type]: [description]
"""
# Return true if line seg... | check if line segments1 and line segments2 have cross point
Args:
lines1 (float, [N, 2, 2]): [description]
lines2 (float, [M, 2, 2]): [description]
Returns:
[type]: [description]
| is_line_segment_intersection_jit | python | traveller59/second.pytorch | second/core/geometry.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py | MIT |
def points_in_convex_polygon_3d_jit_v1(points,
polygon_surfaces,
num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
... | check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
... | points_in_convex_polygon_3d_jit_v1 | python | traveller59/second.pytorch | second/core/geometry.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py | MIT |
def points_count_convex_polygon_3d_jit(points,
polygon_surfaces,
num_surfaces=None):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
... | check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
... | points_count_convex_polygon_3d_jit | python | traveller59/second.pytorch | second/core/geometry.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py | MIT |
def _points_count_convex_polygon_3d_jit(points,
polygon_surfaces,
normal_vec, d,
num_surfaces=None):
"""count points in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_sur... | count points in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
... | _points_count_convex_polygon_3d_jit | python | traveller59/second.pytorch | second/core/geometry.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py | MIT |
def points_in_convex_polygon_jit(points, polygon, clockwise=True):
"""check points is in 2d convex polygons. True when point in polygon
Args:
points: [num_points, 2] array.
polygon: [num_polygon, num_points_of_polygon, 2] array.
clockwise: bool. indicate polygon is clockwise.
Returns... | check points is in 2d convex polygons. True when point in polygon
Args:
points: [num_points, 2] array.
polygon: [num_polygon, num_points_of_polygon, 2] array.
clockwise: bool. indicate polygon is clockwise.
Returns:
[num_points, num_polygon] bool array.
| points_in_convex_polygon_jit | python | traveller59/second.pytorch | second/core/geometry.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py | MIT |
def points_in_convex_polygon(points, polygon, clockwise=True):
"""check points is in convex polygons. may run 2x faster when write in
cython(don't need to calculate all cross-product between edge and point)
Args:
points: [num_points, 2] array.
polygon: [num_polygon, num_points_of_polygon, 2]... | check points is in convex polygons. may run 2x faster when write in
cython(don't need to calculate all cross-product between edge and point)
Args:
points: [num_points, 2] array.
polygon: [num_polygon, num_points_of_polygon, 2] array.
clockwise: bool. indicate polygon is clockwise.
Re... | points_in_convex_polygon | python | traveller59/second.pytorch | second/core/geometry.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/geometry.py | MIT |
def noise_per_object_v3_(gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=5,
... | random rotate or remove each groundtrutn independently.
use kitti viewer to test this function points_transform_
Args:
gt_boxes: [N, 7+?], gt box in lidar.points_transform_
points: [M, 3+], point cloud in lidar.
| noise_per_object_v3_ | python | traveller59/second.pytorch | second/core/preprocess.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/preprocess.py | MIT |
def global_translate_(gt_boxes, points, noise_translate_std):
"""
Apply global translation to gt_boxes and points.
"""
if not isinstance(noise_translate_std, (list, tuple, np.ndarray)):
noise_translate_std = np.array([noise_translate_std, noise_translate_std, noise_translate_std])
if all([e... |
Apply global translation to gt_boxes and points.
| global_translate_ | python | traveller59/second.pytorch | second/core/preprocess.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/preprocess.py | MIT |
def _compare(self, boxes1, boxes2):
"""Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
"""
boxes1_bv = box_np_ops.rbbox2d_to_n... | Compute matrix of (negated) sq distances.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing negated pairwise squared distance.
| _compare | python | traveller59/second.pytorch | second/core/region_similarity.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/region_similarity.py | MIT |
def assign_per_class(self,
anchors_dict,
gt_boxes,
anchors_mask=None,
gt_classes=None,
gt_names=None,
importance=None):
"""this function assign target individally... | this function assign target individally for each class.
recommend for multi-class network.
| assign_per_class | python | traveller59/second.pytorch | second/core/target_assigner.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/target_assigner.py | MIT |
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of
size count)"""
if count == len(inds):
return data
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=data.dtype)
ret.fill(fill)
ret[inds] = data
else:
... | Unmap a subset of item (data) back to the original set of items (of
size count) | unmap | python | traveller59/second.pytorch | second/core/target_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/target_ops.py | MIT |
def create_target_np(all_anchors,
gt_boxes,
similarity_fn,
box_encoding_fn,
prune_anchor_fn=None,
gt_classes=None,
matched_threshold=0.6,
unmatched_threshold=0.45,
... | Modified from FAIR detectron.
Args:
all_anchors: [num_of_anchors, box_ndim] float tensor.
gt_boxes: [num_gt_boxes, box_ndim] float tensor.
similarity_fn: a function, accept anchors and gt_boxes, return
similarity matrix(such as IoU).
box_encoding_fn: a function, accept gt... | create_target_np | python | traveller59/second.pytorch | second/core/target_ops.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/target_ops.py | MIT |
def nms_gpu(dets, nms_overlap_thresh, device_id=0):
"""nms in gpu.
Args:
dets ([type]): [description]
nms_overlap_thresh ([type]): [description]
device_id ([type], optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
boxes_num = dets.... | nms in gpu.
Args:
dets ([type]): [description]
nms_overlap_thresh ([type]): [description]
device_id ([type], optional): Defaults to 0. [description]
Returns:
[type]: [description]
| nms_gpu | python | traveller59/second.pytorch | second/core/non_max_suppression/nms_gpu.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py | MIT |
def rotate_nms_gpu(dets, nms_overlap_thresh, device_id=0):
"""nms in gpu. WARNING: this function can provide right result
but its performance isn't be tested
Args:
dets ([type]): [description]
nms_overlap_thresh ([type]): [description]
device_id ([type], optional): Defaults to ... | nms in gpu. WARNING: this function can provide right result
but its performance isn't be tested
Args:
dets ([type]): [description]
nms_overlap_thresh ([type]): [description]
device_id ([type], optional): Defaults to 0. [description]
Returns:
[type]: [description]
... | rotate_nms_gpu | python | traveller59/second.pytorch | second/core/non_max_suppression/nms_gpu.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py | MIT |
def rotate_iou_gpu(boxes, query_boxes, device_id=0):
"""rotated box iou running in gpu. 500x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
boxes (float... | rotated box iou running in gpu. 500x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
... | rotate_iou_gpu | python | traveller59/second.pytorch | second/core/non_max_suppression/nms_gpu.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py | MIT |
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
"""rotated box iou running in gpu. 8x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
... | rotated box iou running in gpu. 8x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
... | rotate_iou_gpu_eval | python | traveller59/second.pytorch | second/core/non_max_suppression/nms_gpu.py | https://github.com/traveller59/second.pytorch/blob/master/second/core/non_max_suppression/nms_gpu.py | MIT |
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] ... | Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
| area | python | traveller59/second.pytorch | second/data/kitti_common.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py | MIT |
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise in... | Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N*M] representing pairwise intersection area
| intersection | python | traveller59/second.pytorch | second/data/kitti_common.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py | MIT |
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding N boxes.
Returns:
a numpy array with shape [N, M] representing p... | Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding N boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
| iou | python | traveller59/second.pytorch | second/data/kitti_common.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py | MIT |
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
... |
KITTI annotation format version 2:
{
[optional]points: [N, 3+] point cloud
[optional, for kitti]image: {
image_idx: ...
image_path: ...
image_shape: ...
}
point_cloud: {
num_features: 4
velodyne_path: ...
}
... | get_kitti_image_info | python | traveller59/second.pytorch | second/data/kitti_common.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_common.py | MIT |
def evaluation(self, detections, output_dir):
"""
detection
When you want to eval your own dataset, you MUST set correct
the z axis and box z center.
If you want to eval by my KITTI eval function, you must
provide the correct format annotations.
ground_truth_anno... |
detection
When you want to eval your own dataset, you MUST set correct
the z axis and box z center.
If you want to eval by my KITTI eval function, you must
provide the correct format annotations.
ground_truth_annotations format:
{
bbox: [N, 4], if yo... | evaluation | python | traveller59/second.pytorch | second/data/kitti_dataset.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_dataset.py | MIT |
def convert_to_kitti_info_version2(info):
"""convert kitti info v1 to v2 if possible.
"""
if "image" not in info or "calib" not in info or "point_cloud" not in info:
info["image"] = {
'image_shape': info["img_shape"],
'image_idx': info['image_idx'],
'image_path': ... | convert kitti info v1 to v2 if possible.
| convert_to_kitti_info_version2 | python | traveller59/second.pytorch | second/data/kitti_dataset.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/kitti_dataset.py | MIT |
def evaluation_kitti(self, detections, output_dir):
"""eval by kitti evaluation tool.
I use num_lidar_pts to set easy, mod, hard.
easy: num>15, mod: num>7, hard: num>0.
"""
print("++++++++NuScenes KITTI unofficial Evaluation:")
print(
"++++++++easy: num_lidar_... | eval by kitti evaluation tool.
I use num_lidar_pts to set easy, mod, hard.
easy: num>15, mod: num>7, hard: num>0.
| evaluation_kitti | python | traveller59/second.pytorch | second/data/nuscenes_dataset.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/nuscenes_dataset.py | MIT |
def evaluation(self, detections, output_dir):
"""kitti evaluation is very slow, remove it.
"""
# res_kitti = self.evaluation_kitti(detections, output_dir)
res_nusc = self.evaluation_nusc(detections, output_dir)
res = {
"results": {
"nusc": res_nusc["re... | kitti evaluation is very slow, remove it.
| evaluation | python | traveller59/second.pytorch | second/data/nuscenes_dataset.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/nuscenes_dataset.py | MIT |
def prep_pointcloud(input_dict,
root_path,
voxel_generator,
target_assigner,
db_sampler=None,
max_voxels=20000,
remove_outside_points=False,
training=True,
crea... | convert point cloud to voxels, create targets if ground truths
exists.
input_dict format: dataset.get_sensor_data format
| prep_pointcloud | python | traveller59/second.pytorch | second/data/preprocess.py | https://github.com/traveller59/second.pytorch/blob/master/second/data/preprocess.py | MIT |
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
"""
a = self._GetNdArray(a)
b = self._G... | Asserts that two numpy arrays have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
| assertAllEqual | python | traveller59/second.pytorch | second/framework/test.py | https://github.com/traveller59/second.pytorch/blob/master/second/framework/test.py | MIT |
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
a: The expected numpy ndarray (or anything can be converted to one), or
dict of same. Must be a dict iff `b` i... | Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
a: The expected numpy ndarray (or anything can be converted to one), or
dict of same. Must be a dict iff `b` is a dict.
b: The actual numpy ndarray (or anything can be... | assertAllClose | python | traveller59/second.pytorch | second/framework/test.py | https://github.com/traveller59/second.pytorch/blob/master/second/framework/test.py | MIT |
def onColorPicker(self):
'''
Show color-picker dialog to select color.
Qt will use the native dialog by default.
'''
dlg = QColorDialog(self)
if self._color:
dlg.setCurrentColor(QColor(self._color))
if dlg.exec_():
# self.setColor(dlg.cu... |
Show color-picker dialog to select color.
Qt will use the native dialog by default.
| onColorPicker | python | traveller59/second.pytorch | second/kittiviewer/control_panel.py | https://github.com/traveller59/second.pytorch/blob/master/second/kittiviewer/control_panel.py | MIT |
def train(config_path,
model_dir,
result_path=None,
create_folder=False,
display_step=50,
summary_step=5,
pretrained_path=None,
pretrained_include=None,
pretrained_exclude=None,
freeze_include=None,
freeze_exclude=None,
... | train a VoxelNet model specified by a config file.
| train | python | traveller59/second.pytorch | second/pytorch/train.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/train.py | MIT |
def evaluate(config_path,
model_dir=None,
result_path=None,
ckpt_path=None,
measure_time=False,
batch_size=None,
**kwargs):
"""Don't support pickle_result anymore. if you want to generate kitti label file,
please use kitti_anno_to_lab... | Don't support pickle_result anymore. if you want to generate kitti label file,
please use kitti_anno_to_label_file and convert_detection_to_kitti_annos
in second.data.kitti_dataset.
| evaluate | python | traveller59/second.pytorch | second/pytorch/train.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/train.py | MIT |
def helper_tune_target_assigner(config_path, target_rate=None, update_freq=200, update_delta=0.01, num_tune_epoch=5):
"""get information of target assign to tune thresholds in anchor generator.
"""
if isinstance(config_path, str):
# directly provide a config object. this usually used
# w... | get information of target assign to tune thresholds in anchor generator.
| helper_tune_target_assigner | python | traveller59/second.pytorch | second/pytorch/train.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/train.py | MIT |
def build(loss_config):
"""Build losses based on the config.
Builds classification, localization losses and optionally a hard example miner
based on the config.
Args:
loss_config: A losses_pb2.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization... | Build losses based on the config.
Builds classification, localization losses and optionally a hard example miner
based on the config.
Args:
loss_config: A losses_pb2.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classificat... | build | python | traveller59/second.pytorch | second/pytorch/builder/losses_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py | MIT |
def build_faster_rcnn_classification_loss(loss_config):
"""Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(los... | Builds a classification loss for Faster RCNN based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
| build_faster_rcnn_classification_loss | python | traveller59/second.pytorch | second/pytorch/builder/losses_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py | MIT |
def _build_localization_loss(loss_config):
"""Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.Localization... | Builds a localization loss based on the loss config.
Args:
loss_config: A losses_pb2.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
| _build_localization_loss | python | traveller59/second.pytorch | second/pytorch/builder/losses_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py | MIT |
def _build_classification_loss(loss_config):
"""Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if not isinstance(loss_config, losses_pb2.Classi... | Builds a classification loss based on the loss config.
Args:
loss_config: A losses_pb2.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
| _build_classification_loss | python | traveller59/second.pytorch | second/pytorch/builder/losses_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/losses_builder.py | MIT |
def build(optimizer_config, optimizer, total_step):
"""Create lr scheduler based on config. note that
lr_scheduler must accept a optimizer that has been restored.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: wh... | Create lr scheduler based on config. note that
lr_scheduler must accept a optimizer that has been restored.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
| build | python | traveller59/second.pytorch | second/pytorch/builder/lr_scheduler_builder.py | https://github.com/traveller59/second.pytorch/blob/master/second/pytorch/builder/lr_scheduler_builder.py | MIT |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.