Dataset schema: each record below lists eight fields, one per line, in this order.
code (string, length 66 to 870k)
docstring (string, length 19 to 26.7k)
func_name (string, length 1 to 138)
language (string, 1 distinct value)
repo (string, length 7 to 68)
path (string, length 5 to 324)
url (string, length 46 to 389)
license (string, 7 distinct values)
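A minimal sketch of consuming records with this schema, assuming the rows have been exported to a local JSON-lines file (the file name `code_docstring.jsonl` is hypothetical; this dump does not name the dataset):

```python
import json

# Hypothetical export of the rows below: one JSON object per line
# carrying the eight fields listed in the schema above.
with open("code_docstring.jsonl", "r", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

# Example query: function names of Apache-2.0 rows from PaddlePaddle/models.
names = [
    r["func_name"]
    for r in records
    if r["repo"] == "PaddlePaddle/models" and r["license"] == "Apache-2.0"
]
print(len(records), names[:5])
```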
def accumulate(self): """ Accumulate metric results and calculate mAP """ mAP = 0. valid_cnt = 0 eval_results = [] for score_pos, count in zip(self.class_score_poss, self.class_gt_counts): if count == 0: continue ...
Accumulate metric results and calculate mAP
accumulate
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
Apache-2.0
def _get_tp_fp_accum(self, score_pos_list): """ Calculate accumulated true/false positive results from [score, pos] records """ sorted_list = sorted(score_pos_list, key=lambda s: s[0], reverse=True) accum_tp = 0 accum_fp = 0 accum_tp_list = [] acc...
Calculate accumulated true/false positive results from [score, pos] records
_get_tp_fp_accum
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
Apache-2.0
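The truncated body above follows a standard pattern: sort the (score, pos) records by score, then accumulate running TP/FP counts. A sketch of that pattern (variable names mirror the visible prefix; the upstream body may differ past the truncation):

```python
def get_tp_fp_accum(score_pos_list):
    """Accumulate true/false positives over score-sorted (score, pos) records."""
    sorted_list = sorted(score_pos_list, key=lambda s: s[0], reverse=True)
    accum_tp, accum_fp = 0, 0
    accum_tp_list, accum_fp_list = [], []
    for _, pos in sorted_list:
        accum_tp += int(pos)      # pos == 1 marks a true positive
        accum_fp += 1 - int(pos)  # pos == 0 marks a false positive
        accum_tp_list.append(accum_tp)
        accum_fp_list.append(accum_fp)
    return accum_tp_list, accum_fp_list
```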
def ap_per_class(tp, conf, pred_cls, target_cls): """ Computes the average precision, given the recall and precision curves. Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics. Args: tp (list): True positives. conf (list): Objectness value from 0-1. ...
Computes the average precision, given the recall and precision curves. Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics. Args: tp (list): True positives. conf (list): Objectness value from 0-1. pred_cls (list): Predicted object classes. t...
ap_per_class
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
Apache-2.0
def compute_ap(recall, precision): """ Computes the average precision, given the recall and precision curves. Code originally from https://github.com/rbgirshick/py-faster-rcnn. Args: recall (list): The recall curve. precision (list): The precision curve. Returns: The av...
Computes the average precision, given the recall and precision curves. Code originally from https://github.com/rbgirshick/py-faster-rcnn. Args: recall (list): The recall curve. precision (list): The precision curve. Returns: The average precision as computed in py-faster-r...
compute_ap
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/metrics/map_utils.py
Apache-2.0
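The py-faster-rcnn AP computation this docstring references is well known: pad the curves with sentinel values, make precision monotonically non-increasing, then integrate over the recall steps. A sketch of that method:

```python
import numpy as np

def compute_ap(recall, precision):
    # Sentinel values: recall spans [0, 1], precision ends at 0.
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    # Precision envelope: make the curve monotonically non-increasing.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum the rectangle areas where recall changes.
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
```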
def __init__(self, width, num_joints, backbone='HRNet', loss='KeyPointMSELoss', post_process='HRNetPostProcess', flip_perm=None, flip=True, shift_heatmap=True, use_dar...
HRNet network, see https://arxiv.org/abs/1902.09212 Args: backbone (nn.Layer): backbone instance post_process (object): `HRNetPostProcess` instance flip_perm (list): The left-right joints exchange order list use_dark(bool): Whether to use DARK in post pr...
__init__
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
Apache-2.0
def get_max_preds(self, heatmaps): '''get predictions from score maps Args: heatmaps: numpy.ndarray([batch_size, num_joints, height, width]) Returns: preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords maxvals: numpy.ndarray([batch_size, num_...
get predictions from score maps Args: heatmaps: numpy.ndarray([batch_size, num_joints, height, width]) Returns: preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the key...
get_max_preds
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
Apache-2.0
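The per-joint argmax this docstring describes is a short NumPy operation. A sketch, simplified relative to the truncated upstream body:

```python
import numpy as np

def get_max_preds(heatmaps):
    batch, joints, h, w = heatmaps.shape
    flat = heatmaps.reshape(batch, joints, -1)
    idx = np.argmax(flat, axis=2)
    maxvals = np.amax(flat, axis=2).reshape(batch, joints, 1)
    # Convert flat indices back to (x, y) coordinates.
    preds = np.stack([idx % w, idx // w], axis=-1).astype(np.float32)
    # Suppress joints whose peak confidence is not positive.
    preds *= (maxvals > 0)
    return preds, maxvals
```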
def dark_postprocess(self, hm, coords, kernelsize): '''DARK postpocessing, Zhang et al. Distribution-Aware Coordinate Representation for Human Pose Estimation (CVPR 2020). ''' hm = self.gaussian_blur(hm, kernelsize) hm = np.maximum(hm, 1e-10) hm = np.log(hm) for ...
DARK postpocessing, Zhang et al. Distribution-Aware Coordinate Representation for Human Pose Estimation (CVPR 2020).
dark_postprocess
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
Apache-2.0
def get_final_preds(self, heatmaps, center, scale, kernelsize=3): """The highest heat-value location with a quarter offset in the direction from the highest response to the second highest response. Args: heatmaps (numpy.ndarray): The predicted heatmaps center (numpy.ndarr...
The highest heat-value location with a quarter offset in the direction from the highest response to the second highest response. Args: heatmaps (numpy.ndarray): The predicted heatmaps center (numpy.ndarray): The boxes center scale (numpy.ndarray): The scale factor ...
get_final_preds
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/keypoint_hrnet.py
Apache-2.0
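The quarter-offset refinement named in this docstring nudges each argmax coordinate 0.25 pixel toward the neighboring-response gradient. A sketch of that step for a single joint, assuming `coord` came from an argmax over heatmap `hm`:

```python
import numpy as np

def quarter_offset(hm, coord):
    """Shift an argmax coordinate by 0.25 px toward the stronger neighbor."""
    px, py = int(coord[0]), int(coord[1])
    h, w = hm.shape
    if 1 < px < w - 1 and 1 < py < h - 1:
        diff = np.array([hm[py][px + 1] - hm[py][px - 1],
                         hm[py + 1][px] - hm[py - 1][px]])
        coord = coord + np.sign(diff) * 0.25
    return coord
```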
def __init__(self, use_target_weight=True, loss_scale=0.5): """ KeyPointMSELoss layer Args: use_target_weight (bool): whether to use target weight """ super(KeyPointMSELoss, self).__init__() self.criterion = nn.MSELoss(reduction='mean') self.use_targe...
KeyPointMSELoss layer Args: use_target_weight (bool): whether to use target weight
__init__
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/models/loss.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/models/loss.py
Apache-2.0
def check_gpu(use_gpu): """ Log an error and exit when use_gpu is set to true under the PaddlePaddle CPU version. """ err = "Config use_gpu cannot be set as true while you are " \ "using paddlepaddle cpu version ! \nPlease try: \n" \ "\t1. Install paddlepaddle-gpu to run model on GPU \n" \ ...
Log an error and exit when use_gpu is set to true under the PaddlePaddle CPU version.
check_gpu
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py
Apache-2.0
def check_version(version='2.0'): """ Log an error and exit when the installed version of PaddlePaddle does not satisfy the requirement. """ err = "PaddlePaddle version {} or higher is required, " \ "or a suitable develop version is satisfied as well. \n" \ "Please make sure the version is good wit...
Log an error and exit when the installed version of PaddlePaddle does not satisfy the requirement.
check_version
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py
Apache-2.0
def check_config(cfg): """ Check the correctness of the configuration file. Log error and exit when Config is not compliant. """ err = "'{}' not specified in config file. Please set it in config file." check_list = ['architecture', 'num_classes'] try: for var in check_list: ...
Check the correctness of the configuration file. Log error and exit when Config is not compliant.
check_config
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/check.py
Apache-2.0
def is_url(path): """ Whether path is URL. Args: path (string): URL string or not. """ return path.startswith('http://') \ or path.startswith('https://') \ or path.startswith('ppdet://')
Whether path is URL. Args: path (string): URL string or not.
is_url
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py
Apache-2.0
def match_state_dict(model_state_dict, weight_state_dict): """ Match between the model state dict and pretrained weight state dict. Return the matched state dict. The method assumes that all the names in the pretrained weight state dict are a subset of the names in the model's state dict, if the prefix 'backbone.' i...
Match between the model state dict and pretrained weight state dict. Return the matched state dict. The method assumes that all the names in the pretrained weight state dict are a subset of the names in the model's state dict, if the prefix 'backbone.' in pretrained weight keys is stripped. And we could get the can...
match_state_dict
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py
Apache-2.0
def save_model(model, optimizer, save_dir, save_name, last_epoch): """ Save the model to disk. Args: model (paddle.nn.Layer): the Layer instance to save parameters. optimizer (paddle.optimizer.Optimizer): the Optimizer instance to save optimizer states. save_dir (str): the...
Save the model to disk. Args: model (paddle.nn.Layer): the Layer instance to save parameters. optimizer (paddle.optimizer.Optimizer): the Optimizer instance to save optimizer states. save_dir (str): the directory to save into. save_name (str): the name to save as. ...
save_model
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/checkpoint.py
Apache-2.0
def get_weights_path(url): """Get the weights path from WEIGHTS_HOME; if it does not exist, download it from url. """ url = parse_url(url) path, _ = get_path(url, WEIGHTS_HOME) return path
Get the weights path from WEIGHTS_HOME; if it does not exist, download it from url.
get_weights_path
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def get_config_path(url): """Get the config path from CONFIGS_HOME; if it does not exist, download it from url. """ url = parse_url(url) path = map_path(url, CONFIGS_HOME, path_depth=2) if os.path.isfile(path): return path # config file not found, try download # 1. clear configs directory...
Get the config path from CONFIGS_HOME; if it does not exist, download it from url.
get_config_path
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def get_dataset_path(path, annotation, image_dir): """ If path exists, return it. Otherwise, get the dataset path from DATASET_HOME; if it does not exist there, download it. """ if _dataset_exists(path, annotation, image_dir): return path logger.info( "Dataset {} is not valid for reason ab...
If path exists, return it. Otherwise, get the dataset path from DATASET_HOME; if it does not exist there, download it.
get_dataset_path
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def get_path(url, root_dir, md5sum=None, check_exist=True): """ Download from the given url to root_dir. If the file or directory specified by url exists under root_dir, return the path directly; otherwise download from url, decompress it, and return the path. url (str): download url root_dir (str): ...
Download from the given url to root_dir. If the file or directory specified by url exists under root_dir, return the path directly; otherwise download from url, decompress it, and return the path. url (str): download url root_dir (str): root dir for downloading, it should be WEIGHTS_...
get_path
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def _dataset_exists(path, annotation, image_dir): """ Check if the user-defined dataset exists """ if not osp.exists(path): logger.warning("Config dataset_dir {} does not exist, " "dataset config is not valid".format(path)) return False if annotation: annotat...
Check if the user-defined dataset exists
_dataset_exists
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def _download(url, path, md5sum=None): """ Download from url, save to path. url (str): download url path (str): download to given path """ if not osp.exists(path): os.makedirs(path) fname = osp.split(url)[-1] fullname = osp.join(path, fname) retry_cnt = 0 while not (os...
Download from url, save to path. url (str): download url path (str): download to given path
_download
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def _decompress(fname): """ Decompress zip and tar files """ logger.info("Decompressing {}...".format(fname)) # To guard against interrupted decompression, decompress # to the fpath_tmp directory first; if decompression # succeeds, move the decompressed files to fpath and delete # fpath_tmp and r...
Decompress zip and tar files
_decompress
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def _move_and_merge_tree(src, dst): """ Move the src directory to dst; if dst already exists, merge src into dst """ if not osp.exists(dst): shutil.move(src, dst) elif osp.isfile(src): shutil.move(src, dst) else: for fp in os.listdir(src): src_fp = osp.join(s...
Move the src directory to dst; if dst already exists, merge src into dst
_move_and_merge_tree
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/download.py
Apache-2.0
def get_affine_transform(center, input_size, rot, output_size, shift=(0., 0.), inv=False): """Get the affine transform matrix, given the center/scale/rot/output_size. Args: cente...
Get the affine transform matrix, given the center/scale/rot/output_size. Args: center (np.ndarray[2, ]): Center of the bounding box (x, y). input_size (np.ndarray[2, ]): Size of input feature (width, height). rot (float): Rotation angle (degree). output_size (np.ndarray[2, ]): Size ...
get_affine_transform
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
Apache-2.0
def get_warp_matrix(theta, size_input, size_dst, size_target): """Calculate the transformation matrix under the constraint of unbiased. Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: theta (float): Rotation...
Calculate the transformation matrix under the constraint of unbiased. Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: theta (float): Rotation angle in degrees. size_input (np.ndarray): Size of input imag...
get_warp_matrix
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
Apache-2.0
def _get_3rd_point(a, b): """To calculate the affine matrix, three pairs of points are required. This function is used to get the 3rd point, given 2D points a & b. The 3rd point is defined by rotating vector `a - b` by 90 degrees anticlockwise, using b as the rotation center. Args: a (np.n...
To calculate the affine matrix, three pairs of points are required. This function is used to get the 3rd point, given 2D points a & b. The 3rd point is defined by rotating vector `a - b` by 90 degrees anticlockwise, using b as the rotation center. Args: a (np.ndarray): point(x,y) b (np...
_get_3rd_point
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
Apache-2.0
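The construction in this docstring is a single 90-degree anticlockwise rotation of `a - b` about `b`. A sketch:

```python
import numpy as np

def third_point(a, b):
    d = a - b
    # Rotating a vector (x, y) by 90 degrees anticlockwise gives (-y, x).
    return b + np.array([-d[1], d[0]], dtype=a.dtype)
```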
def rotate_point(pt, angle_rad): """Rotate a point by an angle. Args: pt (list[float]): 2-dimensional point to be rotated angle_rad (float): rotation angle in radians Returns: list[float]: Rotated point. """ assert len(pt) == 2 sn, cs = np.sin(angle_rad), np.cos(angle_ra...
Rotate a point by an angle. Args: pt (list[float]): 2-dimensional point to be rotated angle_rad (float): rotation angle in radians Returns: list[float]: Rotated point.
rotate_point
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
Apache-2.0
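The rotation itself is the usual 2-D rotation matrix applied to the point; the visible prefix suggests exactly this, so the sketch below should be close to the full body:

```python
import numpy as np

def rotate_point(pt, angle_rad):
    sn, cs = np.sin(angle_rad), np.cos(angle_rad)
    x, y = pt
    # [x', y'] = R(angle) @ [x, y]
    return [x * cs - y * sn, x * sn + y * cs]
```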
def warp_affine_joints(joints, mat): """Apply affine transformation defined by the transform matrix on the joints. Args: joints (np.ndarray[..., 2]): Origin coordinate of joints. mat (np.ndarray[3, 2]): The affine matrix. Returns: matrix (np.ndarray[..., 2]): Result coordinate ...
Apply affine transformation defined by the transform matrix on the joints. Args: joints (np.ndarray[..., 2]): Origin coordinate of joints. mat (np.ndarray[3, 2]): The affine matrix. Returns: matrix (np.ndarray[..., 2]): Result coordinate of joints.
warp_affine_joints
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
Apache-2.0
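Applying a (3, 2) affine matrix to points means appending a homogeneous 1 to each coordinate pair and right-multiplying. A sketch consistent with the docstring's shapes:

```python
import numpy as np

def warp_affine_joints(joints, mat):
    joints = np.asarray(joints, dtype=np.float64)
    shape = joints.shape
    pts = joints.reshape(-1, 2)
    ones = np.ones((pts.shape[0], 1))
    # Homogeneous coordinates (N, 3) times affine matrix (3, 2) -> (N, 2).
    return np.concatenate((pts, ones), axis=1).dot(mat).reshape(shape)
```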
def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None): """Greedily select boxes with high confidence whose overlap with the current maximum is <= thresh; rule out boxes with overlap >= thresh. Args: kpts_db (list): The predicted keypoints within the image thresh (float): The threshold to select the boxes ...
Greedily select boxes with high confidence whose overlap with the current maximum is <= thresh; rule out boxes with overlap >= thresh. Args: kpts_db (list): The predicted keypoints within the image thresh (float): The threshold to select the boxes sigmas (np.array): The variance to calculate the oks iou ...
oks_nms
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
Apache-2.0
def soft_oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None): """Greedily select boxes with high confidence whose overlap with the current maximum is <= thresh; rule out boxes with overlap >= thresh. Args: kpts_db (list): The predicted keypoints within the image thresh (float): The threshold to select the bo...
Greedily select boxes with high confidence whose overlap with the current maximum is <= thresh; rule out boxes with overlap >= thresh. Args: kpts_db (list): The predicted keypoints within the image thresh (float): The threshold to select the boxes sigmas (np.array): The variance to calculate the oks iou ...
soft_oks_nms
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/keypoint_utils.py
Apache-2.0
def setup_logger(name="ppdet", output=None): """ Initialize logger and set its verbosity level to INFO. Args: output (str): a file name or a directory to save log. If None, will not save log file. If ends with ".txt" or ".log", assumed to be a file name. Otherwise, logs will ...
Initialize logger and set its verbosity level to INFO. Args: output (str): a file name or a directory to save log. If None, will not save log file. If ends with ".txt" or ".log", assumed to be a file name. Otherwise, logs will be saved to `output/log.txt`. name (str): th...
setup_logger
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/logger.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/logger.py
Apache-2.0
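A minimal sketch of the logger setup this docstring describes, using only the standard `logging` module (the format string is an assumption, not the upstream one):

```python
import logging
import os
import sys

def setup_logger(name="ppdet", output=None):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter("[%(asctime)s] %(name)s %(levelname)s: %(message)s")
    stream = logging.StreamHandler(stream=sys.stdout)
    stream.setFormatter(fmt)
    logger.addHandler(stream)
    if output is not None:
        # A ".txt"/".log" suffix names a file; anything else is a directory.
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        os.makedirs(os.path.dirname(filename) or ".", exist_ok=True)
        file_handler = logging.FileHandler(filename)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    return logger
```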
def colormap(rgb=False): """ Get colormap The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py """ color_list = np.array([ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.46...
Get colormap The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py
colormap
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/visualizer.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/visualizer.py
Apache-2.0
def load_config(file_path): """ Load config from file. Args: file_path (str): Path of the config file to be loaded. Returns: global config """ _, ext = os.path.splitext(file_path) assert ext in ['.yml', '.yaml'], "only support yaml files for now" # load config from file and me...
Load config from file. Args: file_path (str): Path of the config file to be loaded. Returns: global config
load_config
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
Apache-2.0
def dict_merge(dct, merge_dct): """ Recursive dict merge. Inspired by ``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. Args: dct: dict onto which the m...
Recursive dict merge. Inspired by ``dict.update()``, instead of updating only top-level keys, dict_merge recurses down into dicts nested to an arbitrary depth, updating keys. The ``merge_dct`` is merged into ``dct``. Args: dct: dict onto which the merge is executed merge_dct: dct...
dict_merge
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
Apache-2.0
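The recursion the docstring describes is short. A sketch: keys whose values are both dicts recurse, everything else is overwritten in place:

```python
def dict_merge(dct, merge_dct):
    for k, v in merge_dct.items():
        if k in dct and isinstance(dct[k], dict) and isinstance(v, dict):
            dict_merge(dct[k], v)  # recurse into nested dicts
        else:
            dct[k] = v  # non-dict (or new) keys are simply overwritten
    return dct
```

The `merge_config` record that follows is then a thin wrapper that picks either the global config or another dict as the merge target.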
def merge_config(config, another_cfg=None): """ Merge config into global config or another_cfg. Args: config (dict): Config to be merged. Returns: global config """ global global_config dct = another_cfg or global_config return dict_merge(dct, config)
Merge config into global config or another_cfg. Args: config (dict): Config to be merged. Returns: global config
merge_config
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
Apache-2.0
def register(cls): """ Register a given module class. Args: cls (type): Module class to be registered. Returns: cls """ if cls.__name__ in global_config: raise ValueError("Module class already registered: {}".format( cls.__name__)) if hasattr(cls, '__op__'): ...
Register a given module class. Args: cls (type): Module class to be registered. Returns: cls
register
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
Apache-2.0
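A sketch of the registry pattern visible in this record: the decorator refuses duplicate names and stores the class in a shared config mapping. This is simplified; the truncated tail's `__op__` handling is omitted, and the upstream version may store an extracted schema rather than the class itself:

```python
global_config = {}

def register(cls):
    if cls.__name__ in global_config:
        raise ValueError("Module class already registered: {}".format(
            cls.__name__))
    global_config[cls.__name__] = cls
    return cls

# Usage: decorating a class makes it creatable by name later.
@register
class ExampleModule:  # hypothetical class for illustration
    pass
```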
def create(cls_or_name, **kwargs): """ Create an instance of given module class. Args: cls_or_name (type or str): Class of which to create instance. Returns: instance of type `cls_or_name` """ assert type(cls_or_name) in [type, str ], "should be a class...
Create an instance of given module class. Args: cls_or_name (type or str): Class of which to create instance. Returns: instance of type `cls_or_name`
create
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/workspace.py
Apache-2.0
def extract_schema(cls): """ Extract schema from a given class Args: cls (type): Class from which to extract. Returns: schema (SchemaDict): Extracted schema. """ ctor = cls.__init__ # python 2 compatibility if hasattr(inspect, 'getfullargspec'): argspec = inspec...
Extract schema from a given class Args: cls (type): Class from which to extract. Returns: schema (SchemaDict): Extracted schema.
extract_schema
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/config/schema.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/config/schema.py
Apache-2.0
def serializable(cls): """ Add loader and dumper for given class, which must be "trivially serializable" Args: cls: class to be serialized Returns: cls """ yaml.add_constructor(u'!{}'.format(cls.__name__), _make_python_constructor(cls)) yaml.add_represe...
Add loader and dumper for given class, which must be "trivially serializable" Args: cls: class to be serialized Returns: cls
serializable
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/lib/utils/config/yaml_helpers.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/lib/utils/config/yaml_helpers.py
Apache-2.0
def get_test_images(infer_dir, infer_img): """ Get image path list in TEST mode """ assert infer_img is not None or infer_dir is not None, \ "--infer_img or --infer_dir should be set" assert infer_img is None or os.path.isfile(infer_img), \ "{} is not a file".format(infer_img) ...
Get image path list in TEST mode
get_test_images
python
PaddlePaddle/models
tutorials/pp-series/HRNet-Keypoint/tools/infer.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/pp-series/HRNet-Keypoint/tools/infer.py
Apache-2.0
def get_args(add_help=True): """get_args Parse all args using argparse lib Args: add_help: Whether to add -h option on args Returns: An object which contains many parameters used for inference. """ import argparse parser = argparse.ArgumentParser( description='Padd...
get_args Parse all args using argparse lib Args: add_help: Whether to add -h option on args Returns: An object which contains many parameters used for inference.
get_args
python
PaddlePaddle/models
tutorials/tipc/train_infer_python/template/code/export_model.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/tipc/train_infer_python/template/code/export_model.py
Apache-2.0
def export(args): """export export inference model using jit.save Args: args: Parameters generated using argparser. Returns: None """ model = build_model(args) # decorate model with jit.save model = paddle.jit.to_static( model, input_spec=[ InputSp...
export export inference model using jit.save Args: args: Parameters generated using argparser. Returns: None
export
python
PaddlePaddle/models
tutorials/tipc/train_infer_python/template/code/export_model.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/tipc/train_infer_python/template/code/export_model.py
Apache-2.0
def infer_main(args): """infer_main Main inference function. Args: args: Parameters generated using argparser. Returns: class_id: Class index of the input. prob: Probability of the input. """ # init inference engine inference_engine = InferenceEngine(args) #...
infer_main Main inference function. Args: args: Parameters generated using argparser. Returns: class_id: Class index of the input. prob: Probability of the input.
infer_main
python
PaddlePaddle/models
tutorials/tipc/train_infer_python/template/code/infer.py
https://github.com/PaddlePaddle/models/blob/master/tutorials/tipc/train_infer_python/template/code/infer.py
Apache-2.0
def pytest_configure(config): """Pytest configuration hook to help reproduce test segfaults Sets and outputs rng seeds. The segfault-debug procedure on a module called test_module.py is: 1. run "pytest --verbose test_module.py". A seg-faulting output might be: [INFO] np, mx and python random...
Pytest configuration hook to help reproduce test segfaults Sets and outputs rng seeds. The segfault-debug procedure on a module called test_module.py is: 1. run "pytest --verbose test_module.py". A seg-faulting output might be: [INFO] np, mx and python random seeds = 4018804151 test_modul...
pytest_configure
python
dmlc/gluon-nlp
conftest.py
https://github.com/dmlc/gluon-nlp/blob/master/conftest.py
Apache-2.0
def pytest_runtest_makereport(item, call): """Make test outcome available to fixture. https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures """ # execute all other hooks to obtain the report object outcome = yield rep = outcome.get_result() ...
Make test outcome available to fixture. https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
pytest_runtest_makereport
python
dmlc/gluon-nlp
conftest.py
https://github.com/dmlc/gluon-nlp/blob/master/conftest.py
Apache-2.0
def function_scope_seed(request): """A function scope fixture that manages rng seeds. This fixture automatically initializes the python, numpy and mxnet random number generators randomly on every test run. def test_ok_with_random_data(): ... To fix the seed used for a test case mark the t...
A function scope fixture that manages rng seeds. This fixture automatically initializes the python, numpy and mxnet random number generators randomly on every test run. def test_ok_with_random_data(): ... To fix the seed used for a test case mark the test function with the desired seed: ...
function_scope_seed
python
dmlc/gluon-nlp
conftest.py
https://github.com/dmlc/gluon-nlp/blob/master/conftest.py
Apache-2.0
def predict_extended(original_feature, chunked_features, results, n_best_size, max_answer_length=64, start_top_n=5, end_top_n=5): """Get prediction results for SQuAD. Start Logits: (B, ...
Get prediction results for SQuAD. Start Logits: (B, N_start) End Logits: (B, N_start, N_end) Parameters ---------- original_feature: The original SquadFeature before chunked chunked_features List of ChunkFeatures results List of model predictions for span start and ...
predict_extended
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def get_end_logits(self, contextual_embedding, start_positions, p_mask): """ Parameters ---------- contextual_embedding Shape (batch_size, sequence_length, C) start_positions Shape (batch_size, N) We process multiple candidates simultaneously ...
Parameters ---------- contextual_embedding Shape (batch_size, sequence_length, C) start_positions Shape (batch_size, N) We process multiple candidates simultaneously p_mask Shape (batch_size, sequence_length) Returns ...
get_end_logits
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def get_answerable_logits(self, contextual_embedding, p_mask): """Get the answerable logits. Parameters ---------- contextual_embedding Shape (batch_size, sequence_length, C) p_mask Shape (batch_size, sequence_length) Mask the sequence. ...
Get the answerable logits. Parameters ---------- contextual_embedding Shape (batch_size, sequence_length, C) p_mask Shape (batch_size, sequence_length) Mask the sequence. 0 --> Denote that the element is masked, 1 --> Denote th...
get_answerable_logits
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def forward(self, tokens, token_types, valid_length, p_mask, start_position): """ Parameters ---------- tokens Shape (batch_size, sequence_length) token_types Shape (batch_size, sequence_length) valid_length Shape (batch_size,) ...
Parameters ---------- tokens Shape (batch_size, sequence_length) token_types Shape (batch_size, sequence_length) valid_length Shape (batch_size,) p_mask Shape (batch_size, sequence_length) start_position ...
forward
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def inference(self, tokens, token_types, valid_length, p_mask, start_top_n: int = 5, end_top_n: int = 5): """Get the inference result with beam search Parameters ---------- tokens The input tokens. Shape (batch_size, sequence_length) token_types ...
Get the inference result with beam search Parameters ---------- tokens The input tokens. Shape (batch_size, sequence_length) token_types The input token types. Shape (batch_size, sequence_length) valid_length The valid length of the tokens. Sh...
inference
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def get_chunks(self, doc_stride, max_chunk_length=None): """Get a sequence of chunks for the squad feature. In reality, the document will be too long for the NLP model, and we will split it into multiple chunks. For example, consider the following Doc: the man went to the store...
Get a sequence of chunks for the squad feature. In reality, the document will be too long for the NLP model, and we will split it into multiple chunks. For example, consider the following Doc: the man went to the store and bought a gallon of milk We may divide it into four chu...
get_chunks
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace. This is from the official evaluate-v2.0.py in SQuAD. """ def remove_articles(text): regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) return re.sub(regex, ' ', text) def white_space_fix(te...
Lower text and remove punctuation, articles and extra whitespace. This is from the official evaluate-v2.0.py in SQuAD.
normalize_answer
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
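The normalization chain from the official SQuAD evaluate-v2.0.py is: lowercase, strip punctuation, drop articles, collapse whitespace. A sketch:

```python
import re
import string

def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    return white_space_fix(remove_articles(remove_punc(s.lower())))
```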
def get_squad_examples_from_json(json_file: str, is_training: bool) -> List[SquadExample]: """ Read the whole entry of raw json file and convert it to examples. Parameters ---------- json_file The path to the json file is_training Whether or not training Returns -------...
Read the whole entry of raw json file and convert it to examples. Parameters ---------- json_file The path to the json file is_training Whether or not training Returns ------- ret List of SquadExample objects
get_squad_examples_from_json
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def convert_squad_example_to_feature(example: SquadExample, tokenizer: BaseTokenizerWithVocab, is_training: bool): """ Convert a SquadExample object to a SquadFeature object with the designated tokenizer. There are actually a few examp...
Convert a SquadExample object to a SquadFeature object with the designated tokenizer. There are actually a few examples that cannot be converted properly with token-level tokenization, because the ground truth is given by the start position and the answer text, and some examples are annotated with wrong lab...
convert_squad_example_to_feature
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def __init__(self, tokenizer, doc_stride, max_seq_length, max_query_length): """ Parameters ---------- tokenizer The tokenizer doc_stride The stride to chunk the document max_seq_length Maximum length of the merged data max_que...
Parameters ---------- tokenizer The tokenizer doc_stride The stride to chunk the document max_seq_length Maximum length of the merged data max_query_length Maximum query length
__init__
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def process_sample(self, feature: SquadFeature): """Process the data to the following format. Note that we mask all the special tokens except the CLS token. The reason for not masking the CLS token is that if the question is not answerable, we will set the start and end to be 0. ...
Process the data to the following format. Note that we mask all the special tokens except the CLS token. The reason for not masking the CLS token is that if the question is not answerable, we will set the start and end to be 0. Merged: <CLS> Question <SEP> Context <SEP> S...
process_sample
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def get_train(self, features, skip_unreliable=True): """Get the training dataset Parameters ---------- features skip_unreliable Whether to skip the unreliable spans in the training set Returns ------- train_dataset num_token_answer_mi...
Get the training dataset Parameters ---------- features skip_unreliable Whether to skip the unreliable spans in the training set Returns ------- train_dataset num_token_answer_mismatch num_unreliable
get_train
python
dmlc/gluon-nlp
docs/tutorials/question_answering/squad_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/docs/tutorials/question_answering/squad_utils.py
Apache-2.0
def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]: """ This function wraps another function into its own separate process. In order to ensure accurate memory measurements it is important that the function is executed in a separate pro...
This function wraps another function into its own separate process. In order to ensure accurate memory measurements it is important that the function is executed in a separate process Args: - `func`: (`callable`): function() -> ... generic function which wi...
separate_process_wrapper_fn
python
dmlc/gluon-nlp
scripts/benchmarks/benchmark_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py
Apache-2.0
def get_cpu_memory(process_id: int) -> int: """ Measures the current CPU memory usage of a given `process_id` Args: - `process_id`: (`int`) process_id for which to measure memory Returns - `memory`: (`int`) consumed memory in bytes """...
Measures the current CPU memory usage of a given `process_id` Args: - `process_id`: (`int`) process_id for which to measure memory Returns - `memory`: (`int`) consumed memory in bytes
get_cpu_memory
python
dmlc/gluon-nlp
scripts/benchmarks/benchmark_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py
Apache-2.0
def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int: """ measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and at most 20 * interval seconds. This function is heavily inspired by: ...
measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profi...
measure_peak_memory_cpu
python
dmlc/gluon-nlp
scripts/benchmarks/benchmark_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py
Apache-2.0
def traceit(frame, event, args): """ Tracing method executed before running each line in a module or sub-module Record memory allocated in a list with debugging information """ global _is_memory_tracing_enabled if not _is_memory_tracing_enabled: return traceit ...
Tracing method executed before running each line in a module or sub-module Record memory allocated in a list with debugging information
traceit
python
dmlc/gluon-nlp
scripts/benchmarks/benchmark_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py
Apache-2.0
def stop_memory_tracing( memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True ) -> Optional[MemorySummary]: """ Stop memory tracing cleanly and return a summary of the memory trace if a trace is given. Args: - `memory_trace` (optional output of start_memory_tracin...
Stop memory tracing cleanly and return a summary of the memory trace if a trace is given. Args: - `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary - `ignore_released_memory` (boolean, default: True): if True we only sum memory ...
stop_memory_tracing
python
dmlc/gluon-nlp
scripts/benchmarks/benchmark_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py
Apache-2.0
def __init__(self, workloads, model_names, use_fp16=False, repeat=3, use_gpu=True, device_idx=0, profile_inference=True, profile_train=True, env_print=True, to_csv=False, use_tvm=False, ...
Parameters ---------- workloads List of workloads to profile model_names List of model names to profile use_fp16 Whether to use fp16 repeat The number of repeat use_gpu Whether to use GPU device...
__init__
python
dmlc/gluon-nlp
scripts/benchmarks/benchmark_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/benchmarks/benchmark_utils.py
Apache-2.0
def get_network(model_name, ctx_l, checkpoint_path=None, backbone_path=None, task=None): """ Get the network that fine-tunes the Question Answering task """ use_segmentation = 'roberta' not in model_name and 'xlmr' not in model_name Mod...
Get the network that fine-tunes the Question Answering task
get_network
python
dmlc/gluon-nlp
scripts/classification/train_classification.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/classification/train_classification.py
Apache-2.0
def convert_tf_assets(tf_assets_dir, model_size, electra_path): """Convert the assets file including config, vocab and tokenizer model""" file_names = os.listdir(tf_assets_dir) vocab_path = None for ele in file_names: if ele.endswith('.txt'): assert vocab_path is None voc...
Convert the assets file including config, vocab and tokenizer model
convert_tf_assets
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_electra.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_electra.py
Apache-2.0
def get_name_map(tf_names, convert_type='backbone'): """ Get the converting mapping between tensor names and mxnet names. The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert, but there is no guarantee that it can match to other tf models in case of some special variable_scope (...
Get the converting mapping between tensor names and mxnet names. The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert, but there is no guarantee that it can match to other tf models in case of some special variable_scope (tensorflow) and prefix (mxnet). Redefined mapping is en...
get_name_map
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_electra.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_electra.py
Apache-2.0
def convert_qkv_weights(tf_prefix, mx_prefix): """ To convert the qkv weights with different prefix. In tensorflow framework, the prefix of query/key/value for the albert model is 'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel', ...
To convert the qkv weights with different prefix. In tensorflow framework, the prefix of query/key/value for the albert model is 'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel', and that for the bert model is 'bert/encoder/layer_{}/attenti...
convert_qkv_weights
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_electra.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_electra.py
Apache-2.0
def convert_tf_assets(tf_assets_dir): """Convert the assets file including config, vocab and tokenizer model""" file_names = os.listdir(tf_assets_dir) vocab_path = None json_cfg_path = None for ele in file_names: if ele.endswith('.txt'): assert vocab_path is None voca...
Convert the assets file including config, vocab and tokenizer model
convert_tf_assets
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_mobilebert.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_mobilebert.py
Apache-2.0
def get_name_map(tf_names, num_stacked_ffn): """ Get the converting mapping between tensor names and mxnet names. The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert, but there is no guarantee that it can match to other tf models in case of some special variable_scope (tensorfl...
Get the converting mapping between tensor names and mxnet names. The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert, but there is no guarantee that it can match to other tf models in case of some special variable_scope (tensorflow) and prefix (mxnet). Redefined mapping is en...
get_name_map
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_mobilebert.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_mobilebert.py
Apache-2.0
def convert_tf_assets(tf_assets_dir, model_type): """Convert the assets file including config, vocab and tokenizer model""" file_names = os.listdir(tf_assets_dir) json_cfg_path = None spm_model_path = None vocab_path = None for ele in file_names: if ele.endswith('.model'): as...
Convert the assets file including config, vocab and tokenizer model
convert_tf_assets
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_tf_hub_model.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_tf_hub_model.py
Apache-2.0
def get_name_map(tf_names, is_TF1=True): """ Get the converting mapping between TF names and mxnet names. The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert, but there is no guarantee that it can match to other tf models in case of some special variable_scope (tensorflow) and p...
Get the converting mapping between TF names and mxnet names. The above mapping CONVERT_MAP is effectively adaptive to Bert and Albert, but there is no guarantee that it can match to other tf models in case of some special variable_scope (tensorflow) and prefix (mxnet). Redefined mapping is encoura...
get_name_map
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_tf_hub_model.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_tf_hub_model.py
Apache-2.0
def convert_qkv_weights(tf_prefix, prefix, is_mlm): """ To convert the qkv weights with different prefix. In tensorflow framework, the prefix of query/key/value for the albert model is 'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel', and that for t...
To convert the qkv weights with different prefix. In tensorflow framework, the prefix of query/key/value for the albert model is 'bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel', and that for the bert model is 'bert/encoder/layer_{}/attention/self/key/bias...
convert_qkv_weights
python
dmlc/gluon-nlp
scripts/conversion_toolkits/convert_tf_hub_model.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/conversion_toolkits/convert_tf_hub_model.py
Apache-2.0
def get_hash_and_size(obj, retries=5, algorithm='sha1', cache=None, save_path=None, verify_ssl=True): """Fetch sha1 hash of all urls in the input obj""" def _get_hash_and_size(obj, retries, algorithm, cache=None, save_path=None): if isinstance(obj, str): if obj.startswi...
Fetch sha1 hash of all urls in the input obj
get_hash_and_size
python
dmlc/gluon-nlp
scripts/datasets/update_download_stats.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/update_download_stats.py
Apache-2.0
def build_vocab(corpus_path_l: List, eos_token: Optional[str] = '<eos>') -> Vocab: """Build the default vocabulary used in datasets like - wikitext2 - wikitext103 - text8 - enwiki8 The strategy is to split with white-space and store all appeared tokens. Also, the tokens wil...
Build the default vocabulary used in datasets like - wikitext2 - wikitext103 - text8 - enwiki8 The strategy is to split with white-space and store all tokens that appear. Also, the tokens will be sorted in descending order of their frequency. Parameters ---------- ...
build_vocab
python
dmlc/gluon-nlp
scripts/datasets/language_modeling/prepare_lm.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/language_modeling/prepare_lm.py
Apache-2.0
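A sketch of the whitespace-split, frequency-sorted vocabulary strategy this docstring describes, returning a plain token-to-index dict instead of the library's Vocab class (the handling of `eos_token` here is an assumption):

```python
from collections import Counter

def build_vocab(corpus_path_l, eos_token='<eos>'):
    counter = Counter()
    for corpus_path in corpus_path_l:
        with open(corpus_path, 'r', encoding='utf-8') as f:
            for line in f:
                counter.update(line.split())
    # Descending frequency, ties broken alphabetically for determinism.
    tokens = [t for t, _ in sorted(counter.items(),
                                   key=lambda kv: (-kv[1], kv[0]))]
    if eos_token is not None and eos_token not in counter:
        tokens.append(eos_token)
    return {tok: idx for idx, tok in enumerate(tokens)}
```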
def parse_sgm(path_or_buffer: Union[str, IO[AnyStr]], out_path_or_buffer: Optional[Union[str, IO[AnyStr]]] = None, return_sentences=False, clean_space=True) -> Optional[List[str]]: """Returns sentences from a single SGML file. This is compatible with the behavior of `inpu...
Returns sentences from a single SGML file. This is compatible with the behavior of `input-from-sgm.perl` in https://github.com/moses-smt/mosesdecoder/blob/a89691fee395bb7eb6dfd51e368825f0578f437d/scripts/ems/support/input-from-sgm.perl Parameters ---------- path_or_buffer The source path to p...
parse_sgm
python
dmlc/gluon-nlp
scripts/datasets/machine_translation/prepare_wmt.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py
Apache-2.0
def concatenate_files(fname_l: List[str], out_fname: Optional[str] = None, chunk_size: int = 128 * 1024) -> str: """Concatenate multiple files into a single file. This is used to recover a large file that has been split into multiple parts. E.g., UNv1.0.en-zh.tar...
Concatenate multiple files into a single file. This is used to recover a large file that has been split into multiple parts. E.g., UNv1.0.en-zh.tar.gz.00, UNv1.0.en-zh.tar.gz.01 --> UNv1.0.en-zh.tar.gz Parameters ---------- fname_l out_fname chunk_size Returns ------- ret
concatenate_files
python
dmlc/gluon-nlp
scripts/datasets/machine_translation/prepare_wmt.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py
Apache-2.0
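A sketch of the chunked concatenation the docstring describes; sorting the part names recovers the order of numeric suffixes like `.00`, `.01`. For simplicity `out_fname` is made required here, whereas the upstream signature derives a default:

```python
def concatenate_files(fname_l, out_fname, chunk_size=128 * 1024):
    with open(out_fname, 'wb') as out_f:
        for fname in sorted(fname_l):
            with open(fname, 'rb') as in_f:
                # Stream in fixed-size chunks to keep memory usage flat.
                while True:
                    chunk = in_f.read(chunk_size)
                    if not chunk:
                        break
                    out_f.write(chunk)
    return out_fname
```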
def fetch_mono_dataset(selection: Union[str, List[str], List[List[str]]], lang: str = 'de', path: Optional[str] = _BASE_DATASET_PATH, overwrite: bool = False) -> List[str]: """Fetch the monolingual dataset provided by WMT Parameters -----...
Fetch the monolingual dataset provided by WMT Parameters ---------- selection The selected datasets lang Language of the monolingual corpus path overwrite Whether to overwrite the downloaded dataset Returns ------- src_corpus_paths
fetch_mono_dataset
python
dmlc/gluon-nlp
scripts/datasets/machine_translation/prepare_wmt.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py
Apache-2.0
def download_mono_newscrawl(lang: str = 'de', path: str = _BASE_DATASET_PATH)\ -> List[str]: """Download the train dataset used for WMT2014 Parameters ---------- lang path Returns ------- train_src_paths """ if lang == 'de': train_src_paths =\ fetch_...
Download the train dataset used for WMT2014 Parameters ---------- lang path Returns ------- train_src_paths
download_mono_newscrawl
python
dmlc/gluon-nlp
scripts/datasets/machine_translation/prepare_wmt.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py
Apache-2.0
def download_wmt14_train(lang_pair: str = 'en-de', path: str = _BASE_DATASET_PATH)\ -> Tuple[List[str], List[str]]: """Download the train dataset used for WMT2014 Parameters ---------- lang_pair path Returns ------- train_src_paths train_tgt_paths """ if lang_pair =...
Download the train dataset used for WMT2014 Parameters ---------- lang_pair path Returns ------- train_src_paths train_tgt_paths
download_wmt14_train
python
dmlc/gluon-nlp
scripts/datasets/machine_translation/prepare_wmt.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py
Apache-2.0
def download_wmt16_train(lang_pair: str = 'en-de', path: str = _BASE_DATASET_PATH)\ -> Tuple[List[str], List[str]]: """Download the train dataset used for WMT2016 Parameters ---------- lang_pair path Returns ------- train_src_paths train_tgt_paths """ if lang_pair ...
Download the train dataset used for WMT2016 Parameters ---------- lang_pair path Returns ------- train_src_paths train_tgt_paths
download_wmt16_train
python
dmlc/gluon-nlp
scripts/datasets/machine_translation/prepare_wmt.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py
Apache-2.0
def download_wmt17_train(lang_pair: str = 'en-de', path: str = _BASE_DATASET_PATH)\ -> Tuple[List[str], List[str]]: """Download the train dataset used for WMT2017 Parameters ---------- lang_pair path Returns ------- train_src_paths train_tgt_paths """ if lang_pair ...
Download the train dataset used for WMT2017 Parameters ---------- lang_pair path Returns ------- train_src_paths train_tgt_paths
download_wmt17_train
python
dmlc/gluon-nlp
scripts/datasets/machine_translation/prepare_wmt.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/machine_translation/prepare_wmt.py
Apache-2.0
def extract_files(full_name, output_dir, shuffle=False): """ Extract the file and concatenate all the TXT files it archives """ if not full_name.endswith(".xz"): return file_prefix = re.split(r'\.|/', full_name)[-2] file_prefix = file_prefix.replace('urlsf_subset', 'openwebtext-prepared-...
Extract the file and concatenate all the TXT files it archives
extract_files
python
dmlc/gluon-nlp
scripts/datasets/pretrain_corpus/prepare_openwebtext.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/pretrain_corpus/prepare_openwebtext.py
Apache-2.0
def get_formatting_list(wiki_path, recursive=False): """ Get the list of file names from the extracted content for formatting """ filenames = [] for dirname in glob.glob(os.path.join(wiki_path, '*'), recursive=False): for filename in glob.glob(os.path.join(dirname, 'wiki_*'), recursive=recursive): ...
Get the list of file names from the extracted content for formatting
get_formatting_list
python
dmlc/gluon-nlp
scripts/datasets/pretrain_corpus/prepare_wikipedia.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/pretrain_corpus/prepare_wikipedia.py
Apache-2.0
def download_wikicorpus(lang, date, output): """ lang: the language code such as en, zh date: string, the date of the Wikipedia with format of YYYYMMDD, or 'latest'. """ if not os.path.exists(output): os.makedirs(output) if lang not in __LANGUAGES_BANK: raise ValueError('Unsuppor...
lang: the language code such as en, zh date: string, the date of the Wikipedia with format of YYYYMMDD, or 'latest'.
download_wikicorpus
python
dmlc/gluon-nlp
scripts/datasets/pretrain_corpus/prepare_wikipedia.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/datasets/pretrain_corpus/prepare_wikipedia.py
Apache-2.0
def calculate_self_bleu4(sample_strs, num_bleu_samples): """Self-BLEU is calculated by computing the BLEU score of each generated document using all other generations in the evaluation set as references. """ pool = Pool(processes=os.cpu_count()) return sum(tqdm( pool.imap_unordered( ...
Self-BLEU is calculated by computing the BLEU score of each generated document using all other generations in the evaluation set as references.
calculate_self_bleu4
python
dmlc/gluon-nlp
scripts/generation/calculate_metrics.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/generation/calculate_metrics.py
Apache-2.0
def calculate_zipf_coefficient(sample_ids, tokenizer): """The Zipfian coefficient (R-squared) can be used to compare the distribution in a given text to a theoretically perfect exponential curve. """ cnt = Counter() for sample_id in sample_ids: cnt.update(sample_id) xs = np.arange(1, mi...
The Zipfian coefficient (R-squared) can be used to compare the distribution in a given text to a theoretically perfect exponential curve.
calculate_zipf_coefficient
python
dmlc/gluon-nlp
scripts/generation/calculate_metrics.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/generation/calculate_metrics.py
Apache-2.0
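Concretely, the fit is a linear regression of log(count) on log(rank), with R-squared measuring how Zipfian the sample is; a compact sketch with SciPy (function name illustrative):

from collections import Counter
import numpy as np
from scipy import stats

def zipf_r_squared(sample_ids):
    # Aggregate token counts over all generated samples.
    cnt = Counter()
    for sample_id in sample_ids:
        cnt.update(sample_id)
    # Rank tokens by frequency, then fit a line in log-log space.
    counts = np.array(sorted(cnt.values(), reverse=True), dtype=np.float64)
    ranks = np.arange(1, len(counts) + 1, dtype=np.float64)
    result = stats.linregress(np.log(ranks), np.log(counts))
    return result.rvalue ** 2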
def calculate_repetition(sample_ids): """The repetition rate in generated samples. """ max_n = 90 n_repeated_examples = 0 for sample_id in sample_ids: rev = list(reversed(sample_id)) last_n_repeats = [0 for _ in range(max_n)] for n in range(1, max_n + 1): n_repeat...
The repetition rate in generated samples.
calculate_repetition
python
dmlc/gluon-nlp
scripts/generation/calculate_metrics.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/generation/calculate_metrics.py
Apache-2.0
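A simplified version of the check sketched from the fragment above: walk n-gram sizes up to max_n and flag a sample whose tail is one n-gram repeated back-to-back. The exact repeat threshold used by the script is not visible here, so min_repeats below is an assumption:

def repetition_rate(sample_ids, max_n=90, min_repeats=3):
    n_repeated_examples = 0
    for sample_id in sample_ids:
        if not sample_id:
            continue  # an empty sample cannot repeat
        rev = list(reversed(sample_id))
        for n in range(1, max_n + 1):
            ngram = rev[:n]
            repeats = 0
            # Count how many times the trailing n-gram repeats consecutively.
            while rev[repeats * n:(repeats + 1) * n] == ngram:
                repeats += 1
            if repeats >= min_repeats:
                n_repeated_examples += 1
                break
    return n_repeated_examples / max(1, len(sample_ids))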
def get_base_tokenizer(method, lang): """The base tokenization method Parameters ---------- method lang Returns ------- """ if method == 'moses': return tokenizers.create('moses', lang) elif method == 'whitespace': return tokenizers.create('whitespace') el...
The base tokenization method Parameters ---------- method lang Returns -------
get_base_tokenizer
python
dmlc/gluon-nlp
scripts/machine_translation/evaluate_transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/machine_translation/evaluate_transformer.py
Apache-2.0
def validation(model, data_loader, inference_model, sequence_sampler, tgt_tokenizer, ctx_l): """Validate the model on the dataset Parameters ---------- model : TransformerModel The transformer model data_loader : DataLoader DataLoader inference_model The m...
Validate the model on the dataset Parameters ---------- model : TransformerModel The transformer model data_loader : DataLoader DataLoader inference_model The model for inference sequence_sampler: The sequence sampler for doing beam search tgt_tokenizer ...
validation
python
dmlc/gluon-nlp
scripts/machine_translation/train_transformer.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/machine_translation/train_transformer.py
Apache-2.0
def tokenize_lines_to_ids(lines, tokenizer): """ Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup. Parameters ---------- lines Lines to be tokenized of the whole file tokenizer The trained tokenizer Returns ------- results ...
Worker function to tokenize lines based on the tokenizer and perform vocabulary lookup. Parameters ---------- lines Lines of the whole file to be tokenized tokenizer The trained tokenizer Returns ------- results A list storing the valid tokenized lines
tokenize_lines_to_ids
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
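Shape-wise, the worker is a filter-and-encode loop; an illustrative version written against a generic encode callable, since the exact gluon-nlp tokenizer API is not shown above:

def tokenize_lines_sketch(lines, encode_fn):
    # encode_fn: any callable mapping a text line to a list of token ids,
    # standing in for the tokenizer-plus-vocabulary lookup described above.
    results = []
    for line in lines:
        line = line.strip()
        if line:  # drop blank lines; they carry no tokens
            results.append(encode_fn(line))
    return results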
def get_all_features(x): """ Get the feature data in numpy form. Parameters ---------- x List/tuple that contains: - file_list A list of text files - output_file The path to a output file that store the np_features - tokenizer Th...
Get the feature data in numpy form. Parameters ---------- x List/tuple that contains: - file_list A list of text files - output_file The path to an output file that stores the np_features - tokenizer The trained tokenizer - ma...
get_all_features
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
def process_a_text(text_file, tokenizer, max_seq_length, short_seq_prob=0.05): """ Create features from a single raw text file, in which one line is treated as a sentence, and double blank lines represent document separators. In this process, mxnet-unrelated features are generated, to easily convert ...
Create features from a single raw text file, in which one line is treated as a sentence and double blank lines represent document separators. In this process, MXNet-independent features are generated so that they can easily be converted to features of a particular deep learning framework in subsequent steps. Parame...
process_a_text
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
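The sentence/document convention it describes (one sentence per line, blank lines between documents) can be sketched independently of any framework; tokenization and feature building are deliberately left out:

def read_documents(text_file):
    # One sentence per line; any run of blank lines ends the current document
    # (this subsumes the double-blank-line separator described above).
    documents, current = [], []
    with open(text_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                if current:
                    documents.append(current)
                    current = []
            else:
                current.append(line)
    if current:
        documents.append(current)
    return documents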
def convert_to_npz(all_features, output_file=None): """ Convert features to numpy array and store if output_file provided Parameters ---------- all_features A list of processed features. output_file The path to a output file that store the np_features. Returns ------- ...
Convert features to numpy arrays and store them if output_file is provided Parameters ---------- all_features A list of processed features. output_file The path to an output file that stores the np_features. Returns ------- input_ids A tuple of features segment_ids ...
convert_to_npz
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
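The numpy side of this is a savez round trip; a two-field sketch (the real feature tuple carries more arrays than shown, and the dtype is an assumption):

import numpy as np

def save_features_npz(input_ids, segment_ids, output_file=None):
    # Assumes the per-example features are already padded to a fixed length,
    # so they stack into rectangular int32 arrays.
    input_ids = np.asarray(input_ids, dtype='int32')
    segment_ids = np.asarray(segment_ids, dtype='int32')
    if output_file:
        np.savez(output_file, input_ids=input_ids, segment_ids=segment_ids)
    return input_ids, segment_ids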
def sentenceize(current_sentences, max_seq_length, target_seq_length): """ Generate a pair of sentences based on a segmentation strategy cloned from official electra model. Parameters ---------- current_sentences max_seq_length Maximum sequence length of the training features ta...
Generate a pair of sentences based on a segmentation strategy cloned from the official ELECTRA model. Parameters ---------- current_sentences max_seq_length Maximum sequence length of the training features target_seq_length Target sequence length of the training features Re...
sentenceize
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
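In outline, the strategy buffers sentences, caps the buffer at the target length, and cuts it at a random point into two segments; a hedged sketch (the official ELECTRA code additionally randomizes target lengths and reserves room for special tokens, which is omitted here):

import random

def make_segment_pair(current_sentences, target_seq_length):
    # Flatten the buffered sentences and cap them at the target length.
    tokens = [tok for sent in current_sentences for tok in sent]
    tokens = tokens[:target_seq_length]
    if len(tokens) < 2:
        return tokens, []
    # Cut at a random point so both segments are non-empty.
    split = random.randint(1, len(tokens) - 1)
    return tokens[:split], tokens[split:]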
def prepare_pretrain_npz_dataset(filename, allow_pickle=False): """Create dataset based on the numpy npz file""" if isinstance(filename, (list, tuple)): assert len(filename) == 1, \ 'When .npy/.npz data file is loaded, len(filename) must be 1.' \ ' Received len(filename)={}.'.for...
Create dataset based on the numpy npz file
prepare_pretrain_npz_dataset
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
def prepare_pretrain_text_dataset( filenames, tokenizer, max_seq_length, short_seq_prob, cached_file_path): """Create dataset based on the raw text files""" if not isinstance(filenames, (list, tuple)): filenames = [filenames] if cached_file_path: # gen...
Create dataset based on the raw text files
prepare_pretrain_text_dataset
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
def prepare_pretrain_bucket_sampler(dataset, batch_size, shuffle=False, num_buckets=1): """Create data sampler based on the dataset""" if isinstance(dataset, NumpyDataset): lengths = dataset.get_field('valid_lengths') else: lengths = dataset.transform(lambda input_ids, segment_ids, ...
Create data sampler based on the dataset
prepare_pretrain_bucket_sampler
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
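The sampler's job is to batch examples of similar length together so padding is minimized; a framework-free sketch of a fixed-bucket scheme (with num_buckets=1 it degenerates to plain sequential batching):

import random

def bucket_batches(lengths, batch_size, num_buckets=1, shuffle=False):
    # Sort indices by length, then slice the sorted order into buckets.
    order = sorted(range(len(lengths)), key=lambda i: lengths[i])
    bucket_size = max(1, (len(order) + num_buckets - 1) // num_buckets)
    buckets = [order[i:i + bucket_size]
               for i in range(0, len(order), bucket_size)]
    batches = []
    for bucket in buckets:
        if shuffle:
            random.shuffle(bucket)  # shuffle within a bucket only
        for start in range(0, len(bucket), batch_size):
            batches.append(bucket[start:start + batch_size])
    if shuffle:
        random.shuffle(batches)  # and shuffle batch order across buckets
    return batches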
def get_pretrain_data_npz(data, batch_size, shuffle, num_buckets, vocab, num_parts=1, part_idx=0, num_dataset_workers=1, num_batch_workers=1, circle_length=1, repeat=1, dataset_cached=False, ...
Get a data iterator from pre-processed npz files. Parameters ---------- data: str The path to the dataset directory batch_size : int The batch size per GPU. shuffle : bool Whether to shuffle the data. num_buckets : int The number of buckets for the FixedBucketSam...
get_pretrain_data_npz
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
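Among those arguments, num_parts/part_idx implement file-level sharding across workers; the strided-slice sketch below is one plausible reading, since the actual glob pattern and split logic in pretraining_utils.py are not shown above:

import glob

def shard_npz_files(data_pattern, num_parts=1, part_idx=0):
    assert 0 <= part_idx < num_parts
    files = sorted(glob.glob(data_pattern))
    # Each of the num_parts workers keeps every num_parts-th file.
    return files[part_idx::num_parts]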
def dynamic_masking(self, input_ids, valid_lengths): # TODO(zheyuye), two additional flag `disallow_from_mask` and `already_masked` # that control the masking status for each positions in the sequence. """ Generate masking positions on-the-fly instead of during preprocessing Para...
Generate masking positions on-the-fly instead of during preprocessing Parameters ---------- input_ids The batchified input_ids with shape (batch_size, max_seq_length) valid_lengths The batchified valid_lengths with shape (batch_size, ) Returns ...
dynamic_masking
python
dmlc/gluon-nlp
scripts/pretraining/pretraining_utils.py
https://github.com/dmlc/gluon-nlp/blob/master/scripts/pretraining/pretraining_utils.py
Apache-2.0
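A NumPy sketch of the on-the-fly position sampling; the real implementation also respects special tokens and the flags noted in the TODO, and the 15% ratio is the conventional BERT/ELECTRA default, assumed here:

import numpy as np

def dynamic_mask_positions(input_ids, valid_lengths, mask_prob=0.15, seed=0):
    # input_ids: (batch_size, max_seq_length); valid_lengths: (batch_size,).
    # Assumes every sequence has at least one valid (non-padding) token.
    rng = np.random.default_rng(seed)
    input_ids = np.asarray(input_ids)
    masked_positions = []
    for length in np.asarray(valid_lengths):
        length = int(length)
        num_masked = max(1, int(round(length * mask_prob)))
        # Sample positions only inside the valid span, never in the padding.
        positions = rng.choice(length, size=num_masked, replace=False)
        masked_positions.append(np.sort(positions))
    return masked_positions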