code stringlengths 101 5.91M |
|---|
def label_mapping(input, mapping):
    """Relabel an integer array according to (old, new) pairs.

    Args:
        input: array-like of labels.
        mapping: sequence of pairs; pair[0] is the label to replace,
            pair[1] is its replacement.

    Returns:
        np.ndarray of dtype int64 with the remapping applied.

    Note: masks are computed against the ORIGINAL input, so chained
    mappings (a->b, b->c) do not cascade.
    """
    output = np.copy(input)
    # Iterate the mapping directly instead of indexing by range(len(...)).
    for pair in mapping:
        output[input == pair[0]] = pair[1]
    return np.array(output, dtype=np.int64)
def parse_config():
    """Build the training CLI, parse it, and populate the global config.

    Side effects: mutates the module-level `cfg` via cfg_from_yaml_file /
    cfg_from_list and derives cfg.TAG / cfg.EXP_GROUP_PATH from the config
    file path.

    Returns:
        (args, cfg): the parsed argparse namespace and the populated config.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', default=None, type=str, help='specify the config for training')
    parser.add_argument('--batch_size', default=None, type=int, required=False, help='batch size for training')
    parser.add_argument('--epochs', default=None, type=int, required=False, help='number of epochs to train for')
    parser.add_argument('--workers', default=8, type=int, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', default='default', type=str, help='extra tag for this experiment')
    parser.add_argument('--ckpt', default=None, type=str, help='checkpoint to start from')
    parser.add_argument('--pretrained_model', default=None, type=str, help='pretrained_model')
    parser.add_argument('--launcher', default='none', choices=['none', 'pytorch', 'slurm'])
    parser.add_argument('--tcp_port', default=18888, type=int, help='tcp port for distrbuted training')
    parser.add_argument('--without_sync_bn', default=False, action='store_true', help='whether to use sync bn')
    parser.add_argument('--fix_random_seed', default=False, action='store_true', help='')
    parser.add_argument('--ckpt_save_interval', default=2, type=int, help='number of training epochs')
    parser.add_argument('--local_rank', default=None, type=int, help='local rank for distributed training')
    parser.add_argument('--max_ckpt_save_num', default=5, type=int, help='max number of saved checkpoint')
    parser.add_argument('--merge_all_iters_to_one_epoch', default=False, action='store_true', help='')
    parser.add_argument('--set', dest='set_cfgs', nargs=argparse.REMAINDER, default=None, help='set extra config keys if needed')
    parser.add_argument('--max_waiting_mins', default=0, type=int, help='max waiting minutes')
    parser.add_argument('--start_epoch', default=0, type=int, help='')
    parser.add_argument('--save_to_file', default=False, action='store_true', help='')
    parser.add_argument('--not_eval_with_train', default=False, action='store_true', help='')
    parser.add_argument('--logger_iter_interval', default=50, type=int, help='')
    parser.add_argument('--ckpt_save_time_interval', default=300, type=int, help='in terms of seconds')
    parser.add_argument('--add_worker_init_fn', default=False, action='store_true', help='')
    args = parser.parse_args()

    # Load the YAML config, then derive bookkeeping fields from its path:
    # TAG is the file stem, EXP_GROUP_PATH the intermediate directories
    # (e.g. 'cfgs/kitti_models/x.yaml' -> 'kitti_models').
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    return args, cfg
class RandomActiveLearningNodeMC(LearningNodeMC, RandomActiveLeafClass):
    """Majority-class learning node that tracks a random subset of features.

    Combines the plain majority-class learning node with the random active
    leaf behavior; the concrete feature subset starts empty and is sized by
    `max_features`.
    """

    def __init__(self, initial_stats=None, max_features=2, random_state=None):
        """Initialize the node.

        Args:
            initial_stats: statistics passed through to LearningNodeMC.
            max_features: number of features this leaf may consider.
            random_state: seed / RandomState forwarded to check_random_state.
        """
        super().__init__(initial_stats)
        self.random_state = random_state
        # Materialize a numpy RandomState from whatever seed form was given.
        self._random_state = check_random_state(self.random_state)
        self.max_features = max_features
        self.feature_indices = np.array([])
def handy_var(a, unbias=True):
    """Variance of `a` along dim 0 via the sum-of-squares identity.

    Computes sum(a^2) - sum(a)^2 / n per column, divided by (n - 1) when
    `unbias` is True (sample variance) or by n otherwise.
    NOTE(review): the one-pass identity can suffer catastrophic cancellation
    for large, tightly clustered values.
    """
    count = a.size(0)
    total = a.sum(dim=0)
    total_sq = a.pow(2).sum(dim=0)
    sum_sq_dev = total_sq - total * total / count
    denom = count - 1 if unbias else count
    return sum_sq_dev / denom
@dataclass
class MaskedImageModelingOutput(ModelOutput):
    """Output type for masked-image-modeling models.

    Restores the `@dataclass` and `@property` decorators that the upstream
    Transformers definition carries (they were stripped in this copy);
    without `@property`, `output.logits` returns a bound method and the
    deprecation warning never fires.

    Fields:
        loss: optional reconstruction loss.
        reconstruction: reconstructed pixel values (the model's final output).
        hidden_states: optional per-layer hidden states.
        attentions: optional per-layer attention weights.
    """

    loss: Optional[torch.FloatTensor] = None
    reconstruction: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

    @property
    def logits(self):
        """Deprecated alias for `reconstruction` (scheduled for removal in v5)."""
        warnings.warn('logits attribute is deprecated and will be removed in version 5 of Transformers. Please use the reconstruction attribute to retrieve the final output instead.', FutureWarning)
        return self.reconstruction
class BaseOptions():
    """Command-line option container for the DeepHuman / PIFu-style pipeline.

    Typical usage: opt = BaseOptions().parse()
    NOTE: several numeric defaults below are given as strings (e.g.
    default='0.8' with type=float); argparse applies type() to string
    defaults, so they still parse to the right type.
    """

    def __init__(self):
        # Flipped to True once initialize() has registered all arguments.
        self.initialized = False

    def initialize(self, parser):
        """Register all argument groups on `parser` and return it."""
        # --- project-specific (DeepHuman) options ---
        g_ours = parser.add_argument_group('DeepHuman')
        g_ours.add_argument('--meshDirSearch', type=str, default='/trainman-mount/trainman-storage-d5c0a121-bb5d-4afb-8020-c53f096d2a5c/data')
        g_ours.add_argument('--trainingDataRatio', type=float, default='0.8')
        g_ours.add_argument('--datasetDir', type=str, default='/trainman-mount/trainman-storage-d5c0a121-bb5d-4afb-8020-c53f096d2a5c/data/humanRender')
        g_ours.add_argument('--totalNumFrame', type=int, default='108720', help="total data number: N*M'*4 = 6795*4*4 = 108720")
        g_ours.add_argument('--online_sampling', action='store_true', help='online query point sampling, or offline')
        g_ours.add_argument('--resolution_x', type=int, default=171, help='# of grid in mesh reconstruction')
        g_ours.add_argument('--resolution_y', type=int, default=256, help='# of grid in mesh reconstruction')
        g_ours.add_argument('--resolution_z', type=int, default=171, help='# of grid in mesh reconstruction')
        g_ours.add_argument('--preModelDir', type=str, default='./results/results_final_19_09_30_10_29_33', help="if mode is 'finetune' then load pre-trained model from this dir")
        g_ours.add_argument('--resultsDir', type=str, default='/trainman-mount/trainman-storage-d5c0a121-bb5d-4afb-8020-c53f096d2a5c/data/humanRender/deepHumanResults/expName')
        g_ours.add_argument('--splitNum', type=int, default='8', help='for multi-process running')
        g_ours.add_argument('--splitIdx', type=int, default='0', help='{0, ..., splitNum-1}')
        g_ours.add_argument('--visual_demo_mesh', type=int, default='0', help='num of frames used in visual demo')
        g_ours.add_argument('--shuffle_train_test_ids', action='store_true', help='shuffle training, test data indices or not')
        g_ours.add_argument('--sampleType', type=str, default='sigma3.5_pts5k')
        g_ours.add_argument('--epoch_range', nargs='+', default=[0, 15], type=int, help='epoch range names used for offline query-pts sampling')
        g_ours.add_argument('--resume_name', type=str, default='example', help='name of the experiment. It decides where to load weights to resume training')
        g_ours.add_argument('--upsample_mode', type=str, default='bicubic', help='bicubic | nearest')
        g_ours.add_argument('--recover_dim', action='store_true', help='recover stack-hour-glass output feature dimensions from BVx256x128x128 to BVx256x512x512')
        g_ours.add_argument('--epoch_offline_len', type=int, default='15', help='number of epochs that have been sampled offline')
        g_ours.add_argument('--load_single_view_meshVoxels', action='store_true', help='load meshVoxels for a single view in order to train the VRN network')
        g_ours.add_argument('--vrn_net_input_height', type=int, default='384', help='vrn network image input height 192*2')
        g_ours.add_argument('--vrn_net_input_width', type=int, default='256', help='vrn network image input width 128*2')
        g_ours.add_argument('--vrn_num_modules', type=int, default=4, help='num of stack-hour-glass')
        g_ours.add_argument('--vrn_num_hourglass', type=int, default=2, help='depth of each hour-glass')
        g_ours.add_argument('--partial_load', action='store_true', help='set strict=False for net.load_state_dict function, useful when you need to load weights for partial networks')
        g_ours.add_argument('--load_from_multi_GPU_shape', action='store_true', help='load weights to single-GPU model, from shape models trained with nn.DataParallel function')
        g_ours.add_argument('--load_from_multi_GPU_color', action='store_true', help='load weights to single-GPU model, from color models trained with nn.DataParallel function')
        g_ours.add_argument('--give_idx', nargs='+', default=[None], type=int, help='list of idx for visual demo')
        g_ours.add_argument('--weight_occu', type=float, default='1000.')
        g_ours.add_argument('--weight_rgb_recon', type=float, default='200.')
        g_ours.add_argument('--vrn_occupancy_loss_type', type=str, default='ce', help='mse | ce')
        g_ours.add_argument('--use_view_pred_loss', action='store_true', help='apply view prediction losses upon deep voxels')
        g_ours.add_argument('--use_3d_gan', action='store_true', help='apply 3d GAN losses upon deep voxels')
        g_ours.add_argument('--view_probs_front_right_back_left', nargs='+', default=[0.15, 0.3, 0.25, 0.3], type=float, help='4-view sampling probs when training with view rendering losses, must sum to 1.0')
        g_ours.add_argument('--use_view_discriminator', action='store_true', help='also apply patch-GAN losses when view prediction losses are in use')
        g_ours.add_argument('--dataType', type=str, default='test', help='train | test | both')
        g_ours.add_argument('--dataTypeZip', type=str, default='both', help='train | test | both')
        g_ours.add_argument('--deepVoxels_fusion', type=str, default=None, help='early | late')
        g_ours.add_argument('--deepVoxels_c_len', type=int, default=8, help='len of deepVoxel features when conducting fusion with 2D aligned features')
        g_ours.add_argument('--deepVoxels_c_len_intoLateFusion', type=int, default=8, help='len of deepVoxel features into the late fusion layers')
        g_ours.add_argument('--multiRanges_deepVoxels', action='store_true', help='use xyz-3-direction deepvoxels sampling')
        g_ours.add_argument('--displacment', type=float, default='0.0722', help='0.035 | 0.0722, displacment used when conducting multiRanges_deepVoxels')
        g_ours.add_argument('--deepVoxelsDir', type=str, default='/trainman-mount/trainman-storage-d5c0a121-bb5d-4afb-8020-c53f096d2a5c/data/humanRender/pifuResults/ourDataShape_vrn_ce_6gpu/train')
        g_ours.add_argument('--mlp_dim_3d', nargs='+', default=[56, 256, 128, 1], type=int, help='# of dimensions of mlp for DeepVoxels 3d branch')
        g_ours.add_argument('--mlp_dim_joint', nargs='+', default=[0, 256, 128, 1], type=int, help='# of dimensions of mlp for joint 2d-3d branch')
        g_ours.add_argument('--discriminator_accuracy_update_threshold', type=float, default='0.8', help='only update the discriminator if fake/real accuracies are both below this threshold, to avoid discriminator going too fast')
        g_ours.add_argument('--weight_3d_gan_gen', type=float, default='15.', help='weight for 3d-gan generator loss, to be comparable with the occupancy loss')
        g_ours.add_argument('--must_run_in_train_modes', type=str, default='ourDataShape_vrn_ce_6gpu_3dGAN,XXX', help='some models have to be run in train modes due to some hacky issues of BN layers')
        g_ours.add_argument('--num_skip_frames', type=int, default='1', help='num of frames to skip when generating visual demos')
        # --- dataset locations ---
        g_data = parser.add_argument_group('Data')
        g_data.add_argument('--dataroot', type=str, default='./data', help='path to images (data folder)')
        g_data.add_argument('--loadSize', type=int, default=512, help='load size of input image')
        # --- experiment naming / debugging ---
        g_exp = parser.add_argument_group('Experiment')
        g_exp.add_argument('--name', type=str, default='example', help='name of the experiment. It decides where to store samples and models')
        g_exp.add_argument('--debug', action='store_true', help='debug mode or not')
        g_exp.add_argument('--num_views', type=int, default=1, help='How many views to use for multiview network.')
        g_exp.add_argument('--random_multiview', action='store_true', help='Select random multiview combination.')
        # --- optimization / schedule ---
        g_train = parser.add_argument_group('Training')
        g_train.add_argument('--gpu_id', type=int, default=0, help='gpu id for cuda')
        g_train.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2, -1 for CPU mode')
        g_train.add_argument('--num_threads', default=1, type=int, help='# sthreads for loading data')
        g_train.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        g_train.add_argument('--pin_memory', action='store_true', help='pin_memory')
        g_train.add_argument('--batch_size', type=int, default=2, help='input batch size')
        g_train.add_argument('--learning_rate', type=float, default=0.001, help='adam learning rate')
        g_train.add_argument('--learning_rate_3d_gan', type=float, default=1e-05, help='adam learning rate')
        g_train.add_argument('--learning_rateC', type=float, default=0.001, help='adam learning rate')
        g_train.add_argument('--num_epoch', type=int, default=100, help='num epoch to train')
        g_train.add_argument('--freq_plot', type=int, default=10, help='freqency of the error plot')
        g_train.add_argument('--freq_save', type=int, default=50, help='freqency of the save_checkpoints')
        g_train.add_argument('--freq_save_ply', type=int, default=100, help='freqency of the save ply')
        g_train.add_argument('--no_gen_mesh', action='store_true')
        g_train.add_argument('--no_num_eval', action='store_true')
        g_train.add_argument('--resume_epoch', type=int, default=(- 1), help='epoch resuming the training')
        g_train.add_argument('--resume_iter', type=int, default=(- 1), help='iter resuming the training, within the resume_epoch defined above')
        g_train.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        # --- testing ---
        g_test = parser.add_argument_group('Testing')
        g_test.add_argument('--resolution', type=int, default=256, help='# of grid in mesh reconstruction')
        g_test.add_argument('--test_folder_path', type=str, default=None, help='the folder of test image')
        # --- query-point sampling ---
        g_sample = parser.add_argument_group('Sampling')
        g_sample.add_argument('--sigma', type=float, default=5.0, help='perturbation standard deviation for positions')
        g_sample.add_argument('--num_sample_inout', type=int, default=5000, help='# of sampling points')
        g_sample.add_argument('--num_sample_color', type=int, default=0, help='# of sampling points')
        g_sample.add_argument('--z_size', type=float, default=200.0, help='z normalization factor')
        # --- network architecture ---
        g_model = parser.add_argument_group('Model')
        g_model.add_argument('--norm', type=str, default='group', help='instance normalization or batch normalization or group normalization')
        g_model.add_argument('--norm_color', type=str, default='group', help='instance normalization or batch normalization or group normalization')
        g_model.add_argument('--num_stack', type=int, default=4, help='# of hourglass')
        g_model.add_argument('--num_hourglass', type=int, default=2, help='# of stacked layer of hourglass')
        g_model.add_argument('--skip_hourglass', action='store_true', help='skip connection in hourglass')
        g_model.add_argument('--hg_down', type=str, default='ave_pool', help='ave pool || conv64 || conv128')
        g_model.add_argument('--hourglass_dim', type=int, default='256', help='256 | 512')
        g_model.add_argument('--mlp_dim', nargs='+', default=[257, 1024, 512, 256, 128, 1], type=int, help='# of dimensions of mlp')
        g_model.add_argument('--mlp_dim_color', nargs='+', default=[513, 1024, 512, 256, 128, 3], type=int, help='# of dimensions of color mlp')
        g_model.add_argument('--use_tanh', action='store_true', help='using tanh after last conv of image_filter network')
        # --- ungrouped augmentation / loss / path options ---
        parser.add_argument('--random_flip', action='store_true', help='if random flip')
        parser.add_argument('--random_trans', action='store_true', help='if random flip')
        parser.add_argument('--random_scale', action='store_true', help='if random flip')
        parser.add_argument('--no_residual', action='store_true', help='no skip connection in mlp')
        parser.add_argument('--schedule', type=int, nargs='+', default=[60, 80], help='Decrease learning rate at these epochs.')
        parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
        parser.add_argument('--color_loss_type', type=str, default='l1', help='mse | l1')
        parser.add_argument('--occupancy_loss_type', type=str, default='mse', help='mse | l1 | ce')
        parser.add_argument('--val_test_error', action='store_true', help='validate errors of test data')
        parser.add_argument('--val_train_error', action='store_true', help='validate errors of train data')
        parser.add_argument('--gen_test_mesh', action='store_true', help='generate test mesh')
        parser.add_argument('--gen_train_mesh', action='store_true', help='generate train mesh')
        parser.add_argument('--all_mesh', action='store_true', help='generate meshs from all hourglass output')
        parser.add_argument('--num_gen_mesh_test', type=int, default=1, help='how many meshes to generate during testing')
        parser.add_argument('--checkpoints_path', type=str, default='./checkpoints', help='path to save checkpoints')
        parser.add_argument('--load_netV_checkpoint_path', type=str, default=None, help='path to save checkpoints')
        parser.add_argument('--load_netG_checkpoint_path', type=str, default=None, help='path to save checkpoints')
        parser.add_argument('--load_netC_checkpoint_path', type=str, default=None, help='path to save checkpoints')
        parser.add_argument('--results_path', type=str, default='./results', help='path to save results ply')
        parser.add_argument('--load_checkpoint_path', type=str, help='path to save results ply')
        parser.add_argument('--single', type=str, default='', help='single data for training')
        parser.add_argument('--mask_path', type=str, help='path for input mask')
        parser.add_argument('--img_path', type=str, help='path for input image')
        # --- photometric augmentation ---
        group_aug = parser.add_argument_group('aug')
        group_aug.add_argument('--aug_alstd', type=float, default=0.0, help='augmentation pca lighting alpha std')
        group_aug.add_argument('--aug_bri', type=float, default=0.0, help='augmentation brightness')
        group_aug.add_argument('--aug_con', type=float, default=0.0, help='augmentation contrast')
        group_aug.add_argument('--aug_sat', type=float, default=0.0, help='augmentation saturation')
        group_aug.add_argument('--aug_hue', type=float, default=0.0, help='augmentation hue')
        group_aug.add_argument('--aug_blur', type=float, default=0.0, help='augmentation blur')
        self.initialized = True
        return parser

    def gather_options(self):
        """Build the parser (first call only), remember it, and parse sys.argv.

        NOTE(review): if this is called when self.initialized is already
        True, `parser` is never assigned and the method raises NameError —
        presumably it is only ever called once per instance; confirm.
        """
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Pretty-print the parsed options, flagging values that differ from their defaults.

        Relies on self.parser, which gather_options() must have set first.
        """
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)

    def parse(self):
        """Parse command-line arguments and return the options namespace."""
        opt = self.gather_options()
        return opt
def AlignedOneImageUsingFaceXAlignment(input_root, out_root, image_path):
    """Detect, align, and crop a face from one image, mirroring the input tree.

    The cropped (or, when no face is found, original) image is written to
    `image_path` with `input_root` replaced by `out_root`.

    Args:
        input_root: root directory of the source images.
        out_root: root directory for the aligned outputs.
        image_path: path of the image to process (under input_root).
    """
    # cv2.imread returns None (rather than raising) for unreadable paths,
    # so check explicitly instead of the original bare `except:` which
    # silently swallowed every exception type.
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    if image is None:
        return
    dets = faceDetModelHandler.inference_on_image(image)
    if len(dets) > 0:
        dets = Filter2centerBox(dets, image)
        for i, det in enumerate(dets):
            # Filter2centerBox is expected to leave a single (most central)
            # box; guard against more than one — TODO confirm intent.
            assert i != 1
            landmarks = faceAlignModelHandler.inference_on_image(image, det)
            cropped_image = face_cropper.crop_image_by_mat(image, landmarks.reshape(-1))
            out_path = image_path.replace(input_root, out_root)
            # exist_ok avoids the check-then-create race of the original
            # os.path.exists(...) is False / makedirs pattern.
            os.makedirs(os.path.dirname(out_path), exist_ok=True)
            cv2.imwrite(out_path, cropped_image)
    else:
        # No face detected: pass the source image through unchanged.
        # NOTE(review): this branch assumes the output directory already
        # exists — confirm whether makedirs is needed here too.
        out_path = image_path.replace(input_root, out_root)
        cv2.imwrite(out_path, image)
def _distributed_worker(local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args, timeout=DEFAULT_TIMEOUT):
    """Per-process entry point for multi-machine, multi-GPU training.

    Initializes the NCCL process group, pins this process to its local GPU,
    builds one process subgroup per machine, then calls main_func(*args).

    Args:
        local_rank: index of this process's GPU on the current machine.
        main_func: callable to run once distributed setup is complete.
        world_size: total number of processes across all machines.
        num_gpus_per_machine: processes (one per GPU) on each machine.
        machine_rank: index of this machine.
        dist_url: init_method URL for torch.distributed.
        args: positional arguments forwarded to main_func.
        timeout: timeout for collective operations.
    """
    assert torch.cuda.is_available(), 'cuda is not available. Please check your installation.'
    # Global rank is machine-major: all of machine 0's GPUs, then machine 1's, ...
    global_rank = ((machine_rank * num_gpus_per_machine) + local_rank)
    try:
        dist.init_process_group(backend='NCCL', init_method=dist_url, world_size=world_size, rank=global_rank, timeout=timeout)
    except Exception as e:
        # Log the rendezvous URL to make connection failures diagnosable.
        logger = logging.getLogger(__name__)
        logger.error('Process group URL: {}'.format(dist_url))
        raise e
    # Barrier: make sure every process finished init before continuing.
    comm.synchronize()
    assert (num_gpus_per_machine <= torch.cuda.device_count())
    torch.cuda.set_device(local_rank)
    assert (comm._LOCAL_PROCESS_GROUP is None)
    # Every process must participate in every dist.new_group() call, but
    # each one keeps only the subgroup for its own machine.
    num_machines = (world_size // num_gpus_per_machine)
    for i in range(num_machines):
        ranks_on_i = list(range((i * num_gpus_per_machine), ((i + 1) * num_gpus_per_machine)))
        pg = dist.new_group(ranks_on_i)
        if (i == machine_rank):
            comm._LOCAL_PROCESS_GROUP = pg
    main_func(*args)
def batch_recall(candidates, sources, gold_edits, max_unchanged_words=2, beta=0.5, ignore_whitespace_casing=False, verbose=False):
    """Return only the recall component of the batch precision/recall/F score.

    Delegates the full computation to batch_pre_rec_f1 and picks index 1
    (recall) from the returned tuple.
    """
    scores = batch_pre_rec_f1(candidates, sources, gold_edits, max_unchanged_words, beta, ignore_whitespace_casing, verbose)
    return scores[1]
class TestThreading(object):
    """Check that scipy.ndimage filters produce identical results when run
    serially and concurrently from multiple threads."""

    def check_func_serial(self, n, fun, args, out):
        """Run `fun` n times sequentially, writing run i's result into out[i]."""
        for i in range(n):
            fun(*args, output=out[i])

    def check_func_thread(self, n, fun, args, out):
        """Run `fun` once per thread concurrently, thread x writing into out[x]."""
        from threading import Thread
        thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]}) for x in range(n)]
        # Plain loops (not side-effect list comprehensions) for start/join.
        for t in thrds:
            t.start()
        for t in thrds:
            t.join()

    def test_correlate1d(self):
        """1-D correlation: serial and threaded outputs must match exactly."""
        d = np.random.randn(5000)
        os = np.empty((4, d.size))
        ot = np.empty_like(os)
        self.check_func_serial(4, sndi.correlate1d, (d, np.arange(5)), os)
        self.check_func_thread(4, sndi.correlate1d, (d, np.arange(5)), ot)
        assert_array_equal(os, ot)

    def test_correlate(self):
        """2-D correlation with a 10x10 kernel."""
        d = np.random.randn(500, 500)
        k = np.random.randn(10, 10)
        os = np.empty(([4] + list(d.shape)))
        ot = np.empty_like(os)
        self.check_func_serial(4, sndi.correlate, (d, k), os)
        self.check_func_thread(4, sndi.correlate, (d, k), ot)
        assert_array_equal(os, ot)

    def test_median_filter(self):
        """2-D median filter with size 3."""
        d = np.random.randn(500, 500)
        os = np.empty(([4] + list(d.shape)))
        ot = np.empty_like(os)
        self.check_func_serial(4, sndi.median_filter, (d, 3), os)
        self.check_func_thread(4, sndi.median_filter, (d, 3), ot)
        assert_array_equal(os, ot)

    def test_uniform_filter1d(self):
        """1-D uniform (moving-average) filter with size 5."""
        d = np.random.randn(5000)
        os = np.empty((4, d.size))
        ot = np.empty_like(os)
        self.check_func_serial(4, sndi.uniform_filter1d, (d, 5), os)
        self.check_func_thread(4, sndi.uniform_filter1d, (d, 5), ot)
        assert_array_equal(os, ot)

    def test_minmax_filter(self):
        """2-D maximum and minimum filters with size 3."""
        d = np.random.randn(500, 500)
        os = np.empty(([4] + list(d.shape)))
        ot = np.empty_like(os)
        self.check_func_serial(4, sndi.maximum_filter, (d, 3), os)
        self.check_func_thread(4, sndi.maximum_filter, (d, 3), ot)
        assert_array_equal(os, ot)
        self.check_func_serial(4, sndi.minimum_filter, (d, 3), os)
        self.check_func_thread(4, sndi.minimum_filter, (d, 3), ot)
        assert_array_equal(os, ot)
class DeformRoIPooling(nn.Module):
    """Module wrapper around the `deform_roi_pooling` op.

    When `no_trans` is True the offset input is replaced by an empty tensor,
    i.e. the op runs as plain (non-deformable) RoI pooling.
    """

    def __init__(self, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0):
        super().__init__()
        self.spatial_scale = spatial_scale
        self.out_size = out_size
        self.out_channels = out_channels
        self.no_trans = no_trans
        self.group_size = group_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std
        # Fall back to the output size when no explicit part size is given.
        self.part_size = part_size if part_size is not None else out_size

    def forward(self, data, rois, offset):
        """Pool `rois` from `data`; `offset` is ignored when no_trans is set."""
        if self.no_trans:
            offset = data.new_empty(0)
        return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
def process_part(header_contents):
    """Tag header and content lines as context ('C') or heading ('H').

    First pass: among elements whose first header starts with 'part'
    (case-insensitive), find the most common header count, `max_count`.
    Second pass: within each element, the leading headers beyond that count
    are context; from the first 'part' header onward everything is a
    heading; content lines are always context.

    Returns:
        list of (tag, line) tuples in document order.
    """
    # Header counts of elements whose first header starts with 'part'.
    part_counts = [len(elem.header) for elem in header_contents
                   if len(elem.header) > 0 and elem.header[0].strip().lower().startswith('part')]
    # Mode of those counts (same tie-break expression as before).
    max_count = max(set(part_counts), key=part_counts.count)

    tagged = []
    for elem in header_contents:
        headers = elem.header
        # Headers before this index are treated as surrounding context.
        context_prefix = len(headers) - max_count
        in_part = False
        for idx, h in enumerate(headers):
            if idx < context_prefix:
                tagged.append(('C', h))
            elif in_part:
                tagged.append(('H', h))
            elif h.strip().lower().startswith('part'):
                in_part = True
                tagged.append(('H', h))
            else:
                tagged.append(('C', h))
        for c in elem.content:
            tagged.append(('C', c))
    return tagged
# NOTE(review): the bare parenthesized line below looks like the argument
# list of a decorator stripped during extraction — presumably
# `@hydra.main(config_path='config/preprocessing.yaml')`, since the body
# uses an injected `cfg` and `utils.to_absolute_path` — confirm against
# the original source.
(config_path='config/preprocessing.yaml')
def preprocess_dataset(cfg):
    """Extract features for the train and test splits described by `cfg`.

    Reads per-split JSON metadata of (in_path, start, duration, out_path)
    rows, fans the per-utterance work out to a process pool via
    `process_wav`, then prints a utterances/frames/hours summary per split.
    """
    in_dir = Path(utils.to_absolute_path(cfg.in_dir))
    out_dir = (Path(utils.to_absolute_path('datasets')) / str(cfg.dataset.dataset))
    out_dir.mkdir(parents=True, exist_ok=True)
    # NOTE(review): the executor is never shut down explicitly; workers are
    # reaped at interpreter exit.
    executor = ProcessPoolExecutor(max_workers=cpu_count())
    for split in ['train', 'test']:
        print('Extracting features for {} set'.format(split))
        futures = []
        split_path = ((out_dir / cfg.dataset.language) / split)
        with open(split_path.with_suffix('.json')) as file:
            metadata = json.load(file)
        for (in_path, start, duration, out_path) in metadata:
            wav_path = (in_dir / in_path)
            out_path = (out_dir / out_path)
            out_path.parent.mkdir(parents=True, exist_ok=True)
            futures.append(executor.submit(partial(process_wav, wav_path, out_path, **cfg.preprocessing, offset=start, duration=duration)))
        results = [future.result() for future in tqdm(futures)]
        # Assumes the last element of each process_wav result is the frame
        # count — TODO confirm against process_wav's return value.
        lengths = [x[(- 1)] for x in results]
        frames = sum(lengths)
        # NOTE(review): despite the `_ms` suffix this is hop_length/sr,
        # i.e. seconds per frame (hours = frames * value / 3600).
        frame_shift_ms = (cfg.preprocessing.hop_length / cfg.preprocessing.sr)
        hours = ((frames * frame_shift_ms) / 3600)
        print('Wrote {} utterances, {} frames ({:.2f} hours)'.format(len(lengths), frames, hours))
def generate_combos():
    """Return every (sequential, random) benchmark workload pairing.

    Returns:
        list of 6 (seq, rand) tuples, sequential-major order.
    """
    sequential_workloads = ['readseq', 'readreverse']
    random_workloads = ['readrandom', 'readrandomwriterandom', 'mixgraph']
    return [(seq, rand) for seq in sequential_workloads for rand in random_workloads]
class ReduLayer(nn.Module):
    """Base class for ReduNet layers.

    Subclasses are expected to provide the operators `self.E` (expansion)
    and `self.Cs` (one compression operator per class), plus
    `self.num_classes`, `self.gam`, and the compute_gam / compute_E /
    compute_Cs methods used below — none of them are defined here.
    """

    def __init__(self):
        super(ReduLayer, self).__init__()

    def __name__(self):
        # NOTE(review): defining __name__ as a method is unusual; instances
        # expose a bound method rather than a string attribute.
        return 'ReduNet'

    def forward(self, Z):
        # Subclasses implement the actual layer computation.
        raise NotImplementedError

    def zero(self):
        """Reset E and every Cs weight to zeros via the state dict."""
        state_dict = self.state_dict()
        state_dict['E.weight'] = torch.zeros_like(self.E.weight)
        for j in range(self.num_classes):
            state_dict[f'Cs.{j}.weight'] = torch.zeros_like(self.Cs[j].weight)
        self.load_state_dict(state_dict)

    def init(self, X, y):
        """Initialize E, Cs, and gam from data X with labels y."""
        gam = self.compute_gam(X, y)
        E = self.compute_E(X)
        Cs = self.compute_Cs(X, y)
        self.set_params(E, Cs, gam)

    def update_old(self, X, y, tau):
        """Older update path: interpolate weights toward freshly computed
        values by step tau, writing through the state dict directly."""
        E = self.compute_E(X).to(X.device)
        Cs = self.compute_Cs(X, y).to(X.device)
        state_dict = self.state_dict()
        ref_E = self.E.weight
        ref_Cs = [self.Cs[j].weight for j in range(self.num_classes)]
        # p_new = p + tau * (target - p)
        new_E = (ref_E + (tau * (E - ref_E)))
        new_Cs = [(ref_Cs[j] + (tau * (Cs[j] - ref_Cs[j]))) for j in range(self.num_classes)]
        state_dict['E.weight'] = new_E
        for j in range(self.num_classes):
            state_dict[f'Cs.{j}.weight'] = new_Cs[j]
        self.load_state_dict(state_dict)

    def update(self, X, y, tau):
        """Interpolate current parameters toward values recomputed from
        (X, y): p <- p + tau * (p_new - p), applied via set_params."""
        (E_ref, Cs_ref) = self.get_params()
        E_new = self.compute_E(X).to(X.device)
        Cs_new = self.compute_Cs(X, y).to(X.device)
        E_update = (E_ref + (tau * (E_new - E_ref)))
        Cs_update = [(Cs_ref[j] + (tau * (Cs_new[j] - Cs_ref[j]))) for j in range(self.num_classes)]
        self.set_params(E_update, Cs_update)

    def set_params(self, E, Cs, gam=None):
        """Write E, Cs (and optionally gam) into the module via
        load_state_dict, asserting shapes match the existing parameters."""
        state_dict = self.state_dict()
        assert (self.E.weight.shape == E.shape), f'E shape does not match: {self.E.weight.shape} and {E.shape}'
        state_dict['E.weight'] = E
        for j in range(self.num_classes):
            assert (self.Cs[j].weight.shape == Cs[j].shape), f'Cj shape does not match'
            state_dict[f'Cs.{j}.weight'] = Cs[j]
        if (gam is not None):
            assert (self.gam.shape == gam.shape), 'gam shape does not match'
            state_dict['gam'] = gam
        self.load_state_dict(state_dict)

    def get_params(self):
        """Return (E.weight, [Cs[0].weight, ...]) without copying."""
        E = self.E.weight
        Cs = [self.Cs[j].weight for j in range(self.num_classes)]
        return (E, Cs)
class RandomHorizontalFlip():
    """Randomly flip an (image, target) pair horizontally.

    Tensors are flipped with F.hflip; anything else is assumed to be a PIL
    image and flipped via transpose(FLIP_LEFT_RIGHT). Image and target are
    always flipped together.
    """

    def __init__(self, p=0.5):
        # Probability of applying the flip.
        self.p = p

    def __call__(self, sample):
        """Return the (possibly flipped) sample."""
        # Guard clause: draw once, same distribution as the original check.
        if random.random() >= self.p:
            return sample
        image, target = sample
        if isinstance(image, torch.Tensor):
            image = F.hflip(image)
        else:
            image = image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
        if isinstance(target, torch.Tensor):
            target = F.hflip(target)
        else:
            target = target.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
        return (image, target)
# NOTE(review): the two `.parametrize(...)` lines below appear to be
# `@pytest.mark.parametrize` decorators whose `@pytest.mark` prefix was
# lost during extraction — confirm against the original test module.
.parametrize('dataset_type', [pytest.param('log_spark', marks=pytest.mark.spark), pytest.param('log', marks=pytest.mark.core)])
.parametrize('test_size', test_sizes)
def test_nothing_is_lost(test_size, dataset_type, request):
    """RandomSplitter must partition the log without dropping or duplicating rows.

    For both the pandas and Spark fixtures: train + test row counts equal
    the input, and the realized test fraction is within 0.01 of the
    requested `test_size`.
    """
    log = request.getfixturevalue(dataset_type)
    splitter = RandomSplitter(test_size=test_size, drop_cold_items=False, drop_cold_users=False, seed=SEED)
    (train, test) = splitter.split(log)
    if isinstance(log, pd.DataFrame):
        real_test_size = (test.shape[0] / len(log))
        assert ((train.shape[0] + test.shape[0]) == len(log))
    else:
        # Spark DataFrame branch: sizes come from count(), not len/shape.
        real_test_size = (test.count() / log.count())
        assert ((train.count() + test.count()) == log.count())
    assert np.isclose(real_test_size, test_size, atol=0.01)
# NOTE(review): this looks like a Hydra structured config whose @dataclass
# decorator was stripped during extraction — confirm against the original.
class PretrainConfig():
    """Top-level pretraining configuration schema.

    Composes model/dataset/accelerator/tracking sub-configs (all MISSING,
    i.e. required at composition time) and pins the Hydra run directory
    to a path derived from the model identifier and dataset name.
    """
    # Hydra defaults list used for config composition.
    defaults: List[Any] = field(default_factory=(lambda : DEFAULTS))
    # Hydra runtime settings; '${...}' are Hydra/OmegaConf interpolations.
    hydra: Dict[(str, Any)] = field(default_factory=(lambda : {'run': {'dir': 'runs/train/${model.identifier}+dataset-${dataset.name}'}}))
    run_id: Optional[str] = None
    seed: int = 21
    resume: bool = True
    # W&B run id to resume logging into, if any.
    wandb_resume_id: Optional[str] = None
    model: ModelConfig = MISSING
    dataset: DatasetConfig = MISSING
    accelerator: AcceleratorConfig = MISSING
    tracking: TrackingConfig = MISSING
class BaseRealBanditDataset(BaseBanditDataset):
    """Abstract interface for real-world bandit datasets.

    Concrete subclasses must implement both hooks below.
    """

    def load_raw_data(self) -> None:
        """Load the raw dataset (implemented by subclasses)."""
        raise NotImplementedError

    def pre_process(self) -> None:
        """Preprocess the loaded raw data (implemented by subclasses)."""
        raise NotImplementedError
@dataclass(frozen=True)
class ModelDeployment:
    """Immutable description of a deployed model.

    Restores the `@dataclass(frozen=True)` decorator that was stripped in
    this copy (the `object.__setattr__` call in `__post_init__` only makes
    sense for a frozen dataclass) and the `@property` decorators on the
    name-derived accessors.

    `name` is expected to have the form '<host_organization>/<engine>'.
    """

    name: str
    client_spec: ClientSpec
    model_name: Optional[str] = None
    tokenizer_name: Optional[str] = None
    window_service_spec: Optional[WindowServiceSpec] = None
    max_sequence_length: Optional[int] = None
    max_request_length: Optional[int] = None
    max_sequence_and_generated_tokens_length: Optional[int] = None
    deprecated: bool = False

    @property
    def host_organization(self) -> str:
        """Organization part of the deployment name (before the '/')."""
        return self.name.split('/')[0]

    @property
    def engine(self) -> str:
        """Engine part of the deployment name (after the '/')."""
        return self.name.split('/')[1]

    def __post_init__(self):
        # Default model_name to the deployment name; frozen dataclasses
        # require object.__setattr__ for post-init mutation.
        if not self.model_name:
            object.__setattr__(self, 'model_name', self.name)
def main():
    """Round-trip training programs through the template grammar.

    For every training example: parse with the template grammar, unparse it
    back, and compare against the canonicalized original program. On the
    first mismatch, dump both trees and both code strings and drop into an
    IPython shell for inspection.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', required=True)
    arg_parser.add_argument('--config-args')
    args = arg_parser.parse_args()

    # Evaluate the jsonnet config, optionally threading CLI args through
    # as a top-level argument.
    if args.config_args:
        config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
    else:
        config = json.loads(_jsonnet.evaluate_file(args.config))

    train_data = registry.construct('dataset', config['data']['train'])
    grammar = registry.construct('grammar', config['model']['decoder_preproc']['grammar'])
    base_grammar = registry.construct('grammar', config['model']['decoder_preproc']['grammar']['base_grammar'])

    for i, item in enumerate(tqdm.tqdm(train_data, dynamic_ncols=True)):
        parsed = grammar.parse(item.code, 'train')
        orig_parsed = base_grammar.parse(item.orig['orig'], 'train')
        canonicalized_orig_code = base_grammar.unparse(base_grammar.parse(item.orig['orig'], 'train'), item)
        unparsed = grammar.unparse(parsed, item)
        if canonicalized_orig_code == unparsed:
            continue
        print('Original tree:')
        pprint.pprint(orig_parsed)
        print('Rewritten tree:')
        pprint.pprint(parsed)
        print('Reconstructed tree:')
        pprint.pprint(grammar._expand_templates(parsed))
        print('Original code:')
        print(canonicalized_orig_code)
        print('Reconstructed code:')
        print(unparsed)
        import IPython
        IPython.embed()
        break
def main(root, tsv_path, ckpt_path, layer, nshard, rank, feat_dir, split, max_chunk):
    """Dump HuBERT features for one shard of an S2T manifest.

    Builds a feature reader from the checkpoint, gets the path iterator for
    this (rank, nshard) slice, and writes the extracted features to feat_dir.
    """
    feature_reader = HubertFeatureReaderS2T(ckpt_path, layer, max_chunk)
    path_iter, total = get_path_iterator(root, tsv_path, nshard, rank)
    dump_feature(feature_reader, path_iter, total, split, nshard, rank, feat_dir)
# NOTE(review): the bare `.node` line below looks like the remnant of a
# stripped decorator (presumably `@dace.library.node`) — confirm against
# the original source.
.node
class CSRMM(dace.sdfg.nodes.LibraryNode):
    """Library node for sparse-dense matrix multiply with A in CSR format.

    Per the property descriptions: computes alpha * (A @ B), adding
    beta-scaled C when beta != 0; B may optionally be transposed.
    """

    # Available lowerings, selected via the node's `implementation` field.
    implementations = {'pure': ExpandCSRMMPure, 'MKL': ExpandCSRMMMKL, 'cuSPARSE': ExpandCSRMMCuSPARSE}
    default_implementation = None
    transB = properties.Property(dtype=bool, desc='Whether to transpose B before multiplying')
    alpha = properties.Property(allow_none=False, default=1, desc='A scalar which will be multiplied with A B before adding C')
    beta = properties.Property(allow_none=False, default=0, desc='A scalar which will be multiplied with C before adding C')

    def __init__(self, name, location=None, transB=False, alpha=1, beta=0):
        # The `_cin` connector only exists when beta != 0, i.e. when the
        # existing C contributes to the output.
        super().__init__(name, location=location, inputs=({'_a_rows', '_a_cols', '_a_vals', '_b', '_cin'} if (beta != 0) else {'_a_rows', '_a_cols', '_a_vals', '_b'}), outputs={'_c'})
        self.transB = transB
        self.alpha = alpha
        self.beta = beta

    def validate(self, sdfg, state):
        """Validate connector count and shape consistency of all memlets.

        Collected (squeezed) subset sizes: size0=_a_rows, size1=_a_cols,
        size2=_a_vals, size3=_b, size4=_cin (optional), size5=output.
        """
        in_edges = state.in_edges(self)
        if (len(in_edges) not in [4, 5]):
            raise ValueError('Expected 4 or 5 inputs to CSRMM')
        size4 = None
        for (_, _, _, dst_conn, memlet) in state.in_edges(self):
            if (dst_conn == '_a_rows'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size0 = subset.size()
            if (dst_conn == '_a_cols'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size1 = subset.size()
            if (dst_conn == '_a_vals'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size2 = subset.size()
            if (dst_conn == '_b'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size3 = subset.size()
            if (dst_conn == '_cin'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size4 = subset.size()
        out_edges = state.out_edges(self)
        if (len(out_edges) != 1):
            raise ValueError('Expected exactly one output from matrix-matrix product')
        out_memlet = out_edges[0].data
        # NOTE(review): size3 is unbound if no '_b' edge exists; the earlier
        # edge-count check makes that unlikely but does not guarantee it.
        if (len(size3) != 2):
            raise ValueError('matrix-matrix product only supported on matrices')
        # CSR row-pointer array has (number of rows + 1) entries.
        A_rows = (size0[0] - 1)
        if self.transB:
            B_cols = size3[0]
        else:
            B_cols = size3[1]
        out_subset = dc(out_memlet.subset)
        out_subset.squeeze()
        size5 = out_subset.size()
        if ((size4 is not None) and (size4 != size5)):
            raise ValueError('Input C matrix must match output matrix.')
        if (len(size5) != 2):
            raise ValueError('matrix-matrix product only supported on matrices')
        if ((len(size5) == 2) and (list(size5) != [A_rows, B_cols])):
            raise ValueError('Output to matrix-matrix product must agree in the m and n dimensions')
class pseudo_audio():
    """Context manager that materializes random WAV files for tests.

    Creates one mono file per entry of `secs` (each `sec` seconds long at
    `sample_rate`) in a fresh temporary directory, and removes the whole
    directory on exit. Yields (filepaths, num_samples).
    """

    def __init__(self, secs: List[float], sample_rate: int=SAMPLE_RATE):
        # BUGFIX: the original used tempfile.TemporaryDirectory().name and
        # dropped the object — its finalizer may delete the directory at an
        # arbitrary GC point. mkdtemp() creates a directory that we own and
        # remove explicitly in __exit__.
        self.tempdir = Path(tempfile.mkdtemp())
        self.num_samples = []
        for (n, sec) in enumerate(secs):
            wav = torch.randn(1, round(sample_rate * sec))
            torchaudio.save(str(self.tempdir / f'audio_{n}.wav'), wav, sample_rate=sample_rate)
            self.num_samples.append(wav.size(-1))
        self.filepaths = [str(self.tempdir / f'audio_{i}.wav') for i in range(len(secs))]

    def __enter__(self):
        return (self.filepaths, self.num_samples)

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        # Always clean up the generated audio, even on exceptions.
        shutil.rmtree(self.tempdir)
def init_particles(x: ti.types.ndarray(ndim=1), v: ti.types.ndarray(ndim=1), J: ti.types.ndarray(ndim=1)):
    """Initialize MPM particle state in place.

    Positions are sampled uniformly from the square [0.2, 0.6)^2, the initial
    velocity points straight down, and the volume ratio J starts at 1.
    NOTE(review): presumably decorated with @ti.kernel in the original file
    (decorators are stripped in this extract); reads module-level n_particles.
    """
    for i in range(n_particles):
        x[i] = [((ti.random() * 0.4) + 0.2), ((ti.random() * 0.4) + 0.2)]
        v[i] = [0, (- 1)]
        J[i] = 1
def run_inference(args, ind_range=None, multi_gpu_testing=False):
    """Dispatch inference.

    A parent invocation (no ind_range) evaluates the whole dataset,
    optionally fanning out over multiple GPUs; a child invocation evaluates
    only its assigned index range.
    """
    if ind_range is None:
        # Parent process: run over the full dataset.
        return test_net_on_dataset(args, multi_gpu=multi_gpu_testing)
    # Child process: evaluate just this shard of the dataset.
    return test_net(args, ind_range=ind_range)
def get_ANLI_examples(prefix, hypo_only=False, path='/export/home/Dataset/para_entail_datasets/ANLI/anli_v0.1/'):
    """Load the ANLI rounds R1-R3 for a given split as InputExamples.

    Args:
        prefix: split file prefix ('train', 'dev' or 'test').
        hypo_only: if True, use only the hypothesis as text_a (no premise).
        path: dataset root directory (newly parameterized; the default
            preserves the previous hard-coded location).

    Returns:
        (examples, pos_size): the examples and the count of entailment pairs.
        Labels collapse ANLI's 'e' to 'entailment', everything else to
        'not_entailment'.
    """
    folders = ['R1', 'R2', 'R3']
    examples = []
    guid_id = 0
    pos_size = 0
    neg_size = 0
    for folder in folders:
        filename = f'{path}{folder}/{prefix}.jsonl'
        print('loading ANLI...', filename)
        with open(filename, 'r') as f:
            for line in json_lines.reader(f):
                guid_id += 1
                premise = line.get('context')
                hypothesis = line.get('hypothesis')
                label = 'entailment' if line.get('label') == 'e' else 'not_entailment'
                if len(premise) == 0 or len(hypothesis) == 0:
                    # Skip degenerate pairs (guid_id was already advanced,
                    # matching the original numbering).
                    continue
                if label == 'entailment':
                    pos_size += 1
                else:
                    neg_size += 1
                # Single construction point instead of two duplicated branches.
                text_a = hypothesis if hypo_only else premise
                text_b = None if hypo_only else hypothesis
                examples.append(InputExample(guid=str(guid_id), text_a=text_a, text_b=text_b, label=label))
    print('>>pos:neg: ', pos_size, neg_size)
    print('ANLI size:', len(examples))
    return (examples, pos_size)
def _plot_pixel_importance(attributions, image, polarity='positive', clip_above_percentile=99.0, clip_below_percentile=0, outlines_component_percentage=90, use_linear_transform=True, overlay=False):
    """Convert attribution maps into an RGB importance visualization.

    polarity selects which sign of attribution is kept: 'positive' is drawn
    in the green channel, 'negative' in the red channel, and 'both' recurses
    on each sign and sums the results. With `overlay` the visualization is
    blended onto `image` as 0.7*image + 0.5*attributions, clipped to [0, 255].

    NOTE(review): the 'both' branch returns floats when overlay is False,
    while the single-polarity branches return ints via astype(int) — confirm
    callers do not rely on the dtype.
    """
    if (polarity == 'both'):
        # Recurse once per sign, then combine the two colorized maps.
        pos_attributions = _plot_pixel_importance(attributions, image, polarity='positive', clip_above_percentile=clip_above_percentile, clip_below_percentile=clip_below_percentile, outlines_component_percentage=outlines_component_percentage, overlay=False)
        neg_attributions = _plot_pixel_importance(attributions, image, polarity='negative', clip_above_percentile=clip_above_percentile, clip_below_percentile=clip_below_percentile, outlines_component_percentage=outlines_component_percentage, overlay=False)
        attributions = (pos_attributions + neg_attributions)
        if overlay:
            attributions = np.clip(((0.7 * image) + (0.5 * attributions)), 0, 255)
        return attributions
    elif (polarity == 'positive'):
        attributions = np.clip(attributions, 0, 1)
        channel = [0, 255, 0]  # green for positive evidence
    elif (polarity == 'negative'):
        attributions = np.abs(np.clip(attributions, (- 1), 0))
        channel = [255, 0, 0]  # red for negative evidence
    # Collapse the color axis to a single importance score per pixel.
    attributions = np.mean(attributions, axis=2)
    if use_linear_transform:
        attributions = _linear_transform(attributions, clip_above_percentile, clip_below_percentile, 0.0)
    # Broadcast the scalar map onto the chosen RGB channel.
    attributions = (np.expand_dims(attributions, 2) * channel)
    if overlay:
        attributions = np.clip(((0.7 * image) + (0.5 * attributions)), 0, 255)
    return attributions.astype(int)
def test_fake_deps_only_root():
    """A dependency graph containing only a root must match the expected fixture."""
    augmented = maybe_add_fake_dependencies(ONLY_ROOT_EXAMPLE)
    assert augmented == ONLY_ROOT_EXPECTED
.script
def bias_gelu(y, bias):
    """Fused bias-add + tanh-approximated GELU.

    Computes x = bias + y, then the Hendrycks-Gimpel tanh approximation
    0.5*x*(1 + tanh(sqrt(2/pi)*x*(1 + 0.044715*x^2))), cast back to y's dtype.
    NOTE(review): presumably @torch.jit.script in the original (decorator
    stripped in this extract).

    BUGFIX: the constant had been garbled to `0.`, which makes tanh(0) = 0 and
    degrades the whole expression to 0.5*x; restored sqrt(2/pi) = 0.79788456.
    """
    x = bias + y
    return (x * 0.5 * (1.0 + torch.tanh((0.79788456 * x) * (1 + (0.044715 * x) * x)))).to(dtype=y.dtype)
def generate_gif(frames, path, size=(180, 180, 3), duration=(1 / 20)):
    """Resize every frame to `size` (nearest-neighbor, uint8) and write an
    animated GIF to `path`.

    The frames list is modified in place, so the caller sees the resized
    frames as well.
    """
    import imageio
    from skimage.transform import resize
    for position in range(len(frames)):
        resized = resize(frames[position], size, preserve_range=True, order=0)
        frames[position] = resized.astype(np.uint8)
    imageio.mimsave(path, frames, duration=duration)
def parse(task_log, tool_log, tool_output):
    """Parse a tool's raw output into a normalized result dictionary.

    Dispatches to the tool-specific parser, validates each finding's name and
    filename, and stamps every finding with the analyzed filename.
    Returns a dict with findings/infos/errors/fails plus parser metadata.
    Raises SmartBugsError if parsing fails or an unexpected finding appears.
    """
    tool = task_log['tool']
    filename = task_log['filename']
    exit_code = task_log['result']['exit_code']
    tool_parser = get_parser(tool)
    try:
        (findings, infos, errors, fails) = tool_parser.parse(exit_code, tool_log, tool_output)
        for finding in findings:
            # Reject finding names outside the parser's declared set.
            if (tool_parser.FINDINGS and (finding['name'] not in tool_parser.FINDINGS)):
                raise sb.errors.SmartBugsError(f"'{finding['name']}' not among the findings of {tool['id']}")
            # The finding's filename (if present) must suffix-match the analyzed file.
            assert ((not finding.get('filename')) or filename.endswith(finding['filename'].split('/')[(- 1)]))
            finding['filename'] = filename
    except Exception as e:
        # Any parser failure (including the checks above) is wrapped uniformly.
        raise sb.errors.SmartBugsError(f'''Parsing of results failed
{e}''')
    return {'findings': findings, 'infos': sorted(infos), 'errors': sorted(errors), 'fails': sorted(fails), 'parser': {'id': tool['id'], 'mode': tool['mode'], 'version': tool_parser.VERSION}}
.skip
def test_inline_lambda_scalar():
    """Regression test: a lambda bound to a local and called inside dace.map
    must inline to an elementwise A = B + C.

    NOTE(review): `lamb` is presumably a @dace.program in the original file
    (decorators are stripped in this extract).
    """
    def lamb(A: dace.float64[20], B: dace.float64[20], C: dace.float64[20]):
        f = (lambda a, b: (a + b))
        for i in dace.map[0:20]:
            A[i] = f(B[i], C[i])
    A = np.random.rand(20)
    B = np.random.rand(20)
    C = np.random.rand(20)
    lamb(A, B, C)
    assert np.allclose(A, (B + C))
def dump_conv2d_nobn(name='Conv2d_1x1'):
    """Dump a no-batchnorm InceptionResnetV2 conv layer to an HDF5 file.

    Extracts weights, biases, strides, padding and the reference BiasAdd
    activation from the live TensorFlow graph and writes them to
    dump/InceptionResnetV2/<name>.h5. Relies on the module-level `sess`
    (a session with the graph loaded) and `make_padding`.
    """
    scope = 'InceptionResnetV2/' + name
    conv_operation = sess.graph.get_operation_by_name(scope + '/Conv2D')
    weights_tensor = sess.graph.get_tensor_by_name(scope + '/weights:0')
    weights = weights_tensor.eval()
    biases_tensor = sess.graph.get_tensor_by_name(scope + '/biases:0')
    biases = biases_tensor.eval()
    padding = make_padding(conv_operation.get_attr('padding'), weights_tensor.get_shape())
    strides = conv_operation.get_attr('strides')
    # Reference output after bias addition, for verifying ported weights.
    conv_out = sess.graph.get_operation_by_name(scope + '/BiasAdd').outputs[0].eval()
    # os.makedirs replaces the shell call `mkdir -p`; the context manager
    # guarantees the HDF5 file is closed even if a dataset write fails.
    os.makedirs('dump/' + scope, exist_ok=True)
    with h5py.File('dump/' + scope + '.h5', 'w') as h5f:
        h5f.create_dataset('weights', data=weights)
        h5f.create_dataset('biases', data=biases)
        h5f.create_dataset('strides', data=strides)
        h5f.create_dataset('padding', data=padding)
        h5f.create_dataset('conv_out', data=conv_out)
class Sequential(torch.nn.Sequential):
    """A Sequential variant with three extra behaviors:

    * accepts a single OrderedDict of (name, module) pairs,
    * silently drops falsy entries (e.g. None) from positional args while
      keeping the string indices of the remaining modules contiguous,
    * forwards *args/**kwargs only to the first module, then chains outputs.
    """

    def __init__(self, *args):
        super(Sequential, self).__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            for name, module in args[0].items():
                self.add_module(name, module)
            return
        kept = 0  # contiguous index over the non-falsy modules
        for module in args:
            if not module:
                continue
            self.add_module(str(kept), module)
            kept += 1

    def forward(self, *args, **kwargs):
        """Feed the inputs into the first module and chain through the rest."""
        for position, module in enumerate(self._modules.values()):
            if position:
                result = module(result)
            else:
                result = module(*args, **kwargs)
        return result
class DIN(BaseModel):
    """Deep Interest Network (DIN) for click-through-rate prediction.

    Attention is computed between the candidate-item ("query") embeddings and
    the user's behavior-sequence ("key") embeddings; the pooled interest
    vector is concatenated with the remaining features and scored by a DNN.
    The constructor arguments mirror the DeepCTR-Torch DIN API.
    """

    def __init__(self, dnn_feature_columns, history_feature_list, dnn_use_bn=False, dnn_hidden_units=(256, 128), dnn_activation='relu', att_hidden_size=(64, 16), att_activation='Dice', att_weight_normalization=False, l2_reg_dnn=0.0, l2_reg_embedding=1e-06, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary', device='cpu'):
        super(DIN, self).__init__([], dnn_feature_columns, l2_reg_linear=0, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        # Partition the feature columns: plain sparse vs variable-length sparse.
        self.sparse_feature_columns = (list(filter((lambda x: isinstance(x, SparseFeat)), dnn_feature_columns)) if dnn_feature_columns else [])
        self.varlen_sparse_feature_columns = (list(filter((lambda x: isinstance(x, VarLenSparseFeat)), dnn_feature_columns)) if dnn_feature_columns else [])
        self.history_feature_list = history_feature_list
        self.history_feature_columns = []
        self.sparse_varlen_feature_columns = []
        # Behavior-history columns are named 'hist_<feature>' by convention.
        self.history_fc_names = list(map((lambda x: ('hist_' + x)), history_feature_list))
        for fc in self.varlen_sparse_feature_columns:
            feature_name = fc.name
            if (feature_name in self.history_fc_names):
                self.history_feature_columns.append(fc)
            else:
                self.sparse_varlen_feature_columns.append(fc)
        att_emb_dim = self._compute_interest_dim()
        self.attention = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, embedding_dim=att_emb_dim, activation=att_activation, return_score=False, supports_masking=False, weight_normalization=att_weight_normalization)
        self.dnn = DNN(inputs_dim=self.compute_input_dim(dnn_feature_columns), hidden_units=dnn_hidden_units, activation=dnn_activation, dropout_rate=dnn_dropout, l2_reg=l2_reg_dnn, use_bn=dnn_use_bn)
        self.dnn_linear = nn.Linear(dnn_hidden_units[(- 1)], 1, bias=False).to(device)
        self.to(device)

    def forward(self, X):
        """Score a feature matrix X (columns laid out per self.feature_index);
        returns the predicted click probability per row."""
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        # Query: candidate-item embeddings; keys: behavior-history embeddings.
        query_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_feature_columns, self.history_feature_list, self.history_feature_list, to_list=True)
        keys_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.history_feature_columns, self.history_fc_names, self.history_fc_names, to_list=True)
        dnn_input_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_feature_columns, mask_feat_list=self.history_feature_list, to_list=True)
        # Pool the non-history variable-length features before the DNN.
        sequence_embed_dict = varlen_embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_varlen_feature_columns)
        sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, X, self.feature_index, self.sparse_varlen_feature_columns, self.device)
        dnn_input_emb_list += sequence_embed_list
        query_emb = torch.cat(query_emb_list, dim=(- 1))
        keys_emb = torch.cat(keys_emb_list, dim=(- 1))
        # NOTE(review): keys_length is hard-coded to 1 per sample here; the
        # reference implementation derives true behavior-sequence lengths
        # from X — confirm this simplification is intentional.
        keys_length = torch.ones((query_emb.size(0), 1)).to(self.device)
        deep_input_emb = torch.cat(dnn_input_emb_list, dim=(- 1))
        hist = self.attention(query_emb, keys_emb, keys_length)
        deep_input_emb = torch.cat((deep_input_emb, hist), dim=(- 1))
        deep_input_emb = deep_input_emb.view(deep_input_emb.size(0), (- 1))
        dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
        dnn_output = self.dnn(dnn_input)
        dnn_logit = self.dnn_linear(dnn_output)
        y_pred = self.out(dnn_logit)
        return y_pred

    def _compute_interest_dim(self):
        """Total embedding width of the sparse features that participate in attention."""
        interest_dim = 0
        for feat in self.sparse_feature_columns:
            if (feat.name in self.history_feature_list):
                interest_dim += feat.embedding_dim
        return interest_dim
class MjrRectWrapper(object):
    """Pythonic accessor wrapper around a ctypes pointer to an mjrRect struct.

    BUGFIX/NOTE(review): in this extract every accessor was a plain ``def``,
    so each (name, value) setter silently shadowed the getter of the same
    name (``left``, ``bottom``, ``width``, ``height``) — evidence that
    ``@property``/``@<name>.setter`` decorators were stripped. They are
    restored here so reads and writes both work.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped      # ctypes pointer to the underlying struct
        self._size_src = size_src

    @property
    def ptr(self):
        """The raw wrapped pointer."""
        return self._wrapped

    @property
    def obj(self):
        """The dereferenced struct (pointer.contents)."""
        return self._wrapped.contents

    @property
    def left(self):
        return self._wrapped.contents.left

    @left.setter
    def left(self, value):
        self._wrapped.contents.left = value

    @property
    def bottom(self):
        return self._wrapped.contents.bottom

    @bottom.setter
    def bottom(self, value):
        self._wrapped.contents.bottom = value

    @property
    def width(self):
        return self._wrapped.contents.width

    @width.setter
    def width(self, value):
        self._wrapped.contents.width = value

    @property
    def height(self):
        return self._wrapped.contents.height

    @height.setter
    def height(self, value):
        self._wrapped.contents.height = value
def try_rewrite_ast_with_print(code):
    """Best-effort AST rewrite.

    Falls back to the unmodified source when the rewrite raises; the error is
    printed rather than propagated.
    """
    try:
        rewritten = rewrite_ast_with_print(code)
    except Exception as err:
        print(err)
        return code
    return rewritten
def apply_hysteresis_threshold(image, low, high):
    """Hysteresis thresholding.

    A pixel is kept when it exceeds `low` AND belongs to a connected
    component (of the >low mask) that contains at least one pixel
    exceeding `high`. Returns a boolean array shaped like `image`.
    """
    low = np.clip(low, a_min=None, a_max=high)  # ensure low is always <= high
    labeled_low, num_components = ndi.label(image > low)
    # Count strong (>high) pixels per component; index 0 is the background.
    strong_counts = ndi.sum(image > high, labeled_low, np.arange(num_components + 1))
    keep_component = strong_counts > 0
    # Broadcast the per-component decision back onto the pixel grid.
    return keep_component[labeled_low]
class FairseqTask(object):
    """Base class for fairseq tasks: owns the per-split datasets and knows how
    to build models, criteria and batch iterators.

    NOTE(review): `add_args` is presumably a @staticmethod and `setup_task`
    a @classmethod in the original (decorators are stripped in this extract).
    """

    def add_args(parser):
        """Add task-specific arguments to the parser (no-op in the base class)."""
        pass

    def __init__(self, args):
        self.args = args
        self.datasets = {}  # split name -> FairseqDataset

    def setup_task(cls, args, **kwargs):
        """Alternate constructor used by the task registry."""
        return cls(args)

    def load_dataset(self, split, combine=False):
        """Load the given split into self.datasets; subclasses must implement."""
        raise NotImplementedError

    def dataset(self, split):
        """Return the loaded FairseqDataset for `split`.

        Raises:
            KeyError: the split was never loaded.
            TypeError: the stored object is not a FairseqDataset.
        """
        from fairseq.data import FairseqDataset
        if (split not in self.datasets):
            raise KeyError(('Dataset not loaded: ' + split))
        if (not isinstance(self.datasets[split], FairseqDataset)):
            raise TypeError('Datasets are expected to be of type FairseqDataset')
        return self.datasets[split]

    def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0):
        """Build a sharded, epoch-aware batch iterator over `dataset`.

        Ordering happens under a fixed numpy seed so every shard derives the
        same global order; overlong examples are filtered (or raise, if
        ignore_invalid_inputs is False); batches respect token/sentence caps.
        """
        assert isinstance(dataset, FairseqDataset)
        # Size-ordered indices, deterministic across workers.
        with data_utils.numpy_seed(seed):
            indices = dataset.ordered_indices()
        # Drop (or raise on) examples exceeding max_positions.
        indices = data_utils.filter_by_size(indices, dataset.size, max_positions, raise_exception=(not ignore_invalid_inputs))
        batch_sampler = data_utils.batch_by_size(indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple)
        return iterators.EpochBatchIterator(dataset=dataset, collate_fn=dataset.collater, batch_sampler=batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id)

    def build_model(self, args):
        """Instantiate the model for this task."""
        from fairseq import models
        return models.build_model(args, self)

    def build_criterion(self, args):
        """Instantiate the training criterion (loss) for this task."""
        from fairseq import criterions
        return criterions.build_criterion(args, self)

    def get_loss(self, model, criterion, sample, is_valid=False):
        # is_valid is accepted for API symmetry but unused in the base class.
        return criterion(model, sample)

    def max_positions(self):
        """Maximum input length supported by the task (None = unlimited)."""
        return None

    def source_dictionary(self):
        """Source-side vocabulary. NOTE(review): likely a @property originally."""
        raise NotImplementedError

    def target_dictionary(self):
        """Target-side vocabulary. NOTE(review): likely a @property originally."""
        raise NotImplementedError
_model
def skresnet34(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a Selective-Kernel ResNet-34.

    Uses the lightweight SK attention configuration from the reference setup
    (at least 16 attention channels, reduction 8, split input). Optionally
    loads pretrained weights.
    """
    selective_kernel_args = dict(min_attn_channels=16, attn_reduction=8, split_input=True)
    cfg = default_cfgs['skresnet34']
    model = ResNet(SelectiveKernelBasic, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, block_args=dict(sk_kwargs=selective_kernel_args), zero_init_last_bn=False, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, num_classes, in_chans)
    return model
def _constant_fill(g, sizes, dtype, const_value):
    """ONNX symbolic helper: emit a ConstantFill node whose shape comes from
    the `sizes` input (input_as_shape) with every element set to `const_value`.

    `dtype` is a PyTorch scalar-type index; None falls back to 6 (float).
    ConstantFill only supports float fill values, so non-floating dtypes are
    filled as Float first and then cast to the requested type.
    """
    if (dtype is None):
        dtype = 6  # 6 == float in the scalar-type table
    if (not sym_help.scalar_type_to_pytorch_type[dtype].is_floating_point):
        # Fill as Float, then cast to the integer/bool target dtype.
        result = g.op('ConstantFill', sizes, dtype_i=sym_help.cast_pytorch_to_onnx['Float'], input_as_shape_i=1, value_f=const_value)
        return sym_help._cast_func_template(sym_help.scalar_type_to_onnx[dtype], g, result, None)
    else:
        return g.op('ConstantFill', sizes, dtype_i=sym_help.scalar_type_to_onnx[dtype], input_as_shape_i=1, value_f=const_value)
class Critic(nn.Module):
    """Twin Q-networks (TD3-style): two independent 3-layer MLPs mapping a
    concatenated (state, action) pair to scalar Q-values."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # First Q-network.
        self.l1 = nn.Linear(state_dim + action_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)
        # Second Q-network (independent parameters).
        self.l4 = nn.Linear(state_dim + action_dim, 256)
        self.l5 = nn.Linear(256, 256)
        self.l6 = nn.Linear(256, 1)

    def forward(self, state, action):
        """Return (Q1, Q2) estimates for the given state-action batch."""
        joined = torch.cat([state, action], 1)
        hidden1 = F.relu(self.l2(F.relu(self.l1(joined))))
        first_q = self.l3(hidden1)
        hidden2 = F.relu(self.l5(F.relu(self.l4(joined))))
        second_q = self.l6(hidden2)
        return (first_q, second_q)

    def Q1(self, state, action):
        """Return only the first Q estimate (used for the actor update)."""
        joined = torch.cat([state, action], 1)
        hidden = F.relu(self.l2(F.relu(self.l1(joined))))
        return self.l3(hidden)
def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
    """Load (and cache, keyed by (url, device)) a feature-detector network.

    In multi-GPU runs the download is serialized: followers wait on a barrier
    while rank 0 fetches the file, then the leader releases them — so each
    node downloads at most once.
    """
    assert (0 <= rank < num_gpus)
    key = (url, device)
    if (key not in _feature_detector_cache):
        is_leader = (rank == 0)
        if ((not is_leader) and (num_gpus > 1)):
            torch.distributed.barrier()  # followers wait for the leader's download
        with open_url(url, verbose=(verbose and is_leader)) as f:
            if urlparse(url).path.endswith('.pkl'):
                # SECURITY NOTE(review): pickle.load executes arbitrary code —
                # only pass trusted URLs to this function.
                _feature_detector_cache[key] = pickle.load(f).to(device)
            else:
                _feature_detector_cache[key] = torch.jit.load(f).eval().to(device)
        if (is_leader and (num_gpus > 1)):
            torch.distributed.barrier()  # leader done; release the followers
    return _feature_detector_cache[key]
class SequencePredictor():
    """Applies a builder callable element-wise over input sequences."""

    def __init__(self, builder):
        # builder: callable invoked once per input element.
        self.builder = builder

    def predict_sequence(self, inputs):
        """Return the list [builder(x) for each x in inputs]."""
        return list(map(self.builder, inputs))
def backup_code(save_path, save_parent=False, ignored_in_current_folder=None, marked_in_parent_folder=None):
    """Snapshot the running script's source tree into <save_path>/backup_code.

    Writes host/user/CLI metadata to 'CLI argument.txt', copies the entry
    script, mirrors the script's directory (minus ignored entries), and
    optionally copies marked sibling directories and loose files from the
    parent folder.

    Args:
        save_path: destination root; backup_code/ must not already exist.
        save_parent: also back up the parent folder's marked dirs and files.
        ignored_in_current_folder: entry names to skip in the script dir.
        marked_in_parent_folder: parent-dir names to copy when save_parent.
    """
    if ignored_in_current_folder is None:
        ignored_in_current_folder = ['tmp', 'log', 'data', '__pycache__', 'output', 'sythc_data']
    if marked_in_parent_folder is None:
        marked_in_parent_folder = ['mylib']
    backup_code_dir = os.path.join(save_path, 'backup_code')
    os.makedirs(backup_code_dir)  # intentionally fails if a backup already exists
    with open(os.path.join(backup_code_dir, 'CLI argument.txt'), 'w') as f:
        res = ''.join(['hostName: ', socket.gethostname(), '\n', 'account: ', getpass.getuser(), '\n', 'save_path: ', os.path.realpath(save_path), '\n', 'CUDA_VISIBLE_DEVICES: ', str(os.environ.get('CUDA_VISIBLE_DEVICES')), '\n'])
        f.write(res)
        for arg in sys.argv:
            f.write(arg + '\n')
    shutil.copy(sys.argv[0], backup_code_dir)  # the entry-point script itself
    current_folder_name = os.path.basename(sys.path[0])
    os.makedirs(os.path.join(backup_code_dir, current_folder_name))
    for entry in os.listdir(sys.path[0]):
        if entry in ignored_in_current_folder:
            continue
        # BUGFIX: the original called os.path.isdir/isfile on the bare entry
        # name, i.e. relative to the current working directory; when cwd
        # differs from the script directory, entries were misclassified and
        # silently skipped. Resolve against sys.path[0] first.
        full_path = os.path.join(sys.path[0], entry)
        if os.path.isdir(full_path):
            shutil.copytree(full_path, os.path.join(backup_code_dir, current_folder_name, entry))
        elif os.path.isfile(full_path):
            shutil.copy(full_path, os.path.join(backup_code_dir, current_folder_name))
        else:
            print('{} is a special file(socket, FIFO, device file) that would not be backup.'.format(entry))
    if save_parent:
        os.makedirs(os.path.join(backup_code_dir, 'parent_folder_files'))
        # BUGFIX: list the script's parent directory, not cwd's parent
        # (the original listed '../' relative to cwd but joined paths
        # against sys.path[0], which disagree when cwd != script dir).
        parent_dir = os.path.join(sys.path[0], '../')
        for entry in os.listdir(parent_dir):
            parent_entry = os.path.join(parent_dir, entry)
            if os.path.isdir(parent_entry) and entry in marked_in_parent_folder:
                shutil.copytree(parent_entry, os.path.join(backup_code_dir, entry))
            elif os.path.isfile(parent_entry):
                shutil.copy(parent_entry, os.path.join(backup_code_dir, 'parent_folder_files'))
class MultilayerTest(unittest.TestCase):
    """Checks dnner entropy computation on a fixed two-layer linear model."""

    def setUp(self):
        """Create deterministic (alpha, eigenvalue-spectrum) pairs for both layers."""
        warnings.filterwarnings('ignore')
        np.random.seed(42)  # fixtures must be reproducible across runs
        w2 = (np.random.randn(20, 50) / np.sqrt(50))
        w1 = (np.random.randn(50, 100) / np.sqrt(100))
        # alpha = rows/cols aspect ratio of each weight matrix.
        alpha2 = (float(w2.shape[0]) / w2.shape[1])
        alpha1 = (float(w1.shape[0]) / w1.shape[1])
        # Spectra of W^T W, as expected by dnner.
        eigvals2 = np.linalg.eigvalsh(w2.T.dot(w2))
        eigvals1 = np.linalg.eigvalsh(w1.T.dot(w1))
        self.weights = [(alpha1, eigvals1), (alpha2, eigvals2)]

    def tearDown(self):
        pass

    def test_Normal_Linear_Linear(self):
        """Entropy of a Normal prior + two linear layers matches the reference value."""
        layers = [Normal(0, 1), Linear(0), Linear(0.01)]
        entropy_expected = 0.244437  # precomputed reference value
        entropy = dnner.compute_entropy(layers=layers, weights=self.weights, verbose=0)
        self.assertAlmostEqual(entropy, entropy_expected, places=6)

    def test_save_fixed_points(self):
        """Running 10 iterations straight must equal 5 iterations resumed from saved state."""
        layers = [Normal(0, 1), Linear(0), Linear(0.01)]
        entropy1 = dnner.compute_entropy(layers=layers, weights=self.weights, v0=[(1, 1)], max_iter=10)
        (_, extra) = dnner.compute_entropy(layers=layers, weights=self.weights, return_extra=True, v0=[(1, 1)], max_iter=5)
        (entropy2, _) = dnner.compute_entropy(layers=layers, weights=self.weights, return_extra=True, start_at=extra, max_iter=5)
        self.assertEqual(entropy1, entropy2)
('coref')
class ConllCorefReader(DatasetReader):
    """Reads CoNLL-2012 coreference files and produces coref instances.

    Each '#begin document ... #end document' block becomes one Instance with
    the document text, every candidate span up to `max_span_width`, and —
    when gold annotations are present — a cluster-id label per span.

    NOTE(review): decorators were stripped in this extract — the class is
    presumably registered under 'coref', `from_params` is a @classmethod and
    `_normalize_word` a @staticmethod (it declares no `self` yet is called
    as self._normalize_word(...)).
    """

    def __init__(self, max_span_width: int, token_indexers: Dict[(str, TokenIndexer)]=None) -> None:
        # max_span_width: maximum span length (in tokens) enumerated per sentence.
        self._max_span_width = max_span_width
        self._token_indexers = (token_indexers or {'tokens': SingleIdTokenIndexer()})
        # Matches e.g. "#begin document (bc/cctv/00/cctv_0000); part 000".
        self._begin_document_regex = re.compile('#begin document \\((.*)\\); part (\\d+)')

    def read(self, file_path: str):
        """Parse every document in the file into an Instance; return a Dataset."""
        file_path = cached_path(file_path)  # resolve remote/cached paths
        logger.info('Reading file at %s', file_path)
        instances = []
        with open(file_path) as dataset_file:
            document_state = _DocumentState()
            for line in dataset_file:
                if self._begin_document_regex.match(line):
                    document_state = _DocumentState()  # start a fresh document
                elif line.startswith('#end document'):
                    # Document complete: canonicalize clusters and emit an instance.
                    document_state.assert_document_is_finished()
                    clusters = document_state.canonicalize_clusters()
                    instance = self.text_to_instance(document_state.sentences, clusters)
                    instances.append(instance)
                else:
                    self._handle_line(line, document_state)
        if (not instances):
            raise ConfigurationError('No instances were read from the given filepath {}. Is the path correct?'.format(file_path))
        return Dataset(instances)

    def text_to_instance(self, sentences: List[List[str]], gold_clusters: Optional[List[List[Tuple[(int, int)]]]]=None) -> Instance:
        """Build an Instance from tokenized sentences.

        Produces the flattened text field, parallel start/end IndexFields for
        every candidate span, metadata (original text and, if given, gold
        clusters), and per-span cluster-id labels (-1 = not a gold mention).
        """
        flattened_sentences = [token for sentence in sentences for token in sentence]
        metadata: Dict[(str, Any)] = {'original_text': flattened_sentences}
        if (gold_clusters is not None):
            metadata['clusters'] = gold_clusters
        text_field = TextField([Token(word) for word in flattened_sentences], self._token_indexers)
        cluster_dict = {}
        if (gold_clusters is not None):
            # Map each gold mention (start, end) to its cluster id.
            for (cluster_id, cluster) in enumerate(gold_clusters):
                for mention in cluster:
                    cluster_dict[tuple(mention)] = cluster_id
        span_starts: List[Field] = []
        span_ends: List[Field] = []
        span_labels: Optional[List[int]] = ([] if (gold_clusters is not None) else None)
        sentence_offset = 0
        for sentence in sentences:
            # Enumerate all spans up to _max_span_width within each sentence,
            # using document-level token offsets.
            for start_index in range(len(sentence)):
                for end_index in range(start_index, min((start_index + self._max_span_width), len(sentence))):
                    start = (sentence_offset + start_index)
                    end = (sentence_offset + end_index)
                    if (span_labels is not None):
                        if ((start, end) in cluster_dict):
                            span_labels.append(cluster_dict[(start, end)])
                        else:
                            span_labels.append((- 1))  # not a gold mention
                    span_starts.append(IndexField(start, text_field))
                    span_ends.append(IndexField(end, text_field))
            sentence_offset += len(sentence)
        span_starts_field = ListField(span_starts)
        span_ends_field = ListField(span_ends)
        metadata_field = MetadataField(metadata)
        fields: Dict[(str, Field)] = {'text': text_field, 'span_starts': span_starts_field, 'span_ends': span_ends_field, 'metadata': metadata_field}
        if (span_labels is not None):
            fields['span_labels'] = SequenceLabelField(span_labels, span_starts_field)
        return Instance(fields)

    def from_params(cls, params: Params) -> 'ConllCorefReader':
        """Construct a reader from a Params config. NOTE(review): presumably @classmethod."""
        token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
        max_span_width = params.pop('max_span_width')
        params.assert_empty(cls.__name__)
        return cls(token_indexers=token_indexers, max_span_width=max_span_width)

    def _normalize_word(word):
        """Strip the escaping slash from '/.' and '/?'. NOTE(review): presumably @staticmethod."""
        if ((word == '/.') or (word == '/?')):
            return word[1:]
        else:
            return word

    def _handle_line(self, line: str, document_state: _DocumentState) -> None:
        """Consume one CoNLL line.

        Blank lines end a sentence; otherwise the 4th column is the word and
        the last column encodes coreference spans ('(12', '12)', '(12)',
        joined by '|').
        """
        row = line.split()
        if (not row):
            document_state.complete_sentence()
        else:
            if (len(row) < 12):
                raise ConfigurationError('Encountered a non-empty line with fewer than 12 entries - this does not match the CONLL format.: {}'.format(row))
            word = self._normalize_word(row[3])
            coref = row[(- 1)]
            word_index = document_state.num_total_words
            document_state.add_word(word)
            if (coref != '-'):
                for segment in coref.split('|'):
                    if (segment[0] == '('):
                        if (segment[(- 1)] == ')'):
                            # Single-word mention: opens and closes on this token.
                            cluster_id = int(segment[1:(- 1)])
                            document_state.clusters[cluster_id].append((word_index, word_index))
                        else:
                            # Mention opens here: remember the start index.
                            cluster_id = int(segment[1:])
                            document_state.coref_stacks[cluster_id].append(word_index)
                    else:
                        # Mention closes here: pop the matching start.
                        cluster_id = int(segment[:(- 1)])
                        start = document_state.coref_stacks[cluster_id].pop()
                        document_state.clusters[cluster_id].append((start, word_index))
def build_gauss_wavefront_xy(nx, ny, ekev, xMin, xMax, yMin, yMax, sigX, sigY, d2waist, xoff=0.0, yoff=0.0, tiltX=0.0, tiltY=0.0, pulseEn=None, pulseTau=None, repRate=None, _mx=None, _my=None):
    """Build an SRW Gaussian-beam wavefront on an (nx x ny) transverse mesh.

    Args mirror the SRW Gaussian-beam model: photon energy `ekev` (keV),
    mesh bounds in x/y, waist sigmas, distance-to-waist `d2waist`, optional
    beam offsets/tilts, pulse energy/duration, repetition rate, and
    transverse mode indices _mx/_my.

    Returns the populated srwlib.SRWLWfr.
    """
    GsnBm = srwlib.SRWLGsnBm()
    GsnBm.x = xoff
    GsnBm.y = yoff
    GsnBm.z = 0
    GsnBm.xp = tiltX
    GsnBm.yp = tiltY
    GsnBm.avgPhotEn = (ekev * 1000.0)  # keV -> eV
    if (pulseEn is not None):
        GsnBm.pulseEn = pulseEn
    else:
        GsnBm.pulseEn = 0.001
    if (repRate is not None):
        GsnBm.repRate = repRate
    else:
        GsnBm.repRate = 1
    GsnBm.polar = 1  # linear horizontal polarization
    GsnBm.sigX = sigX
    GsnBm.sigY = sigY
    if (pulseTau is not None):
        GsnBm.sigT = pulseTau
    else:
        GsnBm.sigT = 2e-16
    if (_mx is not None):
        GsnBm.mx = _mx
    else:
        GsnBm.mx = 0
    # BUGFIX: the original guarded `my` with `_mx is not None` (copy-paste),
    # so passing only _my was ignored and passing only _mx set my = None.
    if (_my is not None):
        GsnBm.my = _my
    else:
        GsnBm.my = 0
    wfr = srwlib.SRWLWfr()
    wfr.allocate(1, nx, ny)  # single photon energy, nx x ny transverse points
    wfr.mesh.eStart = GsnBm.avgPhotEn
    wfr.mesh.eFin = GsnBm.avgPhotEn
    wfr.avgPhotEn = ((wfr.mesh.eStart + wfr.mesh.eFin) / 2)
    wfr.mesh.zStart = d2waist
    wfr.mesh.xStart = xMin
    wfr.mesh.xFin = xMax
    wfr.mesh.yStart = yMin
    wfr.mesh.yFin = yMax
    wfr.presFT = 0  # frequency-domain representation
    wfr.partBeam.partStatMom1.x = GsnBm.x
    wfr.partBeam.partStatMom1.y = GsnBm.y
    wfr.partBeam.partStatMom1.z = GsnBm.z
    wfr.partBeam.partStatMom1.xp = GsnBm.xp
    wfr.partBeam.partStatMom1.yp = GsnBm.yp
    if ((pulseEn is None) and (pulseTau is None)):
        wfr.unitElFld = 0  # arbitrary units when the pulse is unspecified
    sampFactNxNyForProp = (- 1)  # -1: keep the mesh exactly as allocated
    arPrecPar = [sampFactNxNyForProp]
    srwlpy.CalcElecFieldGaussian(wfr, GsnBm, arPrecPar)
    return wfr
def generate_vuv(condition, cat_input):
    """Generate a voiced/unvoiced decision sequence with the newest snapshot
    model and return it as a uint8 numpy array."""
    vuv_model = load_latest_model_from(2, 'snapshots/vuv')
    generated = vuv_model.generate(condition, cat_input).squeeze()
    return generated.cpu().numpy().astype(np.uint8)
def _child_of(node: SDFGState, parent: SDFGState, ptree: Dict[(SDFGState, SDFGState)]) -> bool:
curnode = node
while (curnode is not None):
if (curnode is parent):
return True
curnode = ptree[curnode]
return False |
class MentionCandidatesTranslator(FromParams):
    """Translates a mention->entity-title dictionary between languages using
    interwiki title links plus per-language entity databases of
    (id, mention, count) records."""

    def __init__(self, inter_wiki_path: str, multilingual_entity_db_path: Dict[(str, str)]=None):
        # inter_wiki_path: serialized InterwikiDB with cross-language title links.
        # multilingual_entity_db_path: language code -> EntityDB file path.
        self.inter_wiki_db = InterwikiDB.load(inter_wiki_path)
        multilingual_entity_db_path = (multilingual_entity_db_path or {})
        self.entity_db_dict = {lang: EntityDB(path) for (lang, path) in multilingual_entity_db_path.items()}

    def __call__(self, mention_candidates: Dict[(str, str)], source_language: str, target_language: str) -> Dict[(str, str)]:
        """Return a mention->title dict in the target language.

        Titles with no interwiki translation are dropped; mentions that end
        up pointing at more than one translated title are considered
        ambiguous and removed entirely.
        """
        assert (target_language in self.entity_db_dict)
        source_titles = list(mention_candidates.values())
        target_titles = []
        for title in source_titles:
            translated_title = self.inter_wiki_db.get_title_translation(title, source_language, target_language)
            if (translated_title is not None):
                target_titles.append(translated_title)
        target_mention_candidates = dict()
        ambiguous_mentions = set()
        entity_db = self.entity_db_dict[target_language]
        for target_title in target_titles:
            for (_, mention, count) in entity_db.query(target_title):
                if (mention in target_mention_candidates):
                    # A second title for the same mention: mark ambiguous and drop.
                    ambiguous_mentions.add(mention)
                    del target_mention_candidates[mention]
                if (mention not in ambiguous_mentions):
                    target_mention_candidates[mention] = target_title
        return target_mention_candidates
class ConfigParser():
    """Loads a YAML configuration, defaulting to configs/default.yaml next to
    this module, and guarantees that the 'bayesian' and 'stats_detector'
    sections each contain a 'sigmas' mapping."""

    def __init__(self, file_path):
        base_dir = os.path.dirname(os.path.abspath(__file__))
        if file_path is None:
            # Fall back to the bundled default configuration.
            file_path = os.path.join(base_dir, 'configs/default.yaml')
        with open(file_path, 'r') as handle:
            self.config = yaml.safe_load(handle)
        # Both detector sections must always expose a sigmas mapping.
        for section in ('bayesian', 'stats_detector'):
            if 'sigmas' not in self.config[section]:
                self.config[section]['sigmas'] = {}

    def get_parameters(self, name):
        """Return the raw configuration sub-dictionary for `name`."""
        return self.config[name]
def GetHits_PDirNet(Graph, NIdHubH, NIdAuthH, MaxIter=20):
    """Run the HITS algorithm on a SNAP directed network.

    Thin wrapper over the SWIG-generated binding: hub and authority scores
    are written into the NIdHubH / NIdAuthH hash tables in place, and
    MaxIter caps the number of power iterations.
    """
    return _snap.GetHits_PDirNet(Graph, NIdHubH, NIdAuthH, MaxIter)
class _DecoratorBaseClass():
_stack_length = {}
def get_stack_length(self, func):
return self._stack_length.get(func.__name__, _get_stack_length(func)) |
class ThreeInterpolate(Function):
    """PointNet++ three-nearest-neighbor feature interpolation (CUDA op).

    NOTE(review): forward/backward are presumably decorated with
    @staticmethod in the original (decorators are stripped in this extract);
    all tensors must be contiguous CUDA tensors.
    """

    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """Interpolate features (B, c, m) onto n target points.

        idx / weight hold, per target point, the 3 source-point indices and
        their interpolation weights. Returns a (B, c, n) tensor.
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()
        (B, c, m) = features.size()
        n = idx.size(1)
        # Stash what backward needs; m is the number of source points.
        ctx.three_interpolate_for_backward = (idx, weight, m)
        output = torch.cuda.FloatTensor(B, c, n)
        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output

    def backward(ctx, grad_out: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """Scatter grad_out (B, c, n) back onto the m source points.

        Only `features` receives a gradient; idx and weight get None.
        """
        (idx, weight, m) = ctx.three_interpolate_for_backward
        (B, c, n) = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return (grad_features, None, None)
def pickle_dump(python_object, file_path):
    """Pickle `python_object` to `file_path`, creating parent dirs first."""
    make_parent(file_path)
    with open(file_path, mode='wb') as sink:
        pickle.dump(python_object, sink)
def push_graphs_to_main_directory(model_dirname, name):
    """Publish a model's artifact files into the shared graph_outputs tree.

    Every *.svg / *.csv / *.pdf file, plus 'evaluate.json', found in
    `model_dirname` is copied to graph_outputs/<file stem>/<name>.<ext>,
    so runs with different `name`s accumulate side by side.

    The original repeated the same listdir/copy loop four times; the copy
    logic is factored into one helper.
    """
    def _publish(filename, stem, suffix):
        # One artifact -> graph_outputs/<stem>/<name>.<suffix>
        target_dir = os.path.join('graph_outputs', stem)
        os.makedirs(target_dir, exist_ok=True)
        shutil.copyfile(os.path.join(model_dirname, filename),
                        os.path.join(target_dir, name + '.' + suffix))

    entries = os.listdir(model_dirname)
    for suffix in ('svg', 'csv', 'pdf'):
        for f in entries:
            # Suffix match (not '.ext') to preserve the original behavior.
            if f.endswith(suffix):
                _publish(f, f[:-(len(suffix) + 1)], suffix)
    for f in entries:
        if f == 'evaluate.json':
            _publish(f, f[:-5], 'json')
def mk_lean_function_auto_soundness_theorem(func: LeanFunctionInfo, lean_info: LeanProgramInfo, assembly_info: LeanAssemblyInfo, out):
    """Emit the auto-generated soundness theorem for `func` to the `out` stream."""
    generator = LeanSoundnessGen(func=func, lean_info=lean_info, assembly_info=assembly_info)
    generator.gen_blocks()
    for theorem_line in generator.gen_func_proofs():
        print(theorem_line, file=out)
def pytest_addoption(parser):
    """pytest hook: register the timeout command-line option and ini keys.

    NOTE(review): TIMEOUT_DESC / FUNC_ONLY_DESC are help strings defined
    elsewhere in the file.
    """
    group = parser.getgroup('timeout', 'Interrupt test run and dump stacks of all threads after a test times out')
    group.addoption('--timeout', type=float, help=TIMEOUT_DESC)
    # Ini equivalents so projects can configure timeouts in pytest.ini/setup.cfg.
    parser.addini('timeout', TIMEOUT_DESC)
    parser.addini('timeout_func_only', FUNC_ONLY_DESC, type='bool')
def test__get_reference_position_multi(sample_test_case):
    """Positions 0, 2 and 3 must all be reported as references for target 0."""
    expected_positions = {0, 2, 3}
    assert tf.TestFactory._get_reference_positions(sample_test_case, 0) == expected_positions
class Resnet3dCSNiRLight(Resnet3dEmbeddingMultiDecoder):
    """Light channel-separated (CSN-iR) 3D ResNet with two light decoders:
    a default DecoderLight and a transposed-conv DecoderLight with e_dim
    output classes."""

    def __init__(self, tw=16, sample_size=112, e_dim=7):
        # tw: temporal window (frames); sample_size: spatial input size;
        # e_dim: output channels of the second (conv_t) decoder.
        super(Resnet3dCSNiRLight, self).__init__(decoders=[DecoderLight(), DecoderLight(n_classes=e_dim, conv_t=True)])
        self.encoder = Encoder3d_csn_ir(tw, sample_size)
def segment_f1(segments: List[dict], segments_gold: List[dict]) -> float:
    """Return the F1 score between predicted and gold segments.

    Edge cases: when either list is empty, the score is 1 if both are empty
    (trivially perfect) and 0 otherwise; a zero precision or recall also
    yields 0.

    BUGFIX: the original returned precision*recall/(precision+recall),
    omitting the factor 2 of the harmonic mean — every score was halved
    (a perfect match scored 0.5). F1 = 2PR/(P+R).
    """
    if not segments_gold or not segments:
        return 1 if len(segments) == len(segments_gold) else 0
    precision = segment_precision(segments, segments_gold)
    recall = segment_recall(segments, segments_gold)
    if precision <= 0 or recall <= 0:
        return 0
    return (2 * precision * recall) / (precision + recall)
def test_array_copy_outside_scope():
    """MapFission regression test.

    Builds a map over i=0:10 wrapping a nested SDFG whose final edge is a
    plain access->access copy (transient -> output); the copy must survive
    the transformation. Verifies out == inp + 1 end to end.
    """
    sdfg = dace.SDFG('array_copy_outside_scope')
    (iname, _) = sdfg.add_array('inp', (10,), dtype=dace.int32)
    (oname, _) = sdfg.add_array('out', (10,), dtype=dace.int32)
    # Nested SDFG: ninp -> tasklet(+1) -> ntmp -> (copy edge) -> nout.
    nsdfg = dace.SDFG('nested_sdfg')
    (niname, nidesc) = nsdfg.add_array('ninp', (1,), dtype=dace.int32)
    (ntname, ntdesc) = nsdfg.add_scalar('ntmp', dtype=dace.int32, transient=True)
    (noname, nodesc) = nsdfg.add_array('nout', (1,), dtype=dace.int32)
    nstate = nsdfg.add_state('nmain')
    ninode = nstate.add_access(niname)
    ntnode = nstate.add_access(ntname)
    nonode = nstate.add_access(noname)
    tasklet = nstate.add_tasklet('tasklet', {'__inp'}, {'__out'}, '__out = __inp + 1')
    nstate.add_edge(ninode, None, tasklet, '__inp', dace.Memlet.from_array(niname, nidesc))
    nstate.add_edge(tasklet, '__out', ntnode, None, dace.Memlet.from_array(ntname, ntdesc))
    # The connector-less copy edge under test.
    nstate.add_nedge(ntnode, nonode, dace.Memlet.from_array(noname, nodesc))
    # Outer SDFG: map over i feeding one element into the nested SDFG.
    state = sdfg.add_state('main')
    inode = state.add_access(iname)
    onode = state.add_access(oname)
    (me, mx) = state.add_map('map', {'i': '0:10'})
    snode = state.add_nested_sdfg(nsdfg, None, {'ninp'}, {'nout'})
    state.add_memlet_path(inode, me, snode, memlet=dace.Memlet(data=iname, subset='i'), dst_conn='ninp')
    state.add_memlet_path(snode, mx, onode, memlet=dace.Memlet(data=oname, subset='i'), src_conn='nout')
    sdfg.apply_transformations(MapFission)
    A = np.arange(10, dtype=np.int32)
    B = np.empty((10,), dtype=np.int32)
    sdfg(inp=A, out=B)
    assert np.array_equal((A + 1), B)
def collate_by_len(data, budget=((256 ** 2) * 64)):
    """Batch variable-length (x, y, label) samples under a quadratic budget.

    Samples are sorted by descending length of their first element and then
    greedily grouped: each group holds roughly budget // len(longest)**2
    samples, but never fewer than 16. Each group is collated into a tuple
    of three padded tensors via pad_sequence.
    """
    ordered = sorted(data, key=lambda sample: len(sample[0]), reverse=True)
    total = len(data)
    batches = []
    start = 0
    while start < total:
        # Cost model: a sequence of length L costs L**2; the first (longest)
        # sample of the group determines the per-sample cost.
        per_sample_cost = len(ordered[start][0]) ** 2
        count = max(budget // per_sample_cost, 16)
        stop = min(total, start + count)
        batches.append(ordered[start:stop])
        start += count
    collated = []
    for batch in batches:
        xs, ys, labels = zip(*batch)
        collated.append(tuple(
            pad_sequence([torch.tensor(seq) for seq in group])
            for group in (xs, ys, labels)
        ))
    return collated
def process_generators_chain(gen_string, dim, base_ring=None):
    """Parse CHomP textual output into homology generators for dimension ``dim``.

    Returns a list of vectors over ``base_ring`` (default ZZ), built from the
    terms listed under the ``[H_dim]`` heading, or None when the heading (or
    its body) is absent.
    """
    deprecation(33777, 'the CHomP interface is deprecated')
    from sage.modules.free_module_element import vector
    from sage.rings.integer_ring import ZZ
    if base_ring is None:
        base_ring = ZZ
    # Grab the text following "[H_dim]" up to the next "[" heading (or EOF).
    g_srch = re.compile('\\[H_%s\\]\\n([^]]*)(?:\\[|$)' % dim)
    g = g_srch.search(gen_string)
    if g:
        g = g.group(1)
    if g:
        lines = g.splitlines()
        new_gens = []
        # One generator term looks like "[+-][coeff][*]a<index>". The pattern
        # is loop-invariant, so compile it once (was recompiled per line).
        term_re = re.compile('([+-]?)\\s?([0-9]+)?\\s?[*]?\\s?a([0-9]*)(?:\\s|$)')
        for l in lines:
            v = {}
            for term in term_re.finditer(l):
                # A '-' in the sign group negates; an omitted coefficient means 1.
                sign = -1 if (term.group(1) and '-' in term.group(1)) else 1
                if term.group(2):
                    coeff = sign * int(term.group(2))
                else:
                    coeff = sign
                idx = int(term.group(3))
                # CHomP indices are 1-based; vector positions are 0-based.
                v[idx - 1] = coeff
            if v:
                new_gens.append(vector(base_ring, v))
        g = new_gens
    return g
class DocumentState(OntoNotesDocumentState):
    """OntoNotes document state that serializes to a flat example dict."""

    def __init__(self, key):
        super().__init__(key)

    def finalize(self):
        """Run final processing and return the JSON-serializable document."""
        self.final_processing()
        return dict(
            doc_key=self.doc_key,
            sentences=self.segments,
            clusters=self.merged_clusters,
            sentence_map=self.sentence_map,
            subtoken_map=self.subtoken_map,
        )
class MultiHeadAttention(nn.Module):
    """Multi-head attention over grouped (block-wise) projections.

    Queries, keys and values are projected with GroupLinearLayer (one linear
    per read/write block), attended with a top-k ScaledDotProductAttention,
    then mixed back to d_model_out. With joined_heads_write=True a single
    value projection of size d_v is shared by all heads and the head outputs
    are averaged instead of concatenated.
    """
    def __init__(self, n_head, d_model_read, d_model_write, d_model_out, d_k, d_v, num_blocks_read, num_blocks_write, topk, grad_sparse, residual=True, dropout=0.1, skip_write=False, joined_heads_write=False):
        super().__init__()
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        self.joined_heads_write = joined_heads_write
        # Grouped projections for queries (read side) and keys (write side).
        self.GLN_qs = GroupLinearLayer(d_model_read, (n_head * d_k), num_blocks_read)
        self.GLN_ks = GroupLinearLayer(d_model_write, (n_head * d_k), num_blocks_write)
        if joined_heads_write:
            print('joined heads write layer')
            print('output size dv', d_v, 'num groups', num_blocks_write)
            # Single shared value projection (total size d_v, not per-head).
            self.GLN_vs = GroupLinearLayer(d_model_write, d_v, num_blocks_write)
        else:
            self.GLN_vs = GroupLinearLayer(d_model_write, (n_head * d_v), num_blocks_write)
        self.residual = residual
        # Scaled dot-product attention with sqrt(d_k) temperature and top-k sparsity.
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5), topk=topk, grad_sparse=grad_sparse)
        self.gate_fc = nn.Linear((n_head * d_v), d_model_out)
        if (not skip_write):
            self.fc = nn.Linear((n_head * d_v), d_model_out)
        else:
            # skip_write: identity in place of the output projection.
            self.fc = (lambda a: a)
        self.dropout = nn.Dropout(dropout)
        # NOTE(review): self.ln and the local `residual` in forward() are set
        # but never applied in the forward pass below — confirm intent.
        self.ln = nn.LayerNorm(d_model_out)

    def forward(self, q, k, v, mask=None):
        """Attend q over (k, v); returns (output, attn weights, extra loss).

        `mask` is accepted for API compatibility but ignored — the inner
        attention is always called with mask=None.
        """
        (d_k, d_v, n_head) = (self.d_k, self.d_v, self.n_head)
        (sz_b, len_q, _) = q.size()
        (sz_b, len_k, _) = k.size()
        (sz_b, len_v, _) = v.size()
        residual = q
        # Project and split into heads: (batch, len, n_head, d).
        q = self.GLN_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.GLN_ks(k).view(sz_b, len_k, n_head, d_k)
        if self.joined_heads_write:
            # Shared value projection, replicated across heads.
            v = self.GLN_vs(v).view(sz_b, len_v, 1, d_v).repeat(1, 1, n_head, 1)
        else:
            v = self.GLN_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold heads into the batch dimension: (n_head * batch, len, d).
        q = q.permute(2, 0, 1, 3).contiguous().view((- 1), len_q, d_k)
        k = k.permute(2, 0, 1, 3).contiguous().view((- 1), len_k, d_k)
        v = v.permute(2, 0, 1, 3).contiguous().view((- 1), len_v, d_v)
        (output, attn, extra_loss) = self.attention(q, k, v, mask=None)
        output = output.view(n_head, sz_b, len_q, d_v)
        if self.joined_heads_write:
            # Average the head outputs instead of concatenating.
            output = output.mean(0).view(sz_b, len_q, d_v)
        else:
            output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, (- 1))
        output_init = (output * 1.0)
        output = self.dropout(self.fc(output_init))
        if self.residual:
            # Gated output: sigmoid gate computed from pre-projection activations.
            gate = torch.sigmoid(self.gate_fc(output_init))
            output = (gate * torch.tanh(output))
        else:
            pass
        return (output, attn, extra_loss)
def _sentence_case(text: Any) -> Any:
return (str(text).capitalize() if pd.notna(text) else text) |
@pytest.mark.parametrize('estimator', all_survival_function_estimators())
@pytest.mark.parametrize('y_time', [(- 1e-08), (- 1), np.finfo(float).min])
def test_fit_negative_survial_time_raises(estimator, y_time):
    """Fitting with any negative survival time must raise ValueError.

    Fix: the two decorator lines had lost their '@pytest.mark' prefix
    (a bare '.parametrize(...)' line is a syntax error); restored.
    """
    X = np.random.randn(7, 3)
    y = Surv.from_arrays(event=np.ones(7, dtype=bool), time=[1, 9, 3, y_time, 1, 8, .0])
    with pytest.raises(ValueError, match='observed time contains values smaller zero'):
        estimator.fit(X, y)
def check_results(documents, expected_conllu, expected_txt, expected_labels):
    """Write an 'orchid' train section to a temp dir and verify its outputs.

    Compares the generated conllu (stripped), raw text, and token labels
    against the expected strings; text and labels must be equally long.
    """
    with tempfile.TemporaryDirectory() as out_dir:
        write_section(out_dir, 'orchid', 'train', documents)

        def read_output(filename):
            with open(os.path.join(out_dir, filename)) as handle:
                return handle.read()

        conllu = read_output('th_orchid.train.gold.conllu').strip()
        txt = read_output('th_orchid.train.txt')
        labels = read_output('th_orchid-ud-train.toklabels')
    assert conllu == expected_conllu
    assert txt == expected_txt
    assert labels == expected_labels
    # One label per character of the text output.
    assert len(txt) == len(labels)
def iter_traceback(tb=None, enforce_most_recent_call_first=False):
    """Yield frame(-like) objects from a traceback, frame, or StackSummary.

    ``tb`` may be a traceback (walked via tb_next), a frame (walked outward
    via f_back), or a StackSummary (entries wrapped as frames — DummyFrame
    for plain FrameSummary entries). Defaults to the current frame. With
    ``enforce_most_recent_call_first=True``, a traceback's frames are
    materialized and yielded in reversed order.
    """
    if (tb is None):
        tb = get_current_frame()

    def is_stack_summary(_tb):
        return isinstance(_tb, StackSummary)
    is_frame = inspect.isframe
    is_traceback = inspect.istraceback
    assert (is_traceback(tb) or is_frame(tb) or is_stack_summary(tb))
    if (is_traceback(tb) and enforce_most_recent_call_first):
        # Recurse once to collect in natural tb_next order, then reverse.
        frames = list(iter_traceback(tb))
        for frame in frames[::(- 1)]:
            (yield frame)
        return
    _tb = tb
    while (_tb is not None):
        # Normalize the current element to a frame(-like) object.
        if is_frame(_tb):
            frame = _tb
        elif is_stack_summary(_tb):
            if isinstance(_tb[0], ExtendedFrameSummary):
                frame = _tb[0].tb_frame
            else:
                frame = DummyFrame.from_frame_summary(_tb[0])
        else:
            frame = _tb.tb_frame
        (yield frame)
        # Advance: frames walk outward (f_back), summaries drop their head
        # entry, tracebacks walk inward (tb_next).
        if is_frame(_tb):
            _tb = _tb.f_back
        elif is_stack_summary(_tb):
            _tb = StackSummary.from_list(_tb[1:])
            if (not _tb):
                _tb = None
        else:
            _tb = _tb.tb_next
def skip(*filenames):
    """Return True iff every given path exists as a regular file.

    With no arguments the result is vacuously True.
    """
    return all(os.path.isfile(name) for name in filenames)
def triplet_margin_loss_gor(anchor, positive, negative1, negative2, beta=1.0, margin=1.0, p=2, eps=1e-06, swap=False):
    """Triplet margin loss with two negatives plus global orthogonal regularization.

    The hinge compares the anchor-positive distance against the average of
    the two anchor-negative distances; the GOR term penalizes the mean
    squared inner product between the anchor and each negative, weighted by
    ``beta``. ``swap`` is accepted for API compatibility but unused.
    """
    assert (anchor.size() == positive.size()), 'Input sizes between positive and negative must be equal.'
    assert (anchor.size() == negative1.size()), 'Input sizes between anchor and negative must be equal.'
    assert (positive.size() == negative2.size()), 'Input sizes between positive and negative must be equal.'
    assert (anchor.dim() == 2), 'Inputd must be a 2D matrix.'
    assert (margin > 0.0), 'Margin should be positive value.'

    dist_pos = pairwise_distance(anchor, positive, p, eps)
    dist_neg1 = pairwise_distance(anchor, negative1, p, eps)
    dist_neg2 = pairwise_distance(anchor, negative2, p, eps)
    hinge = torch.clamp(margin + dist_pos - 0.5 * (dist_neg1 + dist_neg2), min=0.0)

    def _gor(negative):
        # Mean squared inner product <anchor, negative> over the batch.
        return torch.mean(torch.pow(torch.sum(torch.mul(anchor, negative), 1), 2))

    return torch.mean(hinge) + beta * (_gor(negative1) + _gor(negative2))
def recognize_coxeter_type_from_matrix(coxeter_matrix, index_set):
    """Reconstruct a CoxeterType from a Coxeter matrix.

    Builds the Coxeter graph (an edge for every entry m_ij not in {1, 2}),
    splits it into connected components, identifies each component's type —
    rank 1 and 2 directly, higher ranks by labeled-graph isomorphism against
    a candidate list — and returns the combined CoxeterType. Returns None
    when some component matches no candidate.
    """
    n = ZZ(coxeter_matrix.nrows())
    # Vertices = index_set; edge (i, j) labeled m_ij whenever the generators
    # do not commute trivially (m_ij not 1 on the diagonal, not 2 commuting).
    G = Graph([index_set, [(index_set[i], index_set[j], coxeter_matrix[(i, j)]) for i in range(n) for j in range(i, n) if (coxeter_matrix[(i, j)] not in [1, 2])]], format='vertices_and_edges')
    types = []
    for S in G.connected_components_subgraphs():
        r = S.num_verts()
        if (r == 1):
            # Isolated vertex: type A1.
            types.append(CoxeterType(['A', 1]).relabel({1: S.vertices(sort=True)[0]}))
            continue
        if (r == 2):
            # Rank 2 is fully determined by the single edge label.
            e = S.edge_labels()[0]
            if (e == 3):
                ct = CoxeterType(['A', 2])
            elif (e == 4):
                ct = CoxeterType(['B', 2])
            elif (e == 6):
                ct = CoxeterType(['G', 2])
            elif ((e > 0) and (e < float('inf'))):
                ct = CoxeterType(['I', e])
            else:
                # Infinite bond: affine type A1^(1).
                ct = CoxeterType(['A', 1, 1])
            SV = S.vertices(sort=True)
            # Affine types index their nodes from 0, finite types from 1.
            if (not ct.is_affine()):
                types.append(ct.relabel({1: SV[0], 2: SV[1]}))
            else:
                types.append(ct.relabel({0: SV[0], 1: SV[1]}))
            continue
        # Rank >= 3: assemble the candidate finite/affine types of this rank.
        test = [['A', r], ['B', r], ['A', (r - 1), 1]]
        if (r >= 3):
            if (r == 3):
                test += [['G', 2, 1], ['H', 3]]
            test.append(['C', (r - 1), 1])
        if (r >= 4):
            if (r == 4):
                test += [['F', 4], ['H', 4]]
            test += [['D', r], ['B', (r - 1), 1]]
        if (r >= 5):
            if (r == 5):
                test.append(['F', 4, 1])
            test.append(['D', (r - 1), 1])
        if (r == 6):
            test.append(['E', 6])
        elif (r == 7):
            test += [['E', 7], ['E', 6, 1]]
        elif (r == 8):
            test += [['E', 8], ['E', 7, 1]]
        elif (r == 9):
            test.append(['E', 8, 1])
        found = False
        for ct in test:
            ct = CoxeterType(ct)
            T = ct.coxeter_graph()
            # Edge-labeled isomorphism; 'match' is the vertex relabeling map.
            (iso, match) = T.is_isomorphic(S, certificate=True, edge_labels=True)
            if iso:
                types.append(ct.relabel(match))
                found = True
                break
        if (not found):
            return None
    return CoxeterType(types)
def argumenttype_type(t: Type, *, mutable: bool) -> str:
    """Translate a JIT type into its dispatcher C++ type string.

    Under the full c10 dispatcher the modern cpp translation is used;
    otherwise the legacy dispatcher's translation applies.
    """
    if local.use_c10_dispatcher() is not UseC10Dispatcher.full:
        return legacy_dispatcher.argumenttype_type(t, mutable=mutable)
    return cpp.argumenttype_type(t, mutable=mutable)
def marching_cubes(fn, c1, c2, reso, isosurface, chunk):
    """Extract an isosurface mesh from a density field.

    Evaluates ``fn``'s sigma output on a regular grid spanning the box
    c1..c2 at resolution ``reso`` and runs marching cubes at level
    ``isosurface``. Only JAX host 0 builds and returns the mesh
    (vertices, triangles); other hosts return (None, None).
    """
    # Dense evaluation grid over the box, flattened to (N, 3) points.
    grid = np.vstack(np.meshgrid(*(np.linspace(lo, hi, sz, dtype=np.float32) for (lo, hi, sz) in zip(c1, c2, reso)), indexing='ij')).reshape(3, (- 1)).T
    h0print('* Evaluating sigma ', grid.shape[0], 'points')
    (rgbs, sigmas) = utils.eval_points(fn, grid, chunk)
    sigmas = sigmas.reshape(*reso)
    # Colors are not needed for the geometry; free them early.
    del rgbs
    if (jax.host_id() == 0):
        print('* Running marching cubes')
        (vertices, triangles) = mcubes.marching_cubes(sigmas, isosurface)
        # Rescale vertices from voxel units to world units, then shift to c1.
        (c1, c2) = (np.array(c1), np.array(c2))
        vertices *= ((c2 - c1) / np.array(reso))
        return ((vertices + c1), triangles)
    return (None, None)
class Data2VecTextForCausalLM(metaclass=DummyObject):
    """Import-time placeholder for the real model class.

    Any attempt to instantiate it goes through requires_backends, which
    raises a helpful error when the 'torch' backend is unavailable.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_cursor_path(sqlite_path: str):
    """Open (or create) the SQLite database at *sqlite_path* and return a cursor.

    The connection's text_factory decodes bytes leniently (errors ignored) so
    malformed text columns do not raise. On connection failure the path is
    printed and the exception re-raised.

    Fix: ``sqlite3.connect`` was previously inside the ``not exists`` branch,
    so opening an EXISTING database raised NameError on ``connection``; the
    connect call now always runs, and the branch only logs the creation case.
    """
    try:
        if (not os.path.exists(sqlite_path)):
            print(('Openning a new connection %s' % sqlite_path))
        connection = sqlite3.connect(sqlite_path)
    except Exception:
        print(sqlite_path)
        raise
    connection.text_factory = (lambda b: b.decode(errors='ignore'))
    cursor = connection.cursor()
    return cursor
def conv_block_d(input_tensor, f, use_norm=False, k=3, strides=2):
    """Discriminator conv block: [reflect pad] -> conv -> [instance norm] -> LeakyReLU.

    Reflection padding is skipped for 1x1 kernels; the conv uses a bias
    only when no normalization follows it.
    """
    x = input_tensor
    if k != 1:
        x = ReflectPadding2D(x)
    x = Conv2D(f, kernel_size=k, strides=strides,
               kernel_regularizer=regularizers.l2(w_l2),
               kernel_initializer=conv_init,
               use_bias=(not use_norm))(x)
    if use_norm:
        x = normalization(x, 'instancenorm', f)
    return LeakyReLU(alpha=0.2)(x)
class _ConvBnReLU(nn.Sequential):
    """Conv2d -> batch norm -> (optional) ReLU, packaged as a Sequential."""

    # Exposed so callers can inspect/patch the norm layer class in use.
    BATCH_NORM = _BATCH_NORM

    def __init__(self, in_ch, out_ch, kernel_size, stride, padding, dilation, relu=True):
        super().__init__()
        # Bias is disabled on the conv since the batch norm absorbs it.
        self.add_module('conv', nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding, dilation, bias=False))
        # momentum = 1 - 0.999 matches a 0.999 EMA-decay convention.
        self.add_module('bn', _BATCH_NORM(out_ch, eps=1e-05, momentum=(1 - 0.999)))
        if relu:
            self.add_module('relu', nn.ReLU())
def get_path_to_root_old(node_idx, struct):
    """Return node indices along the root-to-node_idx path, root first.

    Walks parents via get_parent until a negative index marks the root.
    """
    chain = []
    current = node_idx
    while current >= 0:
        chain.append(current)
        current = get_parent(current, struct)
    chain.reverse()
    return chain
def detect_special_tokens(word):
    """Collapse numbers, @-mentions and URLs into their special tokens.

    A token parseable as int maps to SPECIAL_TOKENS[4]; otherwise a
    mention maps to SPECIAL_TOKENS[2] and a URL to SPECIAL_TOKENS[3].
    Anything else is returned unchanged.
    """
    try:
        int(word)
    except ValueError:
        if AtMentionRegex.findall(word):
            return SPECIAL_TOKENS[2]
        if urlRegex.findall(word):
            return SPECIAL_TOKENS[3]
        return word
    return SPECIAL_TOKENS[4]
class GANTask(GPUTask):
    """Train InfoGAN-CR on dSprites as a GPU task.

    Builds latents, networks, metric callbacks and the INFOGAN_CR trainer
    entirely from self._config, then runs training. Imports are kept local
    to main() so they execute inside the worker process.
    """
    def main(self):
        import os
        import tensorflow as tf
        from gan.load_data import load_dSprites
        from gan.latent import UniformLatent, JointLatent
        from gan.network import Decoder, InfoGANDiscriminator, CrDiscriminator, MetricRegresser
        from gan.infogan_cr import INFOGAN_CR
        from gan.metric import FactorVAEMetric, DSpritesInceptionScore, DHSICMetric
        (data, metric_data, latent_values, metadata) = load_dSprites('data/dSprites')
        (_, height, width, depth) = data.shape
        # One regularized 1-D uniform latent per reg dim, plus (optionally)
        # a single unregularized block for the remaining dims.
        latent_list = []
        for i in range(self._config['uniform_reg_dim']):
            latent_list.append(UniformLatent(in_dim=1, out_dim=1, low=(- 1.0), high=1.0, q_std=1.0, apply_reg=True))
        if (self._config['uniform_not_reg_dim'] > 0):
            latent_list.append(UniformLatent(in_dim=self._config['uniform_not_reg_dim'], out_dim=self._config['uniform_not_reg_dim'], low=(- 1.0), high=1.0, q_std=1.0, apply_reg=False))
        latent = JointLatent(latent_list=latent_list)
        decoder = Decoder(output_width=width, output_height=height, output_depth=depth)
        infoGANDiscriminator = InfoGANDiscriminator(output_length=latent.reg_out_dim, q_l_dim=self._config['q_l_dim'])
        crDiscriminator = CrDiscriminator(output_length=latent.num_reg_latent)
        shape_network = MetricRegresser(output_length=3, scope_name='dSpritesSampleQualityMetric_shape')
        # Work-dir layout: checkpoints, visual samples, timing log, metric CSV.
        checkpoint_dir = os.path.join(self._work_dir, 'checkpoint')
        if (not os.path.exists(checkpoint_dir)):
            os.makedirs(checkpoint_dir)
        sample_dir = os.path.join(self._work_dir, 'sample')
        if (not os.path.exists(sample_dir)):
            os.makedirs(sample_dir)
        time_path = os.path.join(self._work_dir, 'time.txt')
        metric_path = os.path.join(self._work_dir, 'metric.csv')
        run_config = tf.ConfigProto()
        with tf.Session(config=run_config) as sess:
            factorVAEMetric = FactorVAEMetric(metric_data, sess=sess)
            dSpritesInceptionScore = DSpritesInceptionScore(sess=sess, do_training=False, data=data, metadata=metadata, latent_values=latent_values, network_path='metric_model/DSprites', shape_network=shape_network, sample_dir=sample_dir)
            dHSICMetric = DHSICMetric(sess=sess, data=data)
            metric_callbacks = [factorVAEMetric, dSpritesInceptionScore, dHSICMetric]
            # Every trainer hyperparameter is forwarded from self._config.
            gan = INFOGAN_CR(sess=sess, checkpoint_dir=checkpoint_dir, sample_dir=sample_dir, time_path=time_path, epoch=self._config['epoch'], batch_size=self._config['batch_size'], data=data, vis_freq=self._config['vis_freq'], vis_num_sample=self._config['vis_num_sample'], vis_num_rep=self._config['vis_num_rep'], latent=latent, decoder=decoder, infoGANDiscriminator=infoGANDiscriminator, crDiscriminator=crDiscriminator, gap_start=self._config['gap_start'], gap_decrease_times=self._config['gap_decrease_times'], gap_decrease=self._config['gap_decrease'], gap_decrease_batch=self._config['gap_decrease_batch'], cr_coe_start=self._config['cr_coe_start'], cr_coe_increase_times=self._config['cr_coe_increase_times'], cr_coe_increase=self._config['cr_coe_increase'], cr_coe_increase_batch=self._config['cr_coe_increase_batch'], info_coe_de=self._config['info_coe_de'], info_coe_infod=self._config['info_coe_infod'], metric_callbacks=metric_callbacks, metric_freq=self._config['metric_freq'], metric_path=metric_path, output_reverse=self._config['output_reverse'], de_lr=self._config['de_lr'], infod_lr=self._config['infod_lr'], crd_lr=self._config['crd_lr'], summary_freq=self._config['summary_freq'])
            gan.build()
            gan.train()
def delexicaliseDomain(utt, dictionary, domain):
    """Replace whole-word dictionary keys in *utt* with their values.

    Two passes over ``dictionary`` (a sequence of (key, value) pairs):
    first only entries whose key equals ``domain`` or 'value', then every
    entry. Matches are whole-word (space-delimited), which the padding
    trick below enforces at the utterance boundaries too.
    """
    def _swap(text, key, val):
        # Pad with one space each side so boundary words match ' key '.
        padded = ' ' + text + ' '
        return padded.replace(' ' + key + ' ', ' ' + val + ' ')[1:(- 1)]

    for key, val in dictionary:
        if key == domain or key == 'value':
            utt = _swap(utt, key, val)
    for key, val in dictionary:
        utt = _swap(utt, key, val)
    return utt
class SemanticSegAlgo():
    """Training / inference wrapper around a semantic segmentation head.

    Fix: ``_pack_logits`` and ``_logits`` had no ``self`` parameter yet
    ``_logits`` was invoked as ``self._logits(head, ...)`` — the bound call
    passed five positional arguments into a four-parameter function
    (TypeError at runtime). Both are now proper @staticmethod, which makes
    the existing call sites (``self._logits(...)`` and
    ``SemanticSegAlgo._pack_logits(...)``) bind correctly.
    """

    def __init__(self, loss, num_classes, ignore_index=255):
        self.loss = loss
        self.num_classes = num_classes
        # Label value excluded from the loss mask and confusion matrix.
        self.ignore_index = ignore_index

    @staticmethod
    def _pack_logits(sem_logits, valid_size, img_size):
        # Upsample logits to full image resolution, then re-pack per image.
        sem_logits = functional.interpolate(sem_logits, size=img_size, mode='bilinear', align_corners=False)
        return pack_padded_images(sem_logits, valid_size)

    def _confusion_matrix(self, sem_pred, sem):
        # Accumulate a num_classes x num_classes confusion matrix over the
        # batch, skipping pixels labeled ignore_index.
        confmat = sem[0].new_zeros((self.num_classes * self.num_classes), dtype=torch.float)
        for (sem_pred_i, sem_i) in zip(sem_pred, sem):
            valid = (sem_i != self.ignore_index)
            if valid.any():
                sem_pred_i = sem_pred_i[valid]
                sem_i = sem_i[valid]
                confmat.index_add_(0, ((sem_i.view((- 1)) * self.num_classes) + sem_pred_i.view((- 1))), confmat.new_ones(sem_i.numel()))
        return confmat.view(self.num_classes, self.num_classes)

    @staticmethod
    def _logits(head, x, valid_size, img_size):
        # Run the head, returning (raw logits, packed full-res logits, features).
        (sem_logits, sem_feats) = head(x)
        return (sem_logits, SemanticSegAlgo._pack_logits(sem_logits, valid_size, img_size), sem_feats)

    def training(self, head, x, sem, valid_size, img_size):
        """One training step: loss, confusion matrix and all predictions."""
        (sem_logits_low_res, sem_logits, sem_feats) = self._logits(head, x, valid_size, img_size)
        sem_pred = PackedSequence([sem_logits_i.max(dim=0)[1] for sem_logits_i in sem_logits])
        sem_pred_low_res = PackedSequence([sem_logits_low_res_i.max(dim=0)[1].float() for sem_logits_low_res_i in sem_logits_low_res])
        sem_loss = self.loss(sem_logits, sem)
        conf_mat = self._confusion_matrix(sem_pred, sem)
        return (sem_loss, conf_mat, sem_pred, sem_logits, sem_logits_low_res, sem_pred_low_res, sem_feats)

    def inference(self, head, x, valid_size, img_size):
        """Inference: argmax predictions at full and head resolution."""
        (sem_logits_low_res, sem_logits, sem_feats) = self._logits(head, x, valid_size, img_size)
        sem_pred = PackedSequence([sem_logits_i.max(dim=0)[1] for sem_logits_i in sem_logits])
        sem_pred_low_res = PackedSequence([sem_logits_low_res_i.max(dim=0)[1].float() for sem_logits_low_res_i in sem_logits_low_res])
        return (sem_pred, sem_feats, sem_pred_low_res)
def _get_custom_interpreter(implementation=None, version=None):
    """Return an interpreter tag like 'cp39', defaulting missing parts.

    A None implementation/version falls back to the current interpreter's
    name/version; the two parts are simply concatenated.
    """
    impl = interpreter_name() if implementation is None else implementation
    ver = interpreter_version() if version is None else version
    return f'{impl}{ver}'
def test_error_ndim():
    """montage must reject arrays of unsupported dimensionality,
    both without and with an explicit channel_axis."""
    cases = [
        ((1, 2), {}),
        ((1, 2, 3, 4), {}),
        ((1, 2, 3), {'channel_axis': (- 1)}),
        ((1, 2, 3, 4, 5), {'channel_axis': (- 1)}),
    ]
    for shape, kwargs in cases:
        arr_error = np.random.randn(*shape)
        with testing.raises(ValueError):
            montage(arr_error, **kwargs)
def get_tempo_info(beat_df):
    """Return (mean tempo, tempo spread) from a beat DataFrame.

    The spread is the tempo difference across +/- 2 standard deviations of
    the beat duration (shorter durations mean faster tempo, hence the order).
    """
    durations = beat_df['duration']
    mean = durations.mean()
    std = durations.std()
    spread = sec2tempo(mean - 2 * std) - sec2tempo(mean + 2 * std)
    return (sec2tempo(mean), spread)
def print_task_log(demo_task_counter, live_task_counter, mod):
    """Log per-task and aggregate success rates for one modality.

    Reports three numbers: per-task SR, overall SR weighted by demo counts
    (0 when there are no demos), and the unweighted mean of per-task SRs.
    """
    print()
    logger.info(f'Modality: {mod}')
    for task in demo_task_counter:
        rate = live_task_counter[task] / demo_task_counter[task] * 100
        logger.info(f'{task}: SR = {rate:.0f}%'
                    f' | {live_task_counter[task]} of {demo_task_counter[task]}')
    total_demo = sum(demo_task_counter.values())
    overall = sum(live_task_counter.values()) / total_demo if total_demo > 0 else 0
    logger.info(f'Average Success Rate {mod} = {overall * 100:.0f}% ')
    per_task = [live_task_counter[task] / demo_task_counter[task] for task in demo_task_counter]
    logger.info(f'Success Rates averaged throughout classes = {np.mean(per_task) * 100:.0f}%')
class ConvMergeNetwork(LayersPowered, Serializable):
    """Conv network over an image input merged with an extra flat input.

    A single flat input layer carries both inputs concatenated; internally
    it is sliced back into the conv branch (reshaped to input_shape) and the
    extra branch (reshaped to extra_input_shape), each processed by its own
    stack, then concatenated and passed through joint dense layers down to
    output_dim.
    """
    def __init__(self, name, input_shape, extra_input_shape, output_dim, hidden_sizes, conv_filters, conv_filter_sizes, conv_strides, conv_pads, extra_hidden_sizes=None, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), hidden_nonlinearity=tf.nn.relu, output_nonlinearity=None, input_var=None, input_layer=None):
        Serializable.quick_init(self, locals())
        if (extra_hidden_sizes is None):
            extra_hidden_sizes = []
        with tf.variable_scope(name):
            input_flat_dim = np.prod(input_shape)
            extra_input_flat_dim = np.prod(extra_input_shape)
            total_input_flat_dim = (input_flat_dim + extra_input_flat_dim)
            if (input_layer is None):
                l_in = L.InputLayer(shape=(None, total_input_flat_dim), input_var=input_var, name='input')
            else:
                l_in = input_layer
            # Slice the flat input back into the conv and extra branches.
            l_conv_in = L.reshape(L.SliceLayer(l_in, indices=slice(input_flat_dim), name='conv_slice'), (([0],) + input_shape), name='conv_reshaped')
            l_extra_in = L.reshape(L.SliceLayer(l_in, indices=slice(input_flat_dim, None), name='extra_slice'), (([0],) + extra_input_shape), name='extra_reshaped')
            l_conv_hid = l_conv_in
            for (idx, conv_filter, filter_size, stride, pad) in zip(range(len(conv_filters)), conv_filters, conv_filter_sizes, conv_strides, conv_pads):
                l_conv_hid = L.Conv2DLayer(l_conv_hid, num_filters=conv_filter, filter_size=filter_size, stride=(stride, stride), pad=pad, nonlinearity=hidden_nonlinearity, name=('conv_hidden_%d' % idx))
            l_extra_hid = l_extra_in
            for (idx, hidden_size) in enumerate(extra_hidden_sizes):
                l_extra_hid = L.DenseLayer(l_extra_hid, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name=('extra_hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init)
            # Merge both branches, then run the joint dense stack.
            l_joint_hid = L.concat([L.flatten(l_conv_hid, name='conv_hidden_flat'), l_extra_hid], name='joint_hidden')
            for (idx, hidden_size) in enumerate(hidden_sizes):
                l_joint_hid = L.DenseLayer(l_joint_hid, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name=('joint_hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init)
            l_out = L.DenseLayer(l_joint_hid, num_units=output_dim, nonlinearity=output_nonlinearity, name='output', W=output_W_init, b=output_b_init)
            self._l_in = l_in
            self._l_out = l_out
            LayersPowered.__init__(self, [l_out], input_layers=[l_in])

    # NOTE(review): these accessors look like they were @property methods
    # whose decorators were lost — as plain methods they must be called,
    # not read as attributes; confirm against the original file.
    def input_layer(self):
        return self._l_in

    def output_layer(self):
        return self._l_out

    def input_var(self):
        return self._l_in.input_var
def main(cfg):
    """Train or evaluate the re-id model depending on cfg.train.

    cfg.train == 1: warm-start from ImageNet-initialized weights (classifier
    head excluded, since class counts differ) and run do_train. Otherwise:
    load the best checkpoint and print CMC@{1,5,10,20} and mAP on the
    cloth-changing query split.
    """
    (train_loader, train_loader_ca, train_loader_cb, val_loader_c, val_loader_b, num_query_c, num_query_b, num_classes) = make_data_loader(cfg, use_eraser=True)
    model = build_model(num_classes, 'base', pretrain_choice=True)
    model = (torch.nn.DataParallel(model).cuda() if torch.cuda.is_available() else model)
    loss_func = make_loss()
    optimizer = make_optimizer(cfg, model)
    # LR decays 10x at epochs 40 and 80.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 80], gamma=0.1)
    if (cfg.train == 1):
        start_epoch = 0
        acc_best = 0.0
        last_model_wts = torch.load(os.path.join('pre_feat', 'msmt_ini_imagenet.pth'))
        model_dict = model.state_dict()
        # Keep only keys present in this model, dropping the classifier head.
        checkpoint_dict = {k: v for (k, v) in last_model_wts['state_dict'].items() if ((k in model_dict) and ('classifier' not in k))}
        model_dict.update(checkpoint_dict)
        model.load_state_dict(model_dict)
        do_train(cfg, model, train_loader, val_loader_c, optimizer, scheduler, loss_func, num_query_c, start_epoch, acc_best)
    else:
        last_model_wts = torch.load(os.path.join(cfg.logs_dir, 'checkpoint_best.pth'))
        model_dict = model.state_dict()
        checkpoint_dict = {k: v for (k, v) in last_model_wts['state_dict'].items() if ((k in model_dict) and ('classifier' not in k))}
        model_dict.update(checkpoint_dict)
        model.load_state_dict(model_dict)
        (mAP, cmc1, cmc5, cmc10, cmc20) = inference_prcc_global(model, val_loader_c, num_query_c)
        # Timestamped one-line summary of the evaluation metrics.
        start_time = datetime.datetime.now()
        start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
        line = '{} - Test: cmc1: {:.1%}, cmc5: {:.1%}, cmc10: {:.1%}, cmc20: {:.1%}, mAP: {:.1%}\n'.format(start_time, cmc1, cmc5, cmc10, cmc20, mAP)
        print(line)
class TestTokenEmbedder(object):
    """Unit tests for TokenEmbedder.

    NOTE(review): the test methods below take an ``embedder`` parameter,
    which pytest resolves as a fixture, yet ``embedder`` carries no
    @pytest.fixture decorator here — the decorator was likely lost;
    confirm against the original file.
    """
    def embedder(self):
        # 6-word vocab (3 specials + a, b, c) with one-hot embedding rows,
        # so each embedding directly identifies its vocab index.
        vocab = SimpleVocab((['<unk>', '<start>', '<stop>'] + ['a', 'b', 'c']))
        arr = np.eye(len(vocab), dtype=np.float32)
        word_embeddings = Bunch(vocab=vocab, array=arr)
        return TokenEmbedder(word_embeddings)

    def test_embedding_from_array(self):
        # The provided matrix must be used as-is: index i -> row i.
        emb = TokenEmbedder._embedding_from_array(np.array([[9, 9], [8, 7]], dtype=np.float32))
        assert isinstance(emb, Embedding)
        values = emb(GPUVariable(torch.LongTensor([[0, 0], [1, 0]])))
        assert_tensor_equal(values, [[[9, 9], [9, 9]], [[8, 7], [9, 9]]])

    def test_embed_indices(self, embedder):
        # Batched index lookup keeps the (batch, seq) structure.
        indices = GPUVariable(torch.LongTensor([[0, 1], [2, 2], [4, 5]]))
        embeds = embedder.embed_indices(indices)
        assert_tensor_equal(embeds, [[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]], [[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0]], [[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]])

    def test_embed_tokens(self, embedder):
        # Token-level lookup: 'b' -> one-hot index 4, 'c' -> index 5.
        tokens = ['b', 'c', 'c']
        embeds = embedder.embed_tokens(tokens)
        assert_tensor_equal(embeds, [[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1]])
def LF_implant_indication(c):
    """Labeling function: vote -1 when the implant mention contains a known
    implant term AND the complication's lemmatized text is an indication.

    Returns -1 (negative label) when both conditions hold, else 0 (abstain).
    Cleanup: removed the unused ``keywords`` local and replaced the
    boolean ``&=`` accumulation with a plain conjunction.
    """
    mention = c.implant.get_span().lower()
    has_implant_term = any(term in mention for term in implant_dict)
    # Lemmatized complication text, with empty/whitespace lemmas dropped.
    lemma = ' '.join(w.lower() for w in c.complication.get_attrib_tokens('lemmas') if w.strip())
    if has_implant_term and lemma in indications:
        return -1
    return 0
class SoftBCEWithLogitsLoss(nn.Module):
    """BCE-with-logits supporting label smoothing and an ignore index.

    ``smooth_factor`` linearly blends hard targets (0 -> s, 1 -> 1 - s);
    positions whose *raw* target equals ``ignore_index`` contribute zero
    loss before reduction. ``weight`` and ``pos_weight`` are registered as
    buffers so they follow the module across devices.
    """

    __constants__ = ['weight', 'pos_weight', 'reduction', 'ignore_index', 'smooth_factor']

    def __init__(self, weight=None, ignore_index: Optional[int]=(- 100), reduction='mean', smooth_factor=None, pos_weight=None):
        super().__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.smooth_factor = smooth_factor
        self.register_buffer('weight', weight)
        self.register_buffer('pos_weight', pos_weight)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Compute the (optionally smoothed/masked) BCE loss for logits."""
        if self.smooth_factor is None:
            soft_targets = target.type_as(input)
        else:
            # Linear label smoothing: 0 -> s and 1 -> 1 - s.
            smoothed = (1 - target) * self.smooth_factor + target * (1 - self.smooth_factor)
            soft_targets = smoothed.type_as(input)
        loss = F.binary_cross_entropy_with_logits(input, soft_targets, self.weight, pos_weight=self.pos_weight, reduction='none')
        if self.ignore_index is not None:
            # Mask out positions matching ignore_index in the raw targets.
            keep = (target != self.ignore_index).type_as(loss)
            loss = loss * keep
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
@dataclass(frozen=True)
class Table():
    """Immutable table: title, header cells, row cells, and optional
    links / name / description.

    Fix: the decorator line read bare ``(frozen=True)`` — a syntax error
    with the ``@dataclass`` prefix lost; restored. The dataclasses idiom is
    confirmed by the existing ``field(default_factory=list)`` default.
    """
    title: str
    header: List[HeaderCell]
    rows: List[List[Cell]]
    links: List[Hyperlink] = field(default_factory=list)
    name: Optional[str] = None
    description: Optional[str] = None
def check_precomputed_polar(a, side, expected_u, expected_p):
    """Assert that polar(a, side=side) matches precomputed (u, p) factors."""
    u, p = polar(a, side=side)
    for actual, expected in ((u, expected_u), (p, expected_p)):
        assert_allclose(actual, expected, atol=1e-15)
class DeviceSession_V1_1(DeviceSession):
    """Device session extended with three additional network session keys.

    The key names (FNwkSIntKey, SNwkSIntKey, NwkSEncKey) follow the
    LoRaWAN 1.1 naming — presumably this models a 1.1 session; confirm.
    All remaining fields are forwarded to the base DeviceSession.
    """

    def __init__(self, FNwkSIntKey=None, SNwkSIntKey=None, NwkSEncKey=None, **kwargs):
        super().__init__(**kwargs)
        self.NwkSEncKey = NwkSEncKey
        self.SNwkSIntKey = SNwkSIntKey
        self.FNwkSIntKey = FNwkSIntKey
def poly():
    """Elementwise piecewise response mapping field x into field y.

    Outside the dead zone [-guard, guard] the output is 4 / max(v, 0.1)
    (the max clamps the divisor, so v < -guard yields 4 / 0.1 = 40);
    inside the dead zone the output is 0.

    NOTE(review): iterates a field-like global ``x`` and calls ``ti.max``,
    so this is presumably a Taichi kernel whose @ti.kernel decorator is
    outside this view — confirm.
    """
    for i in x:
        v = x[i]
        ret = 0.0
        guard = 0.2
        if ((v < (- guard)) or (v > guard)):
            ret = (4 / ti.max(v, 0.1))
        else:
            ret = 0
        y[i] = ret
def distance2center(x1, y1, x2, y2, image):
    """Euclidean distance from a box's center to the image center.

    (x1, y1, x2, y2) are numpy values describing box corners (``astype``
    is used to truncate the midpoints to int); image supplies (H, W, ...)
    via its shape.
    """
    center_x = int(image.shape[1] / 2)
    center_y = int(image.shape[0] / 2)
    box_cx = ((x1 + x2) / 2).astype(int)
    box_cy = ((y1 + y2) / 2).astype(int)
    dx = center_x - box_cx
    dy = center_y - box_cy
    return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.