code
stringlengths
101
5.91M
def decay(X, n_bins=4):
    """Decay statistic of a 1-D signal: mean of the first bin minus mean of
    the last bin, after splitting X into `n_bins` contiguous (overlapping by
    one sample) bins.

    Fixes vs. original:
      * bin edges were cast to np.uint8, silently wrapping for len(X) > 255;
        now cast to a full-width integer type.
      * the bin list was built with a hard-coded ``range(0, 4)`` and indexed
        with ``[3]``, ignoring ``n_bins``; now generalized.
      * the last bin used np.mean while the first used np.nanmean; both now
        use nanmean so NaNs are handled consistently.
    """
    # +1e-10 nudges exact .5 edges so np.round does not banker's-round down.
    edges = (np.round(np.linspace(1, len(X), n_bins + 1) + 1e-10) - 1)
    edges = edges.astype(np.int64)  # was np.uint8: overflowed for long inputs
    bins = [X[edges[i]:(edges[i + 1] + 1)] for i in range(n_bins)]
    with warnings.catch_warnings():
        # All-NaN bins produce a RuntimeWarning from nanmean; ignore it.
        warnings.simplefilter('ignore', category=RuntimeWarning)
        D = (np.nanmean(bins[0]) - np.nanmean(bins[-1]))
    return D
def read_requirements_file(path):
    """Return the lines of *path* whose first character could start an
    identifier (i.e. skip comments, blank lines and option flags).

    Lines are returned verbatim, including any trailing newline.
    """
    with open(path, 'r') as handle:
        all_lines = handle.readlines()
    return [line for line in all_lines if line[:1].isidentifier()]
class AsyncCpuSampler(AsyncParallelSamplerMixin, ParallelSamplerBase):
    """Asynchronous CPU-only sampler.

    The agent's parameters live in shared memory; this process pulls the
    latest copy before every sampling / evaluation call.
    """

    def __init__(self, *args, CollectorCls=DbCpuResetCollector, eval_CollectorCls=CpuEvalCollector, **kwargs):
        # Only swaps in CPU-appropriate default collector classes; everything
        # else is forwarded to the parallel-sampler base.
        super().__init__(*args, CollectorCls=CollectorCls, eval_CollectorCls=eval_CollectorCls, **kwargs)

    def initialize(self, affinity):
        """Pin the master process to its CPUs, share the agent's parameters,
        and run the base initialization (single-process world: rank 0 of 1)."""
        p = psutil.Process()
        if affinity.get('set_affinity', True):
            p.cpu_affinity(affinity['master_cpus'])
        torch.set_num_threads(1)  # keep the master process single-threaded
        self.agent.async_cpu(share_memory=True)
        super().initialize(agent=self.agent, affinity=affinity, seed=self.seed, bootstrap_value=None, traj_info_kwargs=None, world_size=1, rank=0)

    def obtain_samples(self, itr, db_idx):
        # Sync the latest agent parameters from shared memory before sampling.
        self.agent.recv_shared_memory()
        return super().obtain_samples(itr, db_idx)

    def evaluate_agent(self, itr):
        # Sync the latest agent parameters from shared memory before eval.
        self.agent.recv_shared_memory()
        return super().evaluate_agent(itr)
class PreResNet110Drop():
    """Config bundle for a depth-110 PreResNet-with-dropout model.

    Consumers read `base`/`args`/`kwargs` to build the network and the two
    transform pipelines for training/evaluation data.
    """
    base = PreResNetDrop      # model constructor
    args = list()             # positional args for `base`
    kwargs = {'depth': 110}   # keyword args for `base`
    # CIFAR-style normalization stats; train adds random crop + horizontal flip.
    transform_train = transforms.Compose([transforms.Resize(32), transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    transform_test = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
def distillation_loss(y, labels, teacher_scores, T, alpha, reduction_kd='mean', reduction_nll='mean'):
    """Knowledge-distillation loss.

    Combines a temperature-scaled KL divergence against the teacher's
    distribution with the standard cross-entropy against the hard labels:
    total = alpha * kd + (1 - alpha) * ce.

    Returns (total_loss, distillation_loss, nll_loss). When no teacher
    scores are given, alpha must be 0 and the distillation term is 0.
    """
    if teacher_scores is None:
        assert (alpha == 0), 'alpha cannot be {} when teacher scores are not provided'.format(alpha)
        d_loss = 0.0
    else:
        student_log_probs = F.log_softmax(y / T, dim=1)
        teacher_probs = F.softmax(teacher_scores / T, dim=1)
        # T^2 rescaling keeps gradient magnitudes comparable across temperatures.
        d_loss = nn.KLDivLoss(reduction=reduction_kd)(student_log_probs, teacher_probs) * T * T
    nll_loss = F.cross_entropy(y, labels, reduction=reduction_nll)
    tol_loss = alpha * d_loss + (1.0 - alpha) * nll_loss
    return (tol_loss, d_loss, nll_loss)
class DatasetsHolder():
    """Utility for loading every CSV file in a folder into DataFrames."""

    @staticmethod
    def read_datasets(inp_folder_path):
        """Read every regular file directly under *inp_folder_path* as a CSV
        (first column used as the index) and return {filename: DataFrame}.

        BUG FIX: this was declared as an instance method without `self`, so
        calling it on an instance raised TypeError. It uses no instance
        state, so it is now a @staticmethod — class-level callers
        (DatasetsHolder.read_datasets(path)) are unaffected and instance
        calls now work too.
        """
        with os.scandir(inp_folder_path) as entries:
            # dict comprehension instead of dict([...]) round-trip
            return {entry.name: pd.read_csv(entry, index_col=0) for entry in entries if entry.is_file()}
def test_tuning(vrblvl=0):
    """Exercise the parameter-tuning API: show parameters, raise the condition
    level to 2, autotune at that level, then autotune back at level 0.

    `vrblvl` is a verbosity level forwarded to every call.
    Returns 0 (success), matching the convention of the sibling tests.
    """
    show_parameters(vrblvl)
    print('setting the condition level to 2 ...')
    set_condition_level(2, vrblvl)
    level = get_condition_level(vrblvl)
    print('the condition level :', level)
    autotune_parameters(level, 14, vrblvl)
    show_parameters(vrblvl)
    # restore the defaults by autotuning at condition level 0
    autotune_parameters(0, 14, vrblvl)
    show_parameters(vrblvl)
    return 0
class VGG(nn.Module):
    """VGG-style CNN: a stack of conv/BN/ReLU + max-pool layers (driven by the
    module-level `cfg` table) followed by a single linear classifier."""

    def __init__(self, vgg_name, Num_classes=100):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, Num_classes)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten per sample
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Translate a config list ('M' = max-pool, int = conv width) into an
        nn.Sequential feature extractor."""
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
def levenshtein(reference, hypothesis, progress_bar=False):
    """Per-pair edit distance and character error rate.

    Takes two equal-length sequences of strings and returns a DataFrame with
    columns reference / hypothesis / distance / cer, where cer is
    100 * distance / len(reference) (guarded against empty references).
    """
    assert len(reference) == len(hypothesis)
    pairs = zip(reference, hypothesis)
    if progress_bar:
        pairs = tqdm(pairs, total=len(reference))
    distances = [distance(ref, hyp) for ref, hyp in pairs]
    output = pd.DataFrame({'reference': reference, 'hypothesis': hypothesis})
    output = output.assign(distance=lambda df: distances)
    output = output.assign(cer=lambda df: df.apply(lambda row: (100 * row['distance']) / max(len(row['reference']), 1), axis=1))
    return output
class Scenario(BaseScenario):
    """Predator-prey ('simple_tag'-style) multi-agent particle scenario:
    3 bigger/slower adversaries chase 1 smaller/faster good agent among
    2 fixed landmarks."""

    def make_world(self):
        """Build the world: agents, landmarks and their physical attributes."""
        world = World()
        world.dim_c = 2  # communication channel dimensionality
        num_good_agents = 1
        num_adversaries = 3
        num_agents = (num_adversaries + num_good_agents)
        num_landmarks = 2
        world.agents = [Agent() for i in range(num_agents)]
        for (i, agent) in enumerate(world.agents):
            agent.name = ('agent %d' % i)
            agent.collide = True
            agent.silent = True
            # the first `num_adversaries` agents are the predators
            agent.adversary = (True if (i < num_adversaries) else False)
            # adversaries: larger, slower, lower acceleration than the prey
            agent.size = (0.075 if agent.adversary else 0.05)
            agent.accel = (3.0 if agent.adversary else 4.0)
            agent.max_speed = (1.0 if agent.adversary else 1.3)
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for (i, landmark) in enumerate(world.landmarks):
            landmark.name = ('landmark %d' % i)
            landmark.collide = True
            landmark.movable = False
            landmark.size = 0.2
            landmark.boundary = False
        self.reset_world(world)
        return world

    def reset_world(self, world):
        """Randomize agent/landmark states for a new episode."""
        for (i, agent) in enumerate(world.agents):
            # green = good agent, red = adversary
            agent.color = (np.array([0.35, 0.85, 0.35]) if (not agent.adversary) else np.array([0.85, 0.35, 0.35]))
        for (i, landmark) in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform((- 1), (+ 1), world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for (i, landmark) in enumerate(world.landmarks):
            if (not landmark.boundary):
                landmark.state.p_pos = np.random.uniform((- 0.9), (+ 0.9), world.dim_p)
                landmark.state.p_vel = np.zeros(world.dim_p)

    def benchmark_data(self, agent, world):
        """Benchmark metric: number of good agents an adversary is touching."""
        if agent.adversary:
            collisions = 0
            for a in self.good_agents(world):
                if self.is_collision(a, agent):
                    collisions += 1
            return collisions
        else:
            return 0

    def is_collision(self, agent1, agent2):
        # Agents collide when center distance is below the sum of their radii.
        delta_pos = (agent1.state.p_pos - agent2.state.p_pos)
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        dist_min = (agent1.size + agent2.size)
        return (True if (dist < dist_min) else False)

    def good_agents(self, world):
        """All non-adversary agents."""
        return [agent for agent in world.agents if (not agent.adversary)]

    def adversaries(self, world):
        """All adversary agents."""
        return [agent for agent in world.agents if agent.adversary]

    def reward(self, agent, world):
        """Dispatch to the per-team reward."""
        main_reward = (self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world))
        return main_reward

    def agent_reward(self, agent, world):
        """Good-agent reward: penalized for being caught and for leaving the arena."""
        rew = 0
        shape = False  # flip on for distance-based reward shaping
        adversaries = self.adversaries(world)
        if shape:
            for adv in adversaries:
                rew += (0.1 * np.sqrt(np.sum(np.square((agent.state.p_pos - adv.state.p_pos)))))
        if agent.collide:
            for a in adversaries:
                if self.is_collision(a, agent):
                    rew -= 10

        def bound(x):
            # soft boundary penalty: 0 inside |pos|<0.9, linear ramp to 1.0,
            # then exponential (capped at 10) outside the arena
            if (x < 0.9):
                return 0
            if (x < 1.0):
                return ((x - 0.9) * 10)
            return min(np.exp(((2 * x) - 2)), 10)
        for p in range(world.dim_p):
            x = abs(agent.state.p_pos[p])
            rew -= bound(x)
        return rew

    def adversary_reward(self, agent, world):
        """Adversary reward: +10 for each (good agent, adversary) collision."""
        rew = 0
        shape = False  # flip on for distance-based reward shaping
        agents = self.good_agents(world)
        adversaries = self.adversaries(world)
        if shape:
            for adv in adversaries:
                rew -= (0.1 * min([np.sqrt(np.sum(np.square((a.state.p_pos - adv.state.p_pos)))) for a in agents]))
        if agent.collide:
            for ag in agents:
                for adv in adversaries:
                    if self.is_collision(ag, adv):
                        rew += 10
        return rew

    def observation(self, agent, world):
        """Observation: own velocity + position, relative landmark and other-agent
        positions, and the velocities of the non-adversary agents."""
        entity_pos = []
        for entity in world.landmarks:
            if (not entity.boundary):
                entity_pos.append((entity.state.p_pos - agent.state.p_pos))
        comm = []  # NOTE(review): collected but never used in the returned vector
        other_pos = []
        other_vel = []
        for other in world.agents:
            if (other is agent):
                continue
            comm.append(other.state.c)
            other_pos.append((other.state.p_pos - agent.state.p_pos))
            if (not other.adversary):
                other_vel.append(other.state.p_vel)
        return np.concatenate((((([agent.state.p_vel] + [agent.state.p_pos]) + entity_pos) + other_pos) + other_vel))
def set_attr_shape(node, key, value):
    """Set the shape-valued attribute `key` on a TensorFlow graph-def `node`.

    `value` is anything tensor_shape.as_shape accepts (list/tuple/TensorShape).
    Best-effort: a KeyError from the attr map is deliberately swallowed so
    nodes without that attribute slot are left unchanged.
    """
    try:
        node.attr[key].CopyFrom(attr_value_pb2.AttrValue(shape=tensor_shape.as_shape(value).as_proto()))
    except KeyError:
        pass
class InceptionBlock(nn.Module):
    """GoogLeNet/BN-Inception style block: four parallel branches whose
    outputs are merged by `Concurrent` (presumably channel concatenation —
    confirm against its definition).

    mid1_channels_list: 2 mid-channel counts for the 3x3 and double-3x3 branches.
    mid2_channels_list: 4 output-channel counts, one per branch.
    avg_pool: whether the pooling branch uses average (vs. max) pooling.
    bias / use_bn: forwarded to every conv block.
    """

    def __init__(self, in_channels, mid1_channels_list, mid2_channels_list, avg_pool, bias, use_bn):
        super(InceptionBlock, self).__init__()
        assert (len(mid1_channels_list) == 2)
        assert (len(mid2_channels_list) == 4)
        self.branches = Concurrent()
        # 1x1 conv branch
        self.branches.add_module('branch1', conv1x1_block(in_channels=in_channels, out_channels=mid2_channels_list[0], bias=bias, use_bn=use_bn))
        # 1x1 -> 3x3 branch
        self.branches.add_module('branch2', Inception3x3Branch(in_channels=in_channels, out_channels=mid2_channels_list[1], mid_channels=mid1_channels_list[0], bias=bias, use_bn=use_bn))
        # 1x1 -> 3x3 -> 3x3 branch
        self.branches.add_module('branch3', InceptionDouble3x3Branch(in_channels=in_channels, out_channels=mid2_channels_list[2], mid_channels=mid1_channels_list[1], bias=bias, use_bn=use_bn))
        # pooling branch
        self.branches.add_module('branch4', InceptionPoolBranch(in_channels=in_channels, out_channels=mid2_channels_list[3], avg_pool=avg_pool, bias=bias, use_bn=use_bn))

    def forward(self, x):
        x = self.branches(x)
        return x
class BottleneckBlock(ResNetBlockBase):
    """Standard ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1) with an
    optional 1x1 projection shortcut when the channel count changes.

    stride_in_1x1 selects where the stride is applied (conv1 vs conv2), the
    difference between the original ResNet and the 'v1.5' torchvision form.
    """

    def __init__(self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm='BN', stride_in_1x1=False, dilation=1):
        super().__init__(in_channels, out_channels, stride)
        # Projection shortcut only when the residual cannot be added directly.
        if (in_channels != out_channels):
            self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels))
        else:
            self.shortcut = None
        # Place the stride on the 1x1 or the 3x3 conv.
        (stride_1x1, stride_3x3) = ((stride, 1) if stride_in_1x1 else (1, stride))
        self.conv1 = Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels))
        # padding = dilation keeps the 3x3 output spatially aligned.
        self.conv2 = Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=(1 * dilation), bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, bottleneck_channels))
        self.conv3 = Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels))
        # MSRA (He) initialization for every conv, including the shortcut.
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if (layer is not None):
                weight_init.c2_msra_fill(layer)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu_(out)
        out = self.conv2(out)
        out = F.relu_(out)
        out = self.conv3(out)
        if (self.shortcut is not None):
            shortcut = self.shortcut(x)
        else:
            shortcut = x
        out += shortcut
        out = F.relu_(out)
        return out
def resnet18(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNet-18 factory: BasicBlock with [2, 2, 2, 2] blocks per stage.

    All construction (and optional pretrained-weight loading, per the
    `pretrained`/`progress` flags) is delegated to `_resnet`.
    """
    return _resnet(in_channels, 'resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def get_match_index(src_bboxes, dst_bboxes, iou_threshold=0.5):
    """Return the indices of `dst_bboxes` that overlap at least one source
    box with IoU >= `iou_threshold`.

    Generalized: the previously hard-coded 0.5 threshold is now a keyword
    parameter (default unchanged). The result is sorted so the output is
    deterministic instead of depending on set iteration order.
    """
    matched = set()
    for src_bbox in src_bboxes:
        for (i, dst_bbox) in enumerate(dst_bboxes):
            if calculate_iou(src_bbox, dst_bbox) >= iou_threshold:
                matched.add(i)
    return sorted(matched)
def joint_coherence():
    """Joint-generation coherence check for a multimodal VAE.

    Samples 1000 latents from the prior, decodes them with both decoders
    (vaes[0] = image, vaes[1] = sentence — presumably; confirm against the
    model definition), and prints the cross-modal correlation score.
    Relies on module-level `model`, `calculate_corr` and `fn_to_emb`.
    """
    model.eval()
    with torch.no_grad():
        pzs = model.pz(*model.pz_params).sample([1000])
        gen_images = model.vaes[0].dec(pzs)[0].squeeze(1)
        # argmax over the vocabulary dimension turns logits into token ids
        gen_sentences = model.vaes[1].dec(pzs)[0].argmax(dim=(- 1)).squeeze(1)
        score = calculate_corr(gen_images, fn_to_emb(gen_sentences))
        print('joint generation {:10.9f}'.format(score))
class ConvBnAct(nn.Module):
    """Conv2d -> norm -> activation, the standard convolution block.

    The conv itself comes from `select_conv2d` (handles padding selection via
    `pad_type`); only strides 1 and 2 are supported.
    """

    def __init__(self, in_chs, out_chs, kernel_size, stride=1, pad_type='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(ConvBnAct, self).__init__()
        assert (stride in [1, 2])
        norm_kwargs = (norm_kwargs or {})  # avoid a mutable default
        self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn1(x)
        x = self.act1(x)
        return x
def walk_to_root(node):
    """Return the chain of nodes from `node` up to and including the root.

    The root is the first ancestor whose `parent` is None; the result starts
    at `node` itself. Fix: the None check now uses `is not None` instead of
    `!= None`, which is both idiomatic and immune to custom __eq__ overloads.
    """
    result = []
    current = node
    while current.parent is not None:
        result.append(current)
        current = current.parent
    result.append(current)  # the root itself
    return result
def get_model():
    """Build the speaker-conditioned spectrogram-masking network
    (VoiceFilter-style): a stack of dilated conv blocks over the input
    spectrogram, concatenated with a repeated speaker d-vector, followed by a
    BLSTM and dense layers that emit a sigmoid mask applied to the input.

    Uses module-level hyperparameters: emb_dim, T_dim, num_freq, lstm_dim,
    fc1_dim, fc2_dim.

    Refactor: the eight identical pad -> conv -> BN -> ReLU sequences are
    folded into a table-driven helper instead of being written out by hand.
    """
    def _conv_block(t, padding, kernel_size, dilation_rate, filters=64):
        # Zero-padding keeps the spatial size constant for the dilated conv.
        if padding is not None:
            t = ZeroPadding2D(padding)(t)
        t = Conv2D(filters=filters, kernel_size=kernel_size, dilation_rate=dilation_rate)(t)
        t = BatchNormalization()(t)
        return Activation('relu')(t)

    dvec_inp = Input(shape=[emb_dim], name='dvec')
    input_spec = Input(shape=[T_dim, num_freq], name='input_spec')
    x = Reshape((T_dim, num_freq, 1))(input_spec)
    # (padding, kernel_size, dilation_rate) for the dilated conv stack;
    # time-axis dilation doubles each block while the freq axis stays at 1.
    conv_specs = [
        (((0, 0), (3, 3)), [1, 7], [1, 1]),
        (((3, 3), (0, 0)), [7, 1], [1, 1]),
        (((2, 2), (2, 2)), [5, 5], [1, 1]),
        (((4, 4), (2, 2)), [5, 5], [2, 1]),
        (((8, 8), (2, 2)), [5, 5], [4, 1]),
        (((16, 16), (2, 2)), [5, 5], [8, 1]),
        (((32, 32), (2, 2)), [5, 5], [16, 1]),
    ]
    for padding, kernel, dilation in conv_specs:
        x = _conv_block(x, padding, kernel, dilation)
    # Final 1x1 bottleneck down to 8 channels (no padding needed).
    x = _conv_block(x, None, [1, 1], [1, 1], filters=8)
    # Flatten freq x channels so each timestep is a single feature vector.
    x = Reshape((x.shape[1], (x.shape[2] * x.shape[3])))(x)
    # Broadcast the speaker embedding across every timestep.
    dvec = Lambda((lambda a: tf.expand_dims(a, 1)))(dvec_inp)
    dvec = Lambda((lambda a: tf.repeat(a, repeats=x.shape[1], axis=1)))(dvec)
    x = concatenate([x, dvec], (- 1))
    x = Bidirectional(LSTM(lstm_dim, return_sequences=True))(x)
    x = Dense(fc1_dim, activation='relu')(x)
    mask = Dense(fc2_dim, activation='sigmoid', name='mask')(x)
    output = Multiply()([input_spec, mask])  # apply the mask to the input
    model = Model(inputs=[input_spec, dvec_inp], outputs=output)
    return model
def build_meta4train_lmdb(args):
    """Collect training + validation font files, write their glyph metadata to
    JSON, and (re)build the training LMDB.

    Reads from args: saving_dir, content_font, train_font_dir, val_font_dir.
    Side effects: creates <saving_dir>/meta and a fresh <saving_dir>/lmdb,
    writes trainset_ori_meta.json and trainset_dict.json.

    Cleanup: removed the unused `font_path_list` local and replaced the
    manual append loop with a comprehension (same resulting list).
    """
    out_dir = os.path.join(args.saving_dir, 'meta')
    lmdb_path = os.path.join(args.saving_dir, 'lmdb')
    os.makedirs(out_dir, exist_ok=True)
    # Rebuild the LMDB from scratch on every run.
    if os.path.exists(lmdb_path):
        shutil.rmtree(lmdb_path)
    os.makedirs(lmdb_path, exist_ok=True)
    trainset_dict_path = os.path.join(out_dir, 'trainset_dict.json')
    content_font = args.content_font
    train_font_dir = args.train_font_dir
    validation_font_dir = args.val_font_dir
    dict_save_path = os.path.join(out_dir, 'trainset_ori_meta.json')
    font_chosen = [os.path.join(train_font_dir, font_name) for font_name in os.listdir(train_font_dir)]
    font_chosen += glob.glob((validation_font_dir + '/*'))
    font_chosen = list(set(font_chosen))  # de-duplicate overlapping entries
    print('num of fonts: ', len(font_chosen))
    # The content (reference) font must always be part of the set.
    if (content_font not in font_chosen):
        font_chosen.append(content_font)
    out_dict = getMetaDict(font_chosen)
    with open(dict_save_path, 'w') as fout:
        json.dump(out_dict, fout, indent=4, ensure_ascii=False)
    valid_dict = save_lmdb(lmdb_path, out_dict)
    with open(trainset_dict_path, 'w') as f:
        json.dump(valid_dict, f, indent=4, ensure_ascii=False)
def throughput(args, model_path, forecaster, train_loader, test_loader, records):
    """Measure inference throughput (samples/second) for each requested
    inference framework and store the results in `records` (mutated in place).

    Optionally quantizes the forecaster first (args.quantize), pinning
    onnxruntime to args.cores threads when given.

    Fix: the checkpoint-load fallback used a bare `except:`, which also
    swallowed SystemExit/KeyboardInterrupt; narrowed to `except Exception`
    while keeping the best-effort "fit one epoch instead" behavior.
    """
    try:
        forecaster.load(model_path)
    except Exception:  # best-effort: no usable checkpoint -> quick 1-epoch fit
        forecaster.fit(train_loader, epochs=1)
    # Total number of samples used to normalize elapsed time.
    if (args.framework == 'tensorflow'):
        inference_sample_num = sum([x.shape[0] for (x, _) in test_loader])
    else:
        inference_sample_num = len(test_loader.dataset)
    if args.quantize:
        import onnxruntime
        sess_options = onnxruntime.SessionOptions()
        if args.cores:
            sess_options.intra_op_num_threads = args.cores
            sess_options.inter_op_num_threads = args.cores
        forecaster.quantize(test_loader, framework=args.quantize_type, sess_options=sess_options, thread_num=(args.cores if args.cores else None))
        print('QUANTIZATION DONE')
    # Each branch below times a full pass over the test loader.
    if ('torch' in args.inference_framework):
        import torch
        st = time.time()
        yhat = forecaster.predict(test_loader, quantize=args.quantize)
        total_time = (time.time() - st)
        records['torch_infer_throughput'] = (inference_sample_num / total_time)
    if ('onnx' in args.inference_framework):
        if (args.cores and (not args.quantize)):
            forecaster.build_onnx(thread_num=args.cores)
        st = time.time()
        yhat = forecaster.predict_with_onnx(test_loader, quantize=args.quantize)
        total_time = (time.time() - st)
        records['onnx_infer_throughput'] = (inference_sample_num / total_time)
    if ('openvino' in args.inference_framework):
        if (args.cores and (not args.quantize)):
            forecaster.build_openvino(thread_num=args.cores)
        st = time.time()
        yhat = forecaster.predict_with_openvino(test_loader, quantize=args.quantize)
        total_time = (time.time() - st)
        records['openvino_infer_throughput'] = (inference_sample_num / total_time)
    if ('jit' in args.inference_framework):
        if args.cores:
            forecaster.build_jit(thread_num=args.cores)
        st = time.time()
        yhat = forecaster.predict_with_jit(test_loader, quantize=args.quantize)
        total_time = (time.time() - st)
        records['jit_infer_throughput'] = (inference_sample_num / total_time)
def _parse_args(): parser = ArgumentParser() parser.add_argument('--input_folder', type=str, required=True, help='Path to the folder of parquet files.') parser.add_argument('--output_folder', type=str, default='.', help='The path to save the preprocessed data to parquet files. ') args = parser.parse_args() return args
def log_every_n_seconds(lvl, msg, n=1, *, name=None):
    """Log `msg` at level `lvl`, but at most once every `n` seconds per call
    site (the site key comes from `_find_caller`; timestamps live in the
    module-level `_LOG_TIMER` dict).

    `name` overrides the logger name, which otherwise defaults to the
    caller's module.
    """
    (caller_module, key) = _find_caller()
    last_logged = _LOG_TIMER.get(key, None)
    current_time = time.time()
    # Log on the first call from a site, then only after the cooldown expires.
    if ((last_logged is None) or ((current_time - last_logged) >= n)):
        logging.getLogger((name or caller_module)).log(lvl, msg)
        _LOG_TIMER[key] = current_time
def get_act_layer(name: Union[(Type[nn.Module], str)]='relu'):
    """Resolve an activation layer class from a name (timm-style registry).

    Accepts an nn.Module subclass (returned as-is) or a string name looked up
    in, by priority: the memory-efficient registry (_ACT_LAYER_ME, only when
    neither no-jit, export nor scripting mode is active), an export special
    case for silu/swish, the JIT registry (_ACT_LAYER_JIT), and finally the
    default registry — where an unknown name raises KeyError.
    Falsy `name` returns None.
    """
    if (not name):
        return None
    if isinstance(name, type):
        # already a layer class, nothing to look up
        return name
    if (not (is_no_jit() or is_exportable() or is_scriptable())):
        if (name in _ACT_LAYER_ME):
            return _ACT_LAYER_ME[name]
    # exported graphs need the plain Swish implementation
    if (is_exportable() and (name in ('silu', 'swish'))):
        return Swish
    if (not (is_no_jit() or is_exportable())):
        if (name in _ACT_LAYER_JIT):
            return _ACT_LAYER_JIT[name]
    return _ACT_LAYER_DEFAULT[name]
def get_LR_cheating():
    """Run the 'Logistic Regression Cheating' experiment with fixed seeds and
    return a formatted results row: [name, accuracy, precision, recall, f1],
    metrics rendered to two decimal places.
    """
    # Fix every RNG so the run is reproducible.
    np.random.seed(500)
    random.seed(500)
    torch.manual_seed(500)
    i = 100  # presumably an iteration/size argument for run_LR_cheating — confirm at its definition
    (acc, f1, prec, rec, _, _, _, _) = run_LR_cheating(i, True)
    LR = ['Logistic Regression Cheating', '{:.2f}'.format(acc), '{:.2f}'.format(prec), '{:.2f}'.format(rec), '{:.2f}'.format(f1)]
    return LR
def path2Path(path):
    """Coerce `path` (str or pathlib.Path) to a Path; Path inputs are
    returned unchanged. Any other type trips the assertion."""
    assert isinstance(path, (Path, str)), type(path)
    if isinstance(path, str):
        return Path(path)
    return path
class LVIS(BaseImageDataset):
    """LVIS v0.5 wrapped as a per-annotation image dataset: each entry in
    `image_list` is a single annotation id, so one underlying image can
    appear multiple times (once per annotated object)."""

    def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, min_area=None, split='train'):
        # root:          dataset root (defaults to env_settings().lvis_dir)
        # data_fraction: optional random subsampling fraction of annotations
        # min_area:      optional minimum annotation-area filter
        # split:         'train' or 'val' (selects images dir and json file)
        root = (env_settings().lvis_dir if (root is None) else root)
        super().__init__('LVIS', root, image_loader)
        self.img_pth = os.path.join(root, 'images', f'{split}2017/')
        self.anno_path = os.path.join(root, 'annotations', f'lvis_v0.5_{split}.json')
        self.lvis_set = lvis_pk.LVIS(self.anno_path)
        self.cats = self.lvis_set.cats
        self.class_list = self.get_class_list()
        self.image_list = self._get_image_list(min_area=min_area)
        if (data_fraction is not None):
            self.image_list = random.sample(self.image_list, int((len(self.image_list) * data_fraction)))
        self.im_per_class = self._build_im_per_class()

    def _get_image_list(self, min_area=None):
        # One entry per annotation id, optionally filtered by mask area.
        im_list = list(self.lvis_set.anns.keys())
        if (min_area is not None):
            im_list = [s for s in im_list if (self.lvis_set.anns[s]['area'] > min_area)]
        return im_list

    def get_num_classes(self):
        return len(self.class_list)

    def get_name(self):
        return 'lvis'

    def has_class_info(self):
        return True

    def get_class_list(self):
        # Category names in self.cats key order.
        class_list = []
        for cat_id in self.cats.keys():
            class_list.append(self.cats[cat_id]['name'])
        return class_list

    def has_segmentation_info(self):
        return True

    def _build_im_per_class(self):
        # Map class name -> list of indices into self.image_list.
        im_per_class = {}
        for (i, im) in enumerate(self.image_list):
            class_name = self.cats[self.lvis_set.anns[im]['category_id']]['name']
            if (class_name not in im_per_class):
                im_per_class[class_name] = [i]
            else:
                im_per_class[class_name].append(i)
        return im_per_class

    def get_images_in_class(self, class_name):
        return self.im_per_class[class_name]

    def get_image_info(self, im_id):
        """Box (x, y, w, h), segmentation mask, and validity flags for one
        annotation; `valid` requires strictly positive width and height."""
        anno = self._get_anno(im_id)
        bbox = torch.Tensor(anno['bbox']).view(4)
        mask = torch.Tensor(self.lvis_set.ann_to_mask(anno))
        valid = ((bbox[2] > 0) & (bbox[3] > 0))
        visible = valid.clone().byte()
        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}

    def _get_anno(self, im_id):
        # im_id indexes image_list, which holds LVIS annotation ids.
        anno = self.lvis_set.anns[self.image_list[im_id]]
        return anno

    def _get_image(self, im_id):
        path = self.lvis_set.load_imgs([self.lvis_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']
        img = self.image_loader(os.path.join(self.img_pth, path))
        return img

    def get_meta_info(self, im_id):
        # Best-effort: any lookup failure yields an all-None meta dict.
        # NOTE(review): bare `except:` also catches KeyboardInterrupt — consider narrowing.
        try:
            cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]
            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'], 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None})
        except:
            object_meta = OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None})
        return object_meta

    def get_class_name(self, im_id):
        cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]
        return cat_dict_current['name']

    def get_image(self, image_id, anno=None):
        """Return (frame, annotation dict, object meta) for one entry;
        `anno` may be supplied to skip recomputing get_image_info."""
        frame = self._get_image(image_id)
        if (anno is None):
            anno = self.get_image_info(image_id)
        object_meta = self.get_meta_info(image_id)
        return (frame, anno, object_meta)
class ACVLoss():
    """Stage-dependent disparity losses (ACVNet-style training).

    `loss_type` selects which weighting of per-scale smooth-L1 (or plain L1
    for 'test') losses is applied to the list of disparity estimates against
    the ground truth, restricted to `mask`.

    BUG FIX: the loss methods were defined without `self` but looked up as
    bound methods in __init__, so every call through self.loss_fn raised
    TypeError (the instance was silently passed as the first positional
    argument). Each method now takes `self`. The deprecated
    `size_average=True` was replaced with the equivalent reduction='mean',
    and the duplicated weighting loops were folded into one helper.
    """

    def __init__(self, loss_type='attn_only'):
        super().__init__()
        assert (loss_type in ['attn_only', 'freeze_attn', 'full', 'test']), f'loss_type {loss_type} not supported'
        self.loss_fn = None
        if (loss_type == 'attn_only'):
            self.loss_fn = self.model_loss_train_attn_only
        elif (loss_type == 'freeze_attn'):
            self.loss_fn = self.model_loss_train_freeze_attn
        elif (loss_type == 'full'):
            self.loss_fn = self.model_loss_train
        elif (loss_type == 'test'):
            self.loss_fn = self.model_loss_test
        else:
            raise NotImplementedError(f'loss_type {loss_type} not supported')

    @staticmethod
    def _weighted_sum(disp_ests, disp_gt, mask, weights, loss):
        # Weighted sum of per-scale losses over the masked pixels only.
        return sum((weight * loss(disp_est[mask], disp_gt[mask], reduction='mean')) for (disp_est, weight) in zip(disp_ests, weights))

    def model_loss_train_attn_only(self, disp_ests, disp_gt, mask):
        return self._weighted_sum(disp_ests, disp_gt, mask, [1.0], F.smooth_l1_loss)

    def model_loss_train_freeze_attn(self, disp_ests, disp_gt, mask):
        return self._weighted_sum(disp_ests, disp_gt, mask, [0.5, 0.7, 1.0], F.smooth_l1_loss)

    def model_loss_train(self, disp_ests, disp_gt, mask):
        return self._weighted_sum(disp_ests, disp_gt, mask, [0.5, 0.5, 0.7, 1.0], F.smooth_l1_loss)

    def model_loss_test(self, disp_ests, disp_gt, mask):
        # Evaluation uses plain L1 on the single final estimate.
        return self._weighted_sum(disp_ests, disp_gt, mask, [1.0], F.l1_loss)

    def __call__(self, training_output):
        """Compute the configured loss from training_output['disp'] and
        return (total_loss, loss_info) with tensorboard-style scalar keys."""
        disp_ests = training_output['disp']['disp_ests']
        disp_gt = training_output['disp']['disp_gt']
        mask = training_output['disp']['mask']
        assert (self.loss_fn is not None), 'loss_fn not initialized'
        total_loss = self.loss_fn(disp_ests, disp_gt, mask)
        loss_info = {}
        loss_info['scalar/train/loss_disp'] = total_loss
        loss_info['scalar/train/loss_sum'] = total_loss
        return (total_loss, loss_info)
def get_std_of_list(list_of_values):
    """Population standard deviation of the values; returns 0 for empty or
    single-element inputs (where a spread is undefined/zero)."""
    if len(list_of_values) <= 1:
        return 0
    return np.std(list_of_values)
def gen_updates_adagrad(loss, all_parameters, learning_rate=1.0, epsilon=1e-06):
    """Build Theano Adagrad update pairs for `all_parameters` w.r.t. `loss`.

    Each parameter gets a shared accumulator of squared gradients; the step
    is lr * g / sqrt(acc + eps). Returns the (variable, new_value) list to
    hand to theano.function(updates=...).
    """
    all_grads = [theano.grad(loss, param) for param in all_parameters]
    # One accumulator per parameter, zero-initialized with the same shape.
    all_accumulators = [theano.shared((param.get_value() * 0.0)) for param in all_parameters]
    updates = []
    for (param_i, grad_i, acc_i) in zip(all_parameters, all_grads, all_accumulators):
        acc_i_new = (acc_i + (grad_i ** 2))  # running sum of squared gradients
        updates.append((acc_i, acc_i_new))
        updates.append((param_i, (param_i - ((learning_rate * grad_i) / T.sqrt((acc_i_new + epsilon))))))
    return updates
def create_indoor_map(height, width, corridor_radius, iterations, room_number, room_width, room_height, no_overlap):
    """Generate an indoor occupancy map: grow an RRT-style corridor tree for
    `iterations` steps, then carve `room_number` rooms onto the tree nodes.

    Cleanup: the local previously named `map` shadowed the builtin (renamed
    to `grid_map`), and the unused loop index is now `_`.
    """
    tree = []
    grid_map = initialize_map(height, width)
    insert_root_node(grid_map, tree)
    for _ in range(iterations):
        random_position = sample(grid_map, corridor_radius)
        nearest_node = find_nearest_node(random_position, tree)
        insert_new_node(random_position, tree, grid_map)
        # carve a corridor from the new node back toward its nearest neighbor
        create_path(random_position, nearest_node, corridor_radius, grid_map)
    create_rooms(grid_map, tree, room_number, room_width, room_height, no_overlap)
    return grid_map
def prepare_parser():
    """Build the argument parser for the inception-metrics calculation script
    (dataset selection, loading options, and the random seed)."""
    desc = 'Calculate and store inception metrics.'
    p = ArgumentParser(description=desc)
    # dataset selection / location
    p.add_argument('--dataset', type=str, default='I128_hdf5',
                   help='Which Dataset to train on, out of I128, I256, C10, C100...Append _hdf5 to use the hdf5 version of the dataset. (default: %(default)s)')
    p.add_argument('--data_root', type=str, default='data',
                   help='Default location where data is stored (default: %(default)s)')
    # loading / batching options
    p.add_argument('--batch_size', type=int, default=64,
                   help='Default overall batchsize (default: %(default)s)')
    p.add_argument('--parallel', action='store_true', default=False,
                   help='Train with multiple GPUs (default: %(default)s)')
    p.add_argument('--augment', action='store_true', default=False,
                   help='Augment with random crops and flips (default: %(default)s)')
    p.add_argument('--num_workers', type=int, default=8,
                   help='Number of dataloader workers (default: %(default)s)')
    p.add_argument('--shuffle', action='store_true', default=False,
                   help='Shuffle the data? (default: %(default)s)')
    p.add_argument('--seed', type=int, default=0,
                   help='Random seed to use.')
    return p
# NOTE(review): the bare `_pipeline_test` / `_torch` / `_tf` names below look
# like mangled decorators (probably "@is_pipeline_test", "@require_torch",
# "@require_tf") whose '@' and prefix were stripped — confirm against the
# upstream transformers test file before relying on them.
_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    """Tests for the transformers zero-shot-classification pipeline:
    input/label normalization, error handling, entailment-label resolution,
    truncation, and fixed-output checks for tiny and large models."""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(model=model, tokenizer=tokenizer, candidate_labels=['polics', 'health'])
        return (classifier, ['Who are you voting for in 2020?', 'My stomach hurts.'])

    def run_pipeline_test(self, classifier, _):
        # Single label passed as a string, positional list, and keyword list.
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', ['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # Comma-separated label string; single-label scores must sum to 1.
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics, public health')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        outputs = classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health'])
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])), 1.0)
        # Custom hypothesis template.
        outputs = classifier('Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='This text is about {}')
        self.assertEqual(outputs, {'sequence': ANY(str), 'labels': [ANY(str)], 'scores': [ANY(float)]})
        # Batched input: one result dict per input sequence.
        outputs = classifier(['I am happy'], ['positive', 'negative'])
        self.assertEqual(outputs, [{'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]} for i in range(1)])
        outputs = classifier(['I am happy', 'I am sad'], ['positive', 'negative'])
        self.assertEqual(outputs, [{'sequence': ANY(str), 'labels': [ANY(str), ANY(str)], 'scores': [ANY(float), ANY(float)]} for i in range(2)])
        # Invalid inputs raise the documented exception types.
        with self.assertRaises(ValueError):
            classifier('', candidate_labels='politics')
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels='politics')
        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='')
        with self.assertRaises(TypeError):
            classifier('Who are you voting for in 2020?', candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier('Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template='Not formatting template')
        with self.assertRaises(AttributeError):
            classifier('Who are you voting for in 2020?', candidate_labels='politics', hypothesis_template=None)
        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        """Check that entailment_id is re-derived from config.label2id:
        -1 when no entailment-like label exists, otherwise the matching index."""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, (- 1))
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        # Restore the original mapping and confirm the id round-trips.
        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    _torch
    def test_truncation(self):
        # An over-long input must be truncated rather than crash the pipeline.
        zero_shot_classifier = pipeline('zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt')
        zero_shot_classifier(('Who are you voting for in 2020?' * 100), candidate_labels=['politics', 'public health', 'science'])

    _torch
    def test_small_model_pt(self):
        # Tiny random model: expect a uniform score distribution.
        zero_shot_classifier = pipeline('zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333]})

    _tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333]})

    _torch
    def test_large_model_pt(self):
        # Real MNLI model: pinned scores, plus a multi_label run on a long text.
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009]})
        outputs = zero_shot_classifier('The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True)
        self.assertEqual(nested_simplify(outputs), {'sequence': 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018]})

    _tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier('Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(nested_simplify(outputs), {'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009]})
        outputs = zero_shot_classifier('The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', candidate_labels=['machine learning', 'statistics', 'translation', 'vision'], multi_label=True)
        self.assertEqual(nested_simplify(outputs), {'sequence': 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.', 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018]})
class ParameterRange():
    """A simple numeric interval described by its two endpoints."""

    def __init__(self, lo, hi):
        # Endpoints are stored as given; no ordering validation is done.
        self.lo, self.hi = lo, hi

    def get_lower_bound(self):
        """Return the stored lower endpoint."""
        return self.lo

    def get_upper_bound(self):
        """Return the stored upper endpoint."""
        return self.hi
def info_to_nt(value, name='info'):
    """Recursively convert a nested dict into the namedtuple class bound to
    *name* in this module's globals.

    Non-dict values pass through unchanged.  Keys not declared on the target
    namedtuple are dropped; declared fields missing from *value* are filled
    with 0.  Nested dicts are converted against the class named
    ``<name>_<key>``.
    """
    if not isinstance(value, dict):
        return value
    nt_cls = globals()[name]
    fields = {}
    for key, item in value.items():
        if key in nt_cls._fields:
            fields[key] = info_to_nt(item, name + '_' + key)
    # Zero-fill any declared field that the input did not provide.
    for field in nt_cls._fields:
        fields.setdefault(field, 0)
    return nt_cls(**fields)
def apply_grad_processors(grads, gradprocs):
    """Filter out missing gradients, then run the surviving (grad, var)
    pairs through a chain of gradient processors.

    Args:
        grads: iterable of (gradient, variable) pairs; pairs whose gradient
            is None are logged and discarded.
        gradprocs: sequence of objects exposing ``process(grads)``, applied
            in order to the filtered list.

    Returns:
        The (grad, var) list produced by the last processor (or the
        filtered list when ``gradprocs`` is empty).
    """
    filtered = []
    for grad, var in grads:
        if grad is None:
            # Fix: logger.warn is a deprecated alias; use warning().
            logger.warning('No Gradient w.r.t {}'.format(var.op.name))
        else:
            filtered.append((grad, var))
    for proc in gradprocs:
        filtered = proc.process(filtered)
    return filtered
class IterLoader():
    """Wrap a data loader as an endless iterator that transparently starts
    a fresh epoch whenever the underlying iterator is exhausted.
    """

    def __init__(self, loader, length=None):
        # `length`, when given, overrides len(loader) in __len__.
        self.loader = loader
        self.length = length
        self.iter = None

    def __len__(self):
        if self.length is not None:
            return self.length
        return len(self.loader)

    def new_epoch(self):
        """Restart iteration of the wrapped loader from the beginning."""
        self.iter = iter(self.loader)

    def next(self):
        """Return the next item, wrapping around to a new epoch on exhaustion."""
        if self.iter is None:
            # The original relied on a bare `except:` to survive a missing
            # new_epoch() call; start the first epoch explicitly instead so
            # we only ever catch StopIteration below.
            self.iter = iter(self.loader)
        try:
            return next(self.iter)
        except StopIteration:
            # Epoch exhausted: restart and yield the first item again.
            self.iter = iter(self.loader)
            return next(self.iter)

    # Allow use with the builtin next() as well (backward-compatible addition).
    __next__ = next
def retrieve_info_for_model(model_type, frameworks: Optional[List[str]]=None):
    """Collect everything known about a registered model type.

    Looks the model up in the auto-module mappings (config, tokenizer,
    image processor, feature extractor, processor), scans its model files
    to determine which frameworks are implemented, and assembles a
    ModelPatterns record.

    Args:
        model_type: key into ``auto_module.MODEL_NAMES_MAPPING``.
        frameworks: optional subset of frameworks to keep; defaults to the
            environment's default frameworks, then intersected with what is
            actually implemented for this model.

    Returns:
        Dict with keys 'frameworks', 'model_classes', 'model_files' and
        'model_patterns'.

    Raises:
        ValueError: if ``model_type`` is not a registered model type.
    """
    if (model_type not in auto_module.MODEL_NAMES_MAPPING):
        raise ValueError(f'{model_type} is not a valid model type.')
    model_name = auto_module.MODEL_NAMES_MAPPING[model_type]
    config_class = auto_module.configuration_auto.CONFIG_MAPPING_NAMES[model_type]
    # Archive map is optional; None triggers the upper-case fallback below.
    archive_map = auto_module.configuration_auto.CONFIG_ARCHIVE_MAP_MAPPING_NAMES.get(model_type, None)
    if (model_type in auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES):
        tokenizer_classes = auto_module.tokenization_auto.TOKENIZER_MAPPING_NAMES[model_type]
        # Prefer the slow tokenizer entry; fall back to the fast one.
        tokenizer_class = (tokenizer_classes[0] if (tokenizer_classes[0] is not None) else tokenizer_classes[1])
    else:
        tokenizer_class = None
    image_processor_class = auto_module.image_processing_auto.IMAGE_PROCESSOR_MAPPING_NAMES.get(model_type, None)
    feature_extractor_class = auto_module.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES.get(model_type, None)
    processor_class = auto_module.processing_auto.PROCESSOR_MAPPING_NAMES.get(model_type, None)
    model_files = get_model_files(model_type, frameworks=frameworks)
    model_camel_cased = config_class.replace('Config', '')
    # Infer implemented frameworks from modeling file names; the plain
    # 'modeling' check must come last since the tf/flax names contain it.
    available_frameworks = []
    for fname in model_files['model_files']:
        if ('modeling_tf' in str(fname)):
            available_frameworks.append('tf')
        elif ('modeling_flax' in str(fname)):
            available_frameworks.append('flax')
        elif ('modeling' in str(fname)):
            available_frameworks.append('pt')
    if (frameworks is None):
        frameworks = get_default_frameworks()
    frameworks = [f for f in frameworks if (f in available_frameworks)]
    model_classes = retrieve_model_classes(model_type, frameworks=frameworks)
    if (archive_map is None):
        model_upper_cased = model_camel_cased.upper()
    else:
        # Derive the upper-cased name from the archive-map constant, which
        # conventionally looks like <MODEL>_PRETRAINED_..._MAP.
        parts = archive_map.split('_')
        idx = 0
        while ((idx < len(parts)) and (parts[idx] != 'PRETRAINED')):
            idx += 1
        if (idx < len(parts)):
            model_upper_cased = '_'.join(parts[:idx])
        else:
            model_upper_cased = model_camel_cased.upper()
    model_patterns = ModelPatterns(model_name, checkpoint=find_base_model_checkpoint(model_type, model_files=model_files), model_type=model_type, model_camel_cased=model_camel_cased, model_lower_cased=model_files['module_name'], model_upper_cased=model_upper_cased, config_class=config_class, tokenizer_class=tokenizer_class, image_processor_class=image_processor_class, feature_extractor_class=feature_extractor_class, processor_class=processor_class)
    return {'frameworks': frameworks, 'model_classes': model_classes, 'model_files': model_files, 'model_patterns': model_patterns}
def compute_metrics(task_name, preds, labels):
    """Compute the evaluation metric(s) for a GLUE-style task.

    Args:
        task_name: lower-cased task identifier (e.g. 'cola', 'mrpc').
        preds: predicted labels, same length as ``labels``.
        labels: gold labels.

    Returns:
        Dict mapping metric name(s) to value(s).  Tasks without a
        dedicated metric — sst-2, mnli, mnli-mm, qnli, rte, wnli, snli,
        race, dream, and any unknown task — report plain accuracy, which
        matches the original branch-per-task behavior.
    """
    assert (len(preds) == len(labels))
    # Tasks with dedicated metrics.
    if (task_name == 'cola'):
        return {'mcc': matthews_corrcoef(labels, preds)}
    if (task_name in ('mrpc', 'qqp')):
        return acc_and_f1(preds, labels)
    if (task_name == 'sts-b'):
        return pearson_and_spearman(preds, labels)
    # Everything else falls back to simple accuracy.
    return {'acc': simple_accuracy(preds, labels)}
# NOTE(review): the bare `_model` below appears to be the tail of a
# registration decorator (e.g. `@register_model`) mangled by formatting —
# confirm against the original file.
_model
def ssl_resnext50_32x4d(pretrained=True, **kwargs):
    """Build a ResNeXt50 (32x4d) and register its 'ssl_resnext50_32x4d' config.

    Args:
        pretrained: when True, load the pretrained weights via
            ``load_pretrained``.
        **kwargs: forwarded to the ResNet constructor (``num_classes`` and
            ``in_chans`` are also reused when loading weights).
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
    model.default_cfg = default_cfgs['ssl_resnext50_32x4d']
    if pretrained:
        # NOTE(review): num_classes defaults to 0 here (not 1000) when the
        # caller omits it — verify this matches load_pretrained's contract.
        load_pretrained(model, num_classes=kwargs.get('num_classes', 0), in_chans=kwargs.get('in_chans', 3))
    return model
def get_or_make(name: str, node_type: str, tree: bpy.types.NodeTree, label_tag: str='(zpy) ', pos: Tuple[float]=None) -> bpy.types.Node:
    """Fetch the node called *name* from *tree*, creating it on demand.

    A freshly created node is given the requested name, a tagged label and
    a zpy description.  In either case the node is moved to *pos* when one
    is supplied.
    """
    existing = tree.nodes.get(name, None)
    if existing is None:
        existing = tree.nodes.new(node_type)
        existing.name = name
        existing.label = f'{label_tag}{name}'
        existing.bl_description = 'This node has been created and/or modified by zpy'
    if pos is not None:
        existing.location = pos
    return existing
class BallQuery(Function):
    """Autograd wrapper around a fixed-radius neighbor (ball) query.

    NOTE(review): ``forward``/``backward`` are written in torch.autograd
    Function style but carry no ``@staticmethod`` decorators here — they may
    have been lost in formatting; confirm against the original file.
    """

    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
        """Return neighbor indices of each query point within *radius*.

        Args:
            radius: search radius around each query point.
            nsample: maximum number of neighbors gathered per query point.
            xyz: source point coordinates (must be contiguous).
            new_xyz: query point coordinates (must be contiguous).
        """
        # This path requires a CUDA device via open3d; bail out otherwise.
        if (not (open3d.core.cuda.device_count() > 0)):
            raise NotImplementedError
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()
        idx = ball_query(xyz, new_xyz, radius, nsample)
        return idx

    def backward(ctx, a=None):
        # Neighbor indices are not differentiable: no gradient for any input.
        return (None, None, None, None)
def eval_10_crop_accuracy(opt, model):
    """Run a batched multi-crop evaluation epoch on the localization
    validation set and return its result."""
    print('10-crop test epoch ------->')
    val_dir = opt.localization_val_path
    crop_transform = get_nonreproducible_rand_transform(opt)
    print('Creating data loader for test set...')
    val_dataset = ImageFolder(root=val_dir, opt=opt, transform=crop_transform, split='test')
    evaluator = MultiCropEvaluator(opt, model, val_dataset)
    return evaluator.run_batched_eval_epoch()
# NOTE(review): `_data_params('usps2mnist')` below appears to be the tail of
# a registration decorator (e.g. `@register_data_params('usps2mnist')`)
# mangled by formatting — confirm against the original file.
_data_params('usps2mnist')
class Usps2MnistParams(DatasetParams):
    """Static dataset parameters for the usps2mnist adaptation task."""
    num_channels = 3      # channel count expected by the model input
    image_size = 16       # square side length in pixels
    mean = 0.5            # normalization mean
    std = 0.5             # normalization std
    num_cls = 10          # number of classes
    target_transform = None  # no label transform applied
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
    """Session hook that pins the shared iterations-per-loop variable to a
    fixed evaluation step count once the session exists."""

    def __init__(self, num_steps):
        # Number of evaluation iterations to load into the variable.
        self._num_steps = num_steps

    def begin(self):
        # Resolve (or lazily create) the shared iterations-per-loop variable
        # while the graph is still being built.
        self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

    def after_create_session(self, session, coord):
        # Push the configured step count into the variable on the live session.
        self._iterations_per_loop_var.load(self._num_steps, session=session)
def _create_dummy_ann_file(ann_file):
    """Write a single-line dummy annotation record to *ann_file*."""
    record = {
        'text': ',',
        'label': {
            'address': {'': [[15, 16]]},
            'name': {'': [[0, 2]]},
        },
    }
    with open(ann_file, 'w') as out:
        out.write(json.dumps(record, ensure_ascii=False) + '\n')
def validate_megengine_model(platform, model_file, input_file, mace_out_file, input_names, input_shapes, input_data_formats, output_names, output_shapes, output_data_formats, validation_threshold, input_data_types, log_file):
    """Run a MegEngine model on the given inputs and compare each output
    against the corresponding MACE output file.

    Inputs are loaded per name from *input_file*, reshaped, and converted
    from NHWC to NCHW when 4-D; the compiled graph runs once and each
    declared output is compared via ``compare_output``.
    """
    import megengine._internal as mgb
    if (not os.path.isfile(model_file)):
        common.MaceLogger.error(VALIDATION_MODULE, (("Input graph file '" + model_file) + "' does not exist!"))
    feed_inputs = []
    for i in range(len(input_names)):
        input_value = load_data(util.formatted_file_name(input_file, input_names[i]), input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        if ((input_data_formats[i] == DataFormat.NHWC) and (len(input_shapes[i]) == 4)):
            # MegEngine side consumes NCHW; transpose 4-D NHWC inputs.
            input_value = input_value.transpose((0, 3, 1, 2))
        feed_inputs.append(input_value)
    (cg, _, outputs) = mgb.load_comp_graph_from_file(model_file)
    # Graph inputs are the Host2DeviceCopy dependencies, ordered by name so
    # they line up with the sorted feed list.
    inputs = mgb.cgtools.get_dep_vars(outputs, 'Host2DeviceCopy')
    inputs = sorted(inputs, key=(lambda i: i.name))
    outputs = list(map(mgb.copy_output, outputs))
    if (len(outputs) == 1):
        (outputs,) = outputs
    func = cg.compile(inputs, outputs)
    mge_output_value = func(*feed_inputs)
    for i in range(len(output_names)):
        output_file_name = util.formatted_file_name(mace_out_file, output_names[i])
        mace_out_value = load_data(output_file_name)
        (mace_out_value, real_output_shape, real_output_data_format) = get_real_out_value_shape_df(platform, mace_out_value, output_shapes[i], output_data_formats[i])
        # NOTE(review): the whole `mge_output_value` is passed for every
        # output name rather than the i-th output — confirm compare_output
        # handles multi-output models as intended.
        compare_output(output_names[i], mace_out_value, mge_output_value, validation_threshold, log_file, real_output_shape, real_output_data_format)
class TrainLoop(object):
    """Training loop for one generator trained against several discriminators.

    Supports multiple schemes for weighting the per-discriminator generator
    losses — 'vanilla', 'hyper', 'gman', 'gman_grad', 'mgd', 'loss_delta' —
    and checkpoints both the generator and every discriminator.
    """

    def __init__(self, generator, disc_list, optimizer, train_loader, alpha=0.8, nadir_slack=1.1, train_mode='vanilla', checkpoint_path=None, checkpoint_epoch=None, cuda=True):
        # Checkpoints default to the current working directory; a custom
        # directory is created on demand.
        if (checkpoint_path is None):
            self.checkpoint_path = os.getcwd()
        else:
            self.checkpoint_path = checkpoint_path
            if (not os.path.isdir(self.checkpoint_path)):
                os.mkdir(self.checkpoint_path)
        # Generator files embed the mode, discriminator count and epoch;
        # discriminator files embed their 1-based index.
        self.save_epoch_fmt_gen = os.path.join(self.checkpoint_path, (((('G_' + train_mode) + '_') + str(len(disc_list))) + '_{}ep.pt'))
        self.save_epoch_fmt_disc = os.path.join(self.checkpoint_path, (('D_{}_' + train_mode) + '_.pt'))
        self.cuda_mode = cuda
        self.model = generator
        self.disc_list = disc_list
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.history = {'gen_loss': [], 'gen_loss_minibatch': [], 'disc_loss': [], 'disc_loss_minibatch': []}
        self.total_iters = 0
        self.cur_epoch = 0
        self.alpha = alpha
        self.nadir_slack = nadir_slack
        self.train_mode = train_mode
        self.constraints = make_constraints(len(disc_list))
        # Initial discriminator weighting: random, normalized to sum to 1.
        self.proba = np.random.rand(len(disc_list))
        self.proba /= np.sum(self.proba)
        # Q-values used by the 'loss_delta' weighting scheme.
        self.Q = np.zeros(len(self.disc_list))
        if (checkpoint_epoch is not None):
            self.load_checkpoint(checkpoint_epoch)

    def train(self, n_epochs=1, save_every=1):
        """Run the loop up to *n_epochs*, checkpointing every *save_every*
        epochs and once more at the end."""
        while (self.cur_epoch < n_epochs):
            print('Epoch {}/{}'.format((self.cur_epoch + 1), n_epochs))
            train_iter = tqdm(enumerate(self.train_loader))
            gen_loss = 0.0
            disc_loss = 0.0
            for (t, batch) in train_iter:
                (new_gen_loss, new_disc_loss) = self.train_step(batch)
                gen_loss += new_gen_loss
                disc_loss += new_disc_loss
                self.total_iters += 1
                self.history['gen_loss_minibatch'].append(new_gen_loss)
                self.history['disc_loss_minibatch'].append(new_disc_loss)
            # Per-epoch averages over the (t + 1) minibatches just seen.
            self.history['gen_loss'].append((gen_loss / (t + 1)))
            self.history['disc_loss'].append((disc_loss / (t + 1)))
            self.cur_epoch += 1
            if ((self.cur_epoch % save_every) == 0):
                self.checkpointing()
        print('Saving final model...')
        self.checkpointing()

    def train_step(self, batch):
        """One optimization step: update every discriminator on real/fake
        data, then update the generator with mode-dependent loss weighting.

        Returns:
            (generator loss value, mean discriminator loss value).
        """
        (x, _) = batch
        z_ = torch.randn(x.size(0), 128).view((- 1), 128, 1, 1)
        y_real_ = torch.ones(x.size(0))
        y_fake_ = torch.zeros(x.size(0))
        if self.cuda_mode:
            x = x.cuda()
            z_ = z_.cuda()
            y_real_ = y_real_.cuda()
            y_fake_ = y_fake_.cuda()
        # Detach fake samples so discriminator updates do not reach the generator.
        out_d = self.model.forward(z_).detach()
        loss_d = 0
        for disc in self.disc_list:
            d_real = disc.forward(x).squeeze()
            d_fake = disc.forward(out_d).squeeze()
            loss_disc = (F.binary_cross_entropy(d_real, y_real_) + F.binary_cross_entropy(d_fake, y_fake_))
            disc.optimizer.zero_grad()
            loss_disc.backward()
            disc.optimizer.step()
            loss_d += loss_disc.item()
        loss_d /= len(self.disc_list)
        self.model.train()
        # Fresh noise for the generator update.
        z_ = torch.randn(x.size(0), 128).view((- 1), 128, 1, 1)
        if self.cuda_mode:
            z_ = z_.cuda()
        out = self.model.forward(z_)
        loss_G = 0
        if (self.train_mode == 'hyper'):
            # Hypervolume maximization: minimize -sum(log(nadir - loss_i)).
            losses_list_float = []
            losses_list_var = []
            prob_list = []
            for disc in self.disc_list:
                losses_list_var.append(F.binary_cross_entropy(disc.forward(out).squeeze(), y_real_))
                losses_list_float.append(losses_list_var[(- 1)].item())
            self.update_nadir_point(losses_list_float)
            coefs_sum = 0.0
            for (i, loss) in enumerate(losses_list_var):
                loss_G -= torch.log((self.nadir - loss))
                prob_list.append((1 / (self.nadir - losses_list_float[i])))
                coefs_sum += prob_list[(- 1)]
            self.proba = (np.asarray(prob_list) / coefs_sum)
        elif (self.train_mode == 'gman'):
            # GMAN: softmax over per-discriminator losses as weights.
            losses_list_float = []
            losses_list_var = []
            for disc in self.disc_list:
                losses_list_var.append(F.binary_cross_entropy(disc.forward(out).squeeze(), y_real_))
                losses_list_float.append(losses_list_var[(- 1)].item())
            losses = torch.FloatTensor(losses_list_float)
            self.proba = torch.nn.functional.softmax((self.alpha * losses), dim=0).detach().cpu().numpy()
            # NOTE(review): `acm` is assigned but never used afterwards.
            acm = 0.0
            for loss_weight in zip(losses_list_var, self.proba):
                loss_G += (loss_weight[0] * float(loss_weight[1]))
            # NOTE(review): bare expression below has no effect.
            loss_G
        elif (self.train_mode == 'gman_grad'):
            # Weight discriminators by the generator gradient norm each induces.
            grads_list = []
            losses_list = []
            for disc in self.disc_list:
                loss = F.binary_cross_entropy(disc.forward(self.model.forward(z_)).squeeze(), y_real_)
                grads_list.append(self.get_gen_grads_norm(loss))
            grads = torch.FloatTensor(grads_list)
            self.proba = torch.nn.functional.softmax((self.alpha * grads), dim=0).detach().cpu().numpy()
            self.model.zero_grad()
            out = self.model.forward(z_)
            for disc in self.disc_list:
                losses_list.append(F.binary_cross_entropy(disc.forward(out).squeeze(), y_real_))
            for loss_weight in zip(losses_list, self.proba):
                loss_G += (loss_weight[0] * float(loss_weight[1]))
        elif (self.train_mode == 'mgd'):
            # Multiple-gradient descent: solve for weights via SLSQP on the
            # stacked normalized gradients.
            grads_list = []
            losses_list = []
            for disc in self.disc_list:
                loss = F.binary_cross_entropy(disc.forward(self.model.forward(z_)).squeeze(), y_real_)
                grads_list.append(self.get_gen_grads(loss).cpu().detach().numpy())
            grads_list = np.asarray(grads_list).T
            result = minimize(steep_direct_cost, self.proba, args=grads_list, jac=steep_direc_cost_deriv, constraints=self.constraints, method='SLSQP', options={'disp': False})
            self.proba = result.x
            self.model.zero_grad()
            out = self.model.forward(z_)
            for disc in self.disc_list:
                losses_list.append(F.binary_cross_entropy(disc.forward(out).squeeze(), y_real_))
            for loss_weight in zip(losses_list, self.proba):
                loss_G += (loss_weight[0] * float(loss_weight[1]))
        elif (self.train_mode == 'loss_delta'):
            # Weighted loss now; probabilities updated after the step from
            # the change in mean discriminator outputs (see below).
            # NOTE(review): uses 100-dim noise here while the main path uses
            # 128-dim — confirm the generator input size.
            z_probs = torch.randn(x.size(0), 100).view((- 1), 100, 1, 1)
            if self.cuda_mode:
                z_probs = z_probs.cuda()
            out_probs = self.model.forward(z_probs)
            outs_before = []
            losses_list = []
            for (i, disc) in enumerate(self.disc_list):
                disc_out = disc.forward(out_probs).squeeze()
                losses_list.append((float(self.proba[i]) * F.binary_cross_entropy(disc_out, y_real_)))
                outs_before.append(disc_out.detach().mean())
            for loss_ in losses_list:
                loss_G += loss_
        elif (self.train_mode == 'vanilla'):
            # Plain sum of losses; weights kept uniform for bookkeeping.
            for disc in self.disc_list:
                loss_G += F.binary_cross_entropy(disc.forward(out).squeeze(), y_real_)
            self.proba = ((np.ones(len(self.disc_list)) * 1) / len(self.disc_list))
        self.optimizer.zero_grad()
        loss_G.backward()
        self.optimizer.step()
        if (self.train_mode == 'loss_delta'):
            # Re-evaluate on the same noise to measure each discriminator's
            # output shift caused by the generator update.
            out_probs = self.model.forward(z_probs)
            outs_after = []
            for (i, disc) in enumerate(self.disc_list):
                disc_out = disc.forward(out_probs).squeeze()
                outs_after.append(disc_out.mean())
            self.update_prob(outs_before, outs_after)
        return (loss_G.item(), loss_d)

    def checkpointing(self):
        """Save generator (with history/state) and every discriminator."""
        print('Checkpointing...')
        ckpt = {'model_state': self.model.state_dict(), 'optimizer_state': self.optimizer.state_dict(), 'history': self.history, 'total_iters': self.total_iters, 'proba': self.proba, 'Q': self.Q, 'cur_epoch': self.cur_epoch}
        torch.save(ckpt, self.save_epoch_fmt_gen.format(self.cur_epoch))
        for (i, disc) in enumerate(self.disc_list):
            ckpt = {'model_state': disc.state_dict(), 'optimizer_state': disc.optimizer.state_dict()}
            torch.save(ckpt, self.save_epoch_fmt_disc.format((i + 1)))

    def load_checkpoint(self, epoch):
        """Restore generator, training state and all discriminators from the
        checkpoint saved at *epoch* (no-op with a message if absent)."""
        ckpt = self.save_epoch_fmt_gen.format(epoch)
        if os.path.isfile(ckpt):
            ckpt = torch.load(ckpt)
            self.model.load_state_dict(ckpt['model_state'])
            self.optimizer.load_state_dict(ckpt['optimizer_state'])
            self.history = ckpt['history']
            self.total_iters = ckpt['total_iters']
            self.cur_epoch = ckpt['cur_epoch']
            self.proba = ckpt['proba']
            self.Q = ckpt['Q']
            for (i, disc) in enumerate(self.disc_list):
                ckpt = torch.load(self.save_epoch_fmt_disc.format((i + 1)))
                disc.load_state_dict(ckpt['model_state'])
                disc.optimizer.load_state_dict(ckpt['optimizer_state'])
        else:
            print('No checkpoint found at: {}'.format(ckpt))

    def print_grad_norms(self):
        """Print the sum of L2 norms of the generator's gradients."""
        norm = 0.0
        for params in list(self.model.parameters()):
            norm += params.grad.norm(2).item()
        print('Sum of grads norms: {}'.format(norm))

    def check_nans(self):
        """Print warnings if any generator parameter or gradient is NaN."""
        for params in list(self.model.parameters()):
            if np.any(np.isnan(params.detach().cpu().numpy())):
                print('params NANs!!!!!')
            if np.any(np.isnan(params.grad.detach().cpu().numpy())):
                print('grads NANs!!!!!!')

    def define_nadir_point(self):
        """Initialize the nadir point from current discriminator losses on a
        small fresh batch of generated samples."""
        disc_outs = []
        # NOTE(review): 100-dim noise here vs 128-dim in train_step — confirm.
        z_ = torch.randn(20, 100).view((- 1), 100, 1, 1)
        y_real_ = torch.ones(z_.size(0))
        if self.cuda_mode:
            z_ = z_.cuda()
            y_real_ = y_real_.cuda()
        out = self.model.forward(z_)
        for disc in self.disc_list:
            d_out = disc.forward(out).squeeze()
            disc_outs.append(F.binary_cross_entropy(d_out, y_real_).item())
        self.nadir = float((np.max(disc_outs) + self.nadir_slack))

    def update_nadir_point(self, losses_list):
        # Slack keeps the nadir strictly above the largest current loss.
        self.nadir = float(((np.max(losses_list) * self.nadir_slack) + 1e-08))

    def update_prob(self, before, after):
        """Update Q-values from the per-discriminator output deltas and
        refresh the softmax weighting ('loss_delta' mode)."""
        reward = [(el[1] - el[0]) for el in zip(before, after)]
        for i in range(len(self.Q)):
            # Exponential moving average of rewards with rate alpha.
            self.Q[i] = ((self.alpha * reward[i]) + ((1 - self.alpha) * self.Q[i]))
        self.proba = torch.nn.functional.softmax((15 * torch.FloatTensor(self.Q)), dim=0).detach().cpu().numpy()

    def get_gen_grads(self, loss_):
        """Return the generator's flattened, L2-normalized gradient of *loss_*."""
        grads = torch.autograd.grad(outputs=loss_, inputs=self.model.parameters())
        self.model.zero_grad()
        for params_grads in grads:
            # NOTE(review): bare except abuses the NameError on the first
            # iteration (grads_ not yet bound) to seed the accumulator.
            try:
                grads_ = torch.cat([grads_, params_grads.view((- 1))], 0)
            except:
                grads_ = params_grads.view((- 1))
        return (grads_ / grads_.norm())

    def get_gen_grads_norm(self, loss_):
        """Return the L2 norm of the generator's gradient of *loss_*."""
        norm = 0.0
        self.model.zero_grad()
        grads = torch.autograd.grad(outputs=loss_, inputs=self.model.parameters())
        for params_grads in grads:
            norm += (params_grads.norm(2).item() ** 2)
        return np.sqrt(norm)
def train(args):
    """Fine-tune a target-pooled BERT classifier on aspect-sentiment data.

    Loads train (and optionally dev) examples, builds tensor datasets,
    trains with BertAdam under a linear-warmup schedule, and saves either
    the best-on-dev model (when ``args.do_valid``) or the final model.
    """
    processor = data_utils.AscProcessor()
    label_list = processor.get_labels()
    tokenizer = ABSATokenizer.from_pretrained('bert-base-multilingual-cased')
    train_examples = processor.get_train_examples(args.data_dir, 'train_rels.json', method=args.method)
    num_train_steps = (int((len(train_examples) / args.train_batch_size)) * args.num_train_epochs)
    train_features = data_utils.convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, 'asc')
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', len(train_examples))
    logger.info(' Batch size = %d', args.train_batch_size)
    logger.info(' Num steps = %d', num_train_steps)
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
    # NOTE(review): this dataset yields 4-tuples, but the training loop
    # below unpacks 5 values (including target_ids) per batch — a target
    # indices tensor appears to be missing here; confirm against callers.
    train_data = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_label_ids)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.do_valid:
        valid_examples = processor.get_dev_examples(args.data_dir, 'dev_rels.json', method=args.method)
        valid_features = data_utils.convert_examples_to_features(valid_examples, label_list, args.max_seq_length, tokenizer, 'asc')
        valid_all_input_ids = torch.tensor([f.input_ids for f in valid_features], dtype=torch.long)
        valid_all_segment_ids = torch.tensor([f.segment_ids for f in valid_features], dtype=torch.long)
        valid_all_input_mask = torch.tensor([f.input_mask for f in valid_features], dtype=torch.long)
        valid_all_label_ids = torch.tensor([f.label_id for f in valid_features], dtype=torch.long)
        valid_data = TensorDataset(valid_all_input_ids, valid_all_segment_ids, valid_all_input_mask, valid_all_label_ids)
        logger.info('***** Running validations *****')
        logger.info(' Num orig examples = %d', len(valid_examples))
        logger.info(' Num split examples = %d', len(valid_features))
        logger.info(' Batch size = %d', args.train_batch_size)
        valid_sampler = SequentialSampler(valid_data)
        valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=args.train_batch_size)
        best_valid_loss = float('inf')
        valid_losses = []
    model = TargetPooledClassification('bert-base-multilingual-cased', hidden_dropout_prob=0.3, num_labels=len(label_list), pool_target=args.pooling)
    model.cuda()
    # Only trainable parameters, excluding the pooler, are optimized.
    param_optimizer = [(k, v) for (k, v) in model.named_parameters() if (v.requires_grad == True)]
    param_optimizer = [n for n in param_optimizer if ('pooler' not in n[0])]
    # Biases and LayerNorm weights are exempt from weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    t_total = num_train_steps
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total)
    global_step = 0
    model.train()
    for _ in range(args.num_train_epochs):
        for (step, batch) in enumerate(train_dataloader):
            batch = tuple((t.cuda() for t in batch))
            (input_ids, segment_ids, input_mask, label_ids, target_ids) = batch
            loss = model(input_ids=input_ids, attention_mask=input_mask, labels=label_ids, target_indices=target_ids)
            loss.backward()
            # Manual linear warmup schedule applied directly to the optimizer.
            lr_this_step = (args.learning_rate * warmup_linear((global_step / t_total), args.warmup_proportion))
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_this_step
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
        if args.do_valid:
            # End-of-epoch evaluation on the dev set; best model kept by loss.
            model.eval()
            with torch.no_grad():
                losses = []
                preds = []
                golds = []
                ids = [str(e.guid) for e in valid_examples]
                targets = [str(e.text_a) for e in valid_examples]
                texts = [str(e.text_b) for e in valid_examples]
                valid_size = 0
                for (step, batch) in enumerate(valid_dataloader):
                    batch = tuple((t.cuda() for t in batch))
                    (input_ids, segment_ids, input_mask, label_ids, target_ids) = batch
                    loss = model(input_ids=input_ids, attention_mask=input_mask, labels=label_ids, target_indices=target_ids)
                    # Weight per-batch loss by batch size for a proper mean.
                    losses.append((loss.data.item() * input_ids.size(0)))
                    with torch.no_grad():
                        logits = model(input_ids=input_ids, attention_mask=input_mask, target_indices=target_ids)
                    logits = logits.detach().cpu().numpy()
                    pred = logits.argmax(1)
                    label_ids = label_ids.cpu().numpy()
                    preds.extend(pred.tolist())
                    golds.extend(label_ids.tolist())
                    valid_size += input_ids.size(0)
                valid_loss = (sum(losses) / valid_size)
                valid_acc = accuracy_score(golds, preds)
                valid_f1 = f1_score(golds, preds, average='macro')
                logger.info('validation loss: %f', valid_loss)
                logger.info('validation acc: %f', valid_acc)
                logger.info('validation macro F1: %f', valid_f1)
                valid_losses.append(valid_loss)
                if (valid_loss < best_valid_loss):
                    torch.save(model, os.path.join(args.output_dir, 'model.pt'))
                    best_valid_loss = valid_loss
            model.train()
    if args.do_valid:
        # Dump last-epoch predictions and metrics alongside the model.
        with open(os.path.join(args.output_dir, 'dev_preds.json'), 'w') as fw:
            json.dump({'predictions': preds, 'golds': golds, 'acc': valid_acc, 'f1': valid_f1, 'ids': ids, 'targets': targets, 'texts': texts}, fw)
    else:
        torch.save(model, os.path.join(args.output_dir, 'model.pt'))
def test_digits_cosine_lazy():
    """GraphCutSelection with the lazy optimizer must reproduce the
    reference ranking, gains, and selected subset on the digits data."""
    selector = GraphCutSelection(100, 'cosine', optimizer='lazy')
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_cosine_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def process_folder(q, data_dir, output_dir, stride=1):
    """Consume folder names from queue *q* and write stacked frame triplets.

    For each folder, consecutive frames (s, s+stride, s+2*stride) are
    concatenated vertically into one image written under *output_dir*, and
    an index line per triplet is appended to that folder's train.txt.

    Args:
        q: queue of folder names relative to *data_dir*.
        data_dir: root directory holding the input image folders.
        output_dir: root directory for stacked images and train.txt files.
        stride: frame step between the three images of a triplet.
    """
    while True:
        if q.empty():
            break
        folder = q.get()
        image_path = os.path.join(data_dir, folder)
        dump_image_path = os.path.join(output_dir, folder)
        if not os.path.isdir(dump_image_path):
            os.makedirs(dump_image_path)
        names = sorted(os.listdir(image_path))
        numbers = len(names)
        if numbers < 3:
            print('this folder do not have enough image, numbers < 3!')
        # Fix: the original opened train.txt and never closed it; `with`
        # guarantees the handle is flushed and closed even on error.
        with open(os.path.join(dump_image_path, 'train.txt'), 'w') as f:
            for n in range(numbers - (2 * stride)):
                s_idx = n
                m_idx = s_idx + stride
                e_idx = s_idx + (2 * stride)
                curr_image = cv2.imread(os.path.join(image_path, names[s_idx]))
                middle_image = cv2.imread(os.path.join(image_path, names[m_idx]))
                next_image = cv2.imread(os.path.join(image_path, names[e_idx]))
                # Skip (and report) any triplet with an unreadable frame.
                if curr_image is None:
                    print(os.path.join(image_path, '%.5d' % s_idx) + '.png')
                    continue
                if middle_image is None:
                    print(os.path.join(image_path, '%.5d' % m_idx) + '.png')
                    continue
                if next_image is None:
                    print(os.path.join(image_path, '%.5d' % e_idx) + '.png')
                    continue
                # Stack the three frames vertically (axis 0 = image rows).
                seq_images = np.concatenate([curr_image, middle_image, next_image], axis=0)
                cv2.imwrite(os.path.join(dump_image_path, '%.10d' % s_idx) + '.png', seq_images.astype('uint8'))
                f.write('%s\n' % (os.path.join(folder, '%.10d' % s_idx) + '.png'))
        print(folder)
def get_down_block(down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, transformer_layers_per_block=1, num_attention_heads=None, resnet_groups=None, cross_attention_dim=None, downsample_padding=None, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift='default'):
    """Instantiate the down-sampling UNet block matching *down_block_type*.

    A leading 'UNetRes' prefix on the type name is tolerated and stripped.

    Raises:
        ValueError: for an unknown block type, or for
            'CrossAttnDownBlock2D' without a ``cross_attention_dim``.
    """
    if down_block_type.startswith('UNetRes'):
        down_block_type = down_block_type[7:]
    if down_block_type == 'DownBlock2D':
        return DownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    if down_block_type == 'CrossAttnDownBlock2D':
        if cross_attention_dim is None:
            raise ValueError('cross_attention_dim must be specified for CrossAttnDownBlock2D')
        return CrossAttnDownBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    raise ValueError(f'{down_block_type} does not exist.')
class TestTokenBlockDataset(unittest.TestCase):
    """Smoke test for TokenBlockMixtureDataset in 'complete' break mode,
    plus a SortDataset ordering pass over the result."""

    def _build_dataset(self, data, **kwargs):
        # Wrap the raw tensors in a TestDataset and pass their lengths along.
        sizes = [len(x) for x in data]
        underlying_ds = test_utils.TestDataset(data)
        return TokenBlockMixtureDataset(underlying_ds, sizes, **kwargs)

    def test_complete_break_mode(self):
        # Six variable-length token sequences, each terminated by eos=1.
        data = [torch.tensor([3, 4, 1], dtype=torch.long), torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), torch.tensor([4, 1], dtype=torch.long), torch.tensor([3, 4, 6, 7, 8, 1], dtype=torch.long)]
        ds = self._build_dataset(data, block_sizes=[4, 8, 16], pad=0, eos=1)
        # Debug output retained intentionally.
        print(len(ds))
        for i in range(len(ds)):
            print(i, ds[i])
        # The first two items must come through unchanged.
        self.assertEqual(ds[0].tolist(), [3, 4, 1])
        self.assertEqual(ds[1].tolist(), [5, 4, 3, 2, 1])
        print(ds.number_of_inst_in_block)
        # Deterministic shuffle for the sort-order check.
        with data_utils.numpy_seed(1):
            shuffle = np.random.permutation(len(ds))
        print(ds.sizes)
        print(shuffle)
        sds = SortDataset(ds, sort_order=[shuffle, ds.sizes])
        print(sds.ordered_indices())
class DenoisingDataset(FairseqDataset):
    """Applies BART-style denoising noise to a tokenized dataset.

    Noise operations (each controlled by an ``args`` ratio): sentence
    permutation, whole-word/span masking, random token insertion, and
    sequence rotation. ``__getitem__`` returns a dict with the noised
    'source' and the clean 'target'.
    """

    def __init__(self, dataset, sizes, vocab, mask_idx, mask_whole_words, shuffle, seed, args, eos=None, item_transform_func=None):
        self.dataset = dataset
        self.sizes = sizes
        self.vocab = vocab
        self.shuffle = shuffle
        self.seed = seed
        self.mask_idx = mask_idx
        # Per-token word-start weights; None means subword-level masking.
        self.mask_whole_word = mask_whole_words
        self.mask_ratio = args.mask
        self.random_ratio = args.mask_random
        self.insert_ratio = args.insert
        self.rotate_ratio = args.rotate
        self.permute_sentence_ratio = args.permute_sentences
        self.eos = (eos if (eos is not None) else vocab.eos())
        self.item_transform_func = item_transform_func
        # Sentence terminator: EOS normally; for GPT-2 BPE, token '13' ('.').
        if (args.bpe != 'gpt2'):
            self.full_stop_index = self.vocab.eos()
        else:
            assert (args.bpe == 'gpt2')
            self.full_stop_index = self.vocab.index('13')
        self.replace_length = args.replace_length
        if (self.replace_length not in [(-1), 0, 1]):
            raise ValueError(f'invalid arg: replace_length={self.replace_length}')
        if (args.mask_length not in ['subword', 'word', 'span-poisson']):
            raise ValueError(f'invalid arg: mask-length={args.mask_length}')
        if ((args.mask_length == 'subword') and (args.replace_length not in [0, 1])):
            raise ValueError(f'if using subwords, use replace-length=1 or 0')
        self.mask_span_distribution = None
        if (args.mask_length == 'span-poisson'):
            # Build a truncated Poisson(lambda) pmf over span lengths 0..127,
            # stopping early once the tail probability is negligible.
            _lambda = args.poisson_lambda
            lambda_to_the_k = 1
            e_to_the_minus_lambda = math.exp((-_lambda))
            k_factorial = 1
            ps = []
            for k in range(0, 128):
                ps.append(((e_to_the_minus_lambda * lambda_to_the_k) / k_factorial))
                lambda_to_the_k *= _lambda
                k_factorial *= (k + 1)
                if (ps[(-1)] < 1e-07):
                    break
            ps = torch.FloatTensor(ps)
            self.mask_span_distribution = torch.distributions.Categorical(ps)
        self.epoch = 0

    def set_epoch(self, epoch, **unused):
        # Epoch participates in the per-item RNG seed so noise differs per epoch.
        self.epoch = epoch

    def __getitem__(self, index):
        # Deterministic noise per (seed, epoch, index).
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            tokens = self.dataset[index]
            assert (tokens[(-1)] == self.eos)
            (source, target) = (tokens, tokens.clone())
            if (self.permute_sentence_ratio > 0.0):
                source = self.permute_sentences(source, self.permute_sentence_ratio)
            if (self.mask_ratio > 0):
                source = self.add_whole_word_mask(source, self.mask_ratio)
            if (self.insert_ratio > 0):
                source = self.add_insertion_noise(source, self.insert_ratio)
            if ((self.rotate_ratio > 0.0) and (np.random.random() < self.rotate_ratio)):
                source = self.add_rolling_noise(source)
        if (self.item_transform_func is not None):
            (source, target) = self.item_transform_func(source, target)
        # Sanity checks: valid ids, BOS/EOS preserved at the edges.
        assert (source >= 0).all()
        assert (source[1:(-1)] >= 1).all()
        assert (source <= len(self.vocab)).all()
        assert (source[0] == self.vocab.bos())
        assert (source[(-1)] == self.eos)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def permute_sentences(self, source, p=1.0):
        """Shuffle roughly ``p`` of the sentences (delimited by full stops)."""
        full_stops = (source == self.full_stop_index)
        # Force a sentence boundary just before the trailing EOS.
        full_stops[(-2)] = 1
        # Position one past each sentence-final full stop.
        sentence_ends = ((full_stops[1:] * (~full_stops[:(-1)])).nonzero() + 2)
        result = source.clone()
        num_sentences = sentence_ends.size(0)
        num_to_permute = math.ceil((((num_sentences * 2) * p) / 2.0))
        substitutions = torch.randperm(num_sentences)[:num_to_permute]
        ordering = torch.arange(0, num_sentences)
        ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
        # Rebuild the sequence after BOS (index 1) in the permuted order.
        index = 1
        for i in ordering:
            sentence = source[(sentence_ends[(i - 1)] if (i > 0) else 1):sentence_ends[i]]
            result[index:(index + sentence.size(0))] = sentence
            index += sentence.size(0)
        return result

    def word_starts(self, source):
        """Return a 0/1 tensor marking maskable word-start positions.

        BOS and EOS are never word starts.
        """
        if (self.mask_whole_word is not None):
            is_word_start = self.mask_whole_word.gather(0, source)
        else:
            is_word_start = torch.ones(source.size())
        is_word_start[0] = 0
        is_word_start[(-1)] = 0
        return is_word_start

    def add_whole_word_mask(self, source, p):
        """Mask ~``p`` of word-start tokens (optionally as Poisson spans).

        Depending on ``replace_length``: 0 deletes tokens, 1 replaces the
        word start with <mask>, -1 replaces every token in the span.
        A ``random_ratio`` fraction of masks becomes random vocab tokens.
        """
        is_word_start = self.word_starts(source)
        num_to_mask = int(math.ceil((is_word_start.float().sum() * p)))
        num_inserts = 0
        if (num_to_mask == 0):
            return source
        if (self.mask_span_distribution is not None):
            # Sample span lengths until their total covers the mask budget.
            lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
            cum_length = torch.cumsum(lengths, 0)
            while (cum_length[(-1)] < num_to_mask):
                lengths = torch.cat([lengths, self.mask_span_distribution.sample(sample_shape=(num_to_mask,))], dim=0)
                cum_length = torch.cumsum(lengths, 0)
            # Trim so the spans sum to exactly num_to_mask tokens.
            i = 0
            while (cum_length[i] < num_to_mask):
                i += 1
            lengths[i] = (num_to_mask - (0 if (i == 0) else cum_length[(i - 1)]))
            num_to_mask = (i + 1)
            lengths = lengths[:num_to_mask]
            # Zero-length spans are handled as insertions instead of masks.
            lengths = lengths[(lengths > 0)]
            num_inserts = (num_to_mask - lengths.size(0))
            num_to_mask -= num_inserts
            if (num_to_mask == 0):
                return self.add_insertion_noise(source, (num_inserts / source.size(0)))
            assert (lengths > 0).all()
        else:
            lengths = torch.ones((num_to_mask,)).long()
        assert (is_word_start[(-1)] == 0)
        word_starts = is_word_start.nonzero()
        # Choose random word-start positions to mask.
        indices = word_starts[torch.randperm(word_starts.size(0))[:num_to_mask]].squeeze(1)
        # Which masked positions become random tokens instead of <mask>.
        mask_random = (torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio)
        source_length = source.size(0)
        assert ((source_length - 1) not in indices)
        to_keep = torch.ones(source_length, dtype=torch.bool)
        # Sentinel so span expansion below always stops at the final token.
        is_word_start[(-1)] = 255
        if (self.replace_length == 0):
            to_keep[indices] = 0
        else:
            # Replace word starts with <mask> (or a random vocab token).
            source[indices] = self.mask_idx
            source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
        if (self.mask_span_distribution is not None):
            assert (len(lengths.size()) == 1)
            assert (lengths.size() == indices.size())
            lengths -= 1
            # Walk each span rightwards, masking/deleting tokens until the
            # sampled length is consumed.
            while (indices.size(0) > 0):
                assert (lengths.size() == indices.size())
                lengths -= is_word_start[(indices + 1)].long()
                uncompleted = (lengths >= 0)
                indices = (indices[uncompleted] + 1)
                mask_random = mask_random[uncompleted]
                lengths = lengths[uncompleted]
                if (self.replace_length != (-1)):
                    to_keep[indices] = 0
                else:
                    source[indices] = self.mask_idx
                    source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
        else:
            # Word-level masking: extend each mask over the word's subword tail.
            while (indices.size(0) > 0):
                uncompleted = (is_word_start[(indices + 1)] == 0)
                indices = (indices[uncompleted] + 1)
                mask_random = mask_random[uncompleted]
                if (self.replace_length != (-1)):
                    to_keep[indices] = 0
                else:
                    source[indices] = self.mask_idx
                    source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
                assert ((source_length - 1) not in indices)
        source = source[to_keep]
        if (num_inserts > 0):
            source = self.add_insertion_noise(source, (num_inserts / source.size(0)))
        return source

    def add_permuted_noise(self, tokens, p):
        """Randomly swap ~``p`` of the interior tokens (BOS/EOS untouched)."""
        num_words = len(tokens)
        num_to_permute = math.ceil((((num_words * 2) * p) / 2.0))
        substitutions = (torch.randperm((num_words - 2))[:num_to_permute] + 1)
        tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
        return tokens

    def add_rolling_noise(self, tokens):
        """Rotate the interior of the sequence by a random offset."""
        offset = np.random.randint(1, (max(1, (tokens.size((-1)) - 1)) + 1))
        tokens = torch.cat((tokens[0:1], tokens[offset:(-1)], tokens[1:offset], tokens[(-1):]), dim=0)
        return tokens

    def add_insertion_noise(self, tokens, p):
        """Insert ``ceil(len * p)`` noise tokens at random interior positions.

        Most insertions are <mask>; a ``random_ratio`` fraction are random
        vocab tokens.
        """
        if (p == 0.0):
            return tokens
        num_tokens = len(tokens)
        n = int(math.ceil((num_tokens * p)))
        noise_indices = (torch.randperm(((num_tokens + n) - 2))[:n] + 1)
        noise_mask = torch.zeros(size=((num_tokens + n),), dtype=torch.bool)
        noise_mask[noise_indices] = 1
        result = torch.LongTensor((n + len(tokens))).fill_((-1))
        num_random = int(math.ceil((n * self.random_ratio)))
        result[noise_indices[num_random:]] = self.mask_idx
        result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))
        # Original tokens fill the non-noise slots in order.
        result[(~noise_mask)] = tokens
        assert (result >= 0).all()
        return result

    def collater(self, samples, pad_to_length=None):
        """Collate sample dicts into a padded mini-batch."""
        return collate(samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length)

    def num_tokens(self, index):
        # Used by fairseq for batching by token count.
        return self.sizes[index]

    def size(self, index):
        # Used by fairseq for filtering by length.
        return self.sizes[index]

    def ordered_indices(self):
        """Indices ordered for batching: (optionally shuffled,) then by size."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        return indices[np.argsort(self.sizes[indices], kind='mergesort')]

    def prefetch(self, indices):
        # NOTE(review): references self.src / self.tgt, which this class never
        # assigns -- looks copied from a paired-dataset class; confirm before
        # relying on prefetch support.
        self.src.prefetch(indices)
        self.tgt.prefetch(indices)

    def supports_prefetch(self):
        # NOTE(review): plain method here, though fairseq convention exposes
        # supports_prefetch as a @property -- verify call sites.
        return (hasattr(self.src, 'supports_prefetch') and self.src.supports_prefetch and hasattr(self.tgt, 'supports_prefetch') and self.tgt.supports_prefetch)
class SetupCallback(Callback):
    """Lightning callback that prepares run directories and persists configs.

    On rank 0 it creates the log/checkpoint/config directories and dumps the
    project and lightning configs as timestamped YAML files; on keyboard
    interrupt it saves a 'last.ckpt' checkpoint.
    """

    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume                      # True when resuming an existing run
        self.now = now                            # timestamp string used in config filenames
        self.logdir = logdir                      # run root directory
        self.ckptdir = ckptdir                    # checkpoint directory
        self.cfgdir = cfgdir                      # config dump directory
        self.config = config                      # OmegaConf project config
        self.lightning_config = lightning_config  # OmegaConf lightning config

    def on_keyboard_interrupt(self, trainer, pl_module):
        # Save a final checkpoint on Ctrl-C, from rank 0 only.
        if (trainer.global_rank == 0):
            print('Summoning checkpoint.')
            ckpt_path = os.path.join(self.ckptdir, 'last.ckpt')
            trainer.save_checkpoint(ckpt_path)

    def on_pretrain_routine_start(self, trainer, pl_module):
        if (trainer.global_rank == 0):
            # Fresh run: create the directory layout.
            if (not self.resume):
                os.makedirs(self.logdir, exist_ok=True)
                os.makedirs(self.ckptdir, exist_ok=True)
                os.makedirs(self.cfgdir, exist_ok=True)
            # Dump both configs as timestamped YAML for reproducibility.
            print('Project config')
            print(OmegaConf.to_yaml(self.config))
            OmegaConf.save(self.config, os.path.join(self.cfgdir, '{}-project.yaml'.format(self.now)))
            print('Lightning config')
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(OmegaConf.create({'lightning': self.lightning_config}), os.path.join(self.cfgdir, '{}-lightning.yaml'.format(self.now)))
        elif ((not self.resume) and os.path.exists(self.logdir)):
            # Non-zero ranks: a pre-existing logdir for a fresh run is moved
            # aside under 'child_runs' rather than overwritten.
            (dst, name) = os.path.split(self.logdir)
            dst = os.path.join(dst, 'child_runs', name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            try:
                os.rename(self.logdir, dst)
            except FileNotFoundError:
                # Another rank may have moved it first; best-effort.
                pass
class TestCheckpointUtils(unittest.TestCase):
    """Tests for fairseq checkpoint utilities (ensemble loading, pruning,
    async save)."""

    def setUp(self):
        # Silence training log spam during the tests.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    @contextlib.contextmanager
    def _train_transformer(self, seed, extra_args=None):
        """Train a tiny transformer and yield the path of its last checkpoint.

        Fix: this method is consumed via ``with self._train_transformer(...)
        as path:`` but was a plain generator function; it must be decorated
        with ``contextlib.contextmanager`` to be usable as a context manager
        (and so the temp directory is cleaned up when the ``with`` exits).
        """
        if (extra_args is None):
            extra_args = []
        with tempfile.TemporaryDirectory(f'_train_transformer_seed{seed}') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(data_dir, 'transformer_iwslt_de_en', (['--encoder-layers', '3', '--decoder-layers', '3', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--seed', str(seed)] + extra_args))
            yield os.path.join(data_dir, 'checkpoint_last.pt')

    def test_load_model_ensemble_and_task(self):
        # Two models trained with different seeds load as a 2-model ensemble;
        # task/cfg come from the *last* checkpoint in the list.
        with self._train_transformer(seed=123) as model1:
            with self._train_transformer(seed=456) as model2:
                (ensemble, cfg, task) = checkpoint_utils.load_model_ensemble_and_task(filenames=[model1, model2])
                self.assertEqual(len(ensemble), 2)
                self.assertEqual(ensemble[0].args.seed, 123)
                self.assertEqual(ensemble[1].args.seed, 456)
                self.assertTrue(('seed123' in task.cfg.data))
                self.assertEqual(cfg.common.seed, 456)

    def test_prune_state_dict(self):
        # arg_overrides with layers_to_keep should prune layers at load time.
        with contextlib.redirect_stdout(StringIO()):
            extra_args = ['--encoder-layerdrop', '0.01', '--decoder-layerdrop', '0.01']
            with self._train_transformer(seed=1, extra_args=extra_args) as model:
                (ensemble, cfg, task) = checkpoint_utils.load_model_ensemble_and_task(filenames=[model], arg_overrides={'encoder_layers_to_keep': '0,2', 'decoder_layers_to_keep': '1'})
                self.assertEqual(len(ensemble), 1)
                self.assertEqual(len(ensemble[0].encoder.layers), 2)
                self.assertEqual(len(ensemble[0].decoder.layers), 1)

    def test_torch_persistent_save_async(self):
        # async_write=True must open via PathManager.opena and delegate the
        # actual serialization to _torch_persistent_save.
        state_dict = {}
        filename = 'async_checkpoint.pt'
        with patch(f'{checkpoint_utils.__name__}.PathManager.opena') as mock_opena:
            with patch(f'{checkpoint_utils.__name__}._torch_persistent_save') as mock_save:
                checkpoint_utils.torch_persistent_save(state_dict, filename, async_write=True)
                mock_opena.assert_called_with(filename, 'wb')
                mock_save.assert_called()
class XconfigTrivialOutputLayer(XconfigLayerBase):
    """xconfig 'output' layer: a bare output-node with no transformation,
    passing its input descriptor straight through (optionally delayed)."""

    def __init__(self, first_token, key_to_value, prev_names=None):
        assert (first_token == 'output')
        XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)

    def set_default_configs(self):
        # dim=-1 means "inherit from input"; no delay by default.
        self.config = {'input': '[-1]', 'dim': (-1), 'objective-type': 'linear', 'output-delay': 0}

    def check_configs(self):
        objective = self.config['objective-type']
        if objective not in ('linear', 'quadratic'):
            raise RuntimeError('In output, objective-type has invalid value {0}'.format(self.config['objective-type']))

    def output_name(self, auxiliary_outputs=None):
        # No auxiliary outputs exist for this layer.
        assert (auxiliary_outputs is None)
        return self.name

    def output_dim(self, auxiliary_outputs=None):
        assert (auxiliary_outputs is None)
        # Output dimension is exactly the input descriptor's dimension.
        return self.descriptors['input']['dim']

    def get_full_config(self):
        """Emit identical output-node lines for the 'ref' and 'final' configs."""
        input_str = self.descriptors['input']['final-string']
        objective = self.config['objective-type']
        delay = self.config['output-delay']
        if delay != 0:
            # Wrap the input in an Offset() descriptor to apply the delay.
            input_str = 'Offset({0}, {1})'.format(input_str, delay)
        lines = []
        for config_name in ['ref', 'final']:
            lines.append((config_name, 'output-node name={0} input={1} objective={2}'.format(self.name, input_str, objective)))
        return lines
def to_categorical(y_seq, nb_classes):
    """One-hot encode a batch of 1-based tag sequences.

    Tag value 0 is treated as padding and yields an all-zero row; tag ``t``
    (t >= 1) sets class index ``t - 1``. Returns an array of shape
    ``y_seq.shape + (nb_classes,)``.
    """
    Y = np.zeros((y_seq.shape + (nb_classes,)))
    for sample_idx, sample in enumerate(y_seq):
        for tag_idx, tag in enumerate(sample):
            if tag == 0:
                continue  # padding: leave the all-zero row
            Y[sample_idx, tag_idx, int(tag) - 1] = 1
    return Y
class ResNet(nn.Module):
    """Person re-ID ResNet backbone with an optional part-based FCN head.

    In FCN mode the feature map is split into 6 horizontal stripes, each
    classified by its own linear head (PCB-style). Otherwise a standard
    embedding head (optional linear + BN + dropout + classifier) is used.

    NOTE(review): uses the deprecated non-in-place ``torch.nn.init`` names
    (kaiming_normal / constant / normal) -- this targets an old PyTorch;
    confirm the installed version before reuse.
    """
    # Map depth -> torchvision constructor.
    __factory = {18: torchvision.models.resnet18, 34: torchvision.models.resnet34, 50: torchvision.models.resnet50, 101: torchvision.models.resnet101, 152: torchvision.models.resnet152}

    def __init__(self, depth, pretrained=True, cut_at_pooling=False, num_features=0, norm=False, dropout=0, num_classes=0, FCN=False, radius=1.0, thresh=0.5):
        super(ResNet, self).__init__()
        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        self.FCN = FCN
        if (depth not in ResNet.__factory):
            raise KeyError('Unsupported depth:', depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)
        if self.FCN:
            # Keep layer4 at stride 1 for a higher-resolution part feature map.
            for mo in self.base.layer4[0].modules():
                if isinstance(mo, nn.Conv2d):
                    mo.stride = (1, 1)
            self.num_features = num_features
            # NOTE(review): hard-codes 751 identities (Market-1501), ignoring
            # the num_classes argument in FCN mode -- confirm intent.
            self.num_classes = 751
            self.dropout = dropout
            out_planes = self.base.fc.in_features
            # 1x1 conv projecting backbone channels down to num_features.
            self.local_conv = nn.Conv2d(out_planes, self.num_features, kernel_size=1, padding=0, bias=False)
            init.kaiming_normal(self.local_conv.weight, mode='fan_out')
            self.feat_bn2d = nn.BatchNorm2d(self.num_features)
            init.constant(self.feat_bn2d.weight, 1)
            init.constant(self.feat_bn2d.bias, 0)
            # One classifier per horizontal stripe (6 stripes).
            self.instance0 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance0.weight, std=0.001)
            init.constant(self.instance0.bias, 0)
            self.instance1 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance1.weight, std=0.001)
            init.constant(self.instance1.bias, 0)
            self.instance2 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance2.weight, std=0.001)
            init.constant(self.instance2.bias, 0)
            self.instance3 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance3.weight, std=0.001)
            init.constant(self.instance3.bias, 0)
            self.instance4 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance4.weight, std=0.001)
            init.constant(self.instance4.bias, 0)
            self.instance5 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance5.weight, std=0.001)
            init.constant(self.instance5.bias, 0)
            self.drop = nn.Dropout(self.dropout)
        elif (not self.cut_at_pooling):
            # Standard embedding head.
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = (num_features > 0)
            self.num_classes = num_classes
            # Learnable scaling/threshold parameters (unused in forward here).
            self.radius = nn.Parameter(torch.FloatTensor([radius]))
            self.thresh = nn.Parameter(torch.FloatTensor([thresh]))
            out_planes = self.base.fc.in_features
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features, bias=False)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal(self.feat.weight, mode='fan_out')
            else:
                # No projection: expose the backbone's feature size.
                self.num_features = out_planes
            if (self.dropout > 0):
                self.drop = nn.Dropout(self.dropout)
            if (self.num_classes > 0):
                self.classifier = nn.Linear(self.num_features, self.num_classes, bias=True)
                init.normal(self.classifier.weight, std=0.001)
                init.constant(self.classifier.bias, 0)
        if (not self.pretrained):
            self.reset_params()

    def forward(self, x):
        # Run the backbone up to (but excluding) its avgpool/fc head.
        for (name, module) in self.base._modules.items():
            if (name == 'avgpool'):
                break
            x = module(x)
        if self.cut_at_pooling:
            return x
        if self.FCN:
            # NOTE(review): 'y' is computed but never used below -- dead code?
            y = x.unsqueeze(1)
            y = F.avg_pool3d(x, (16, 1, 1)).squeeze(1)
            # Split feature map height into 6 stripes.
            # NOTE(review): '/' is true division in Python 3 (float), which
            # avg_pool2d's stride/kernel args reject -- this code presumably
            # ran under Python 2 integer division; use '//' on Py3. TODO confirm.
            sx = (x.size(2) / 6)
            kx = (x.size(2) - (sx * 5))
            x = F.avg_pool2d(x, kernel_size=(kx, x.size(3)), stride=(sx, x.size(3)))
            # NOTE(review): first out0 is immediately overwritten by the
            # normalized version on the next line.
            out0 = x.view(x.size(0), (-1))
            out0 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
            x = self.drop(x)
            x = self.local_conv(x)
            out1 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
            x = self.feat_bn2d(x)
            x = F.relu(x)
            # One classifier per stripe.
            x = x.chunk(6, 2)
            x0 = x[0].contiguous().view(x[0].size(0), (-1))
            x1 = x[1].contiguous().view(x[1].size(0), (-1))
            x2 = x[2].contiguous().view(x[2].size(0), (-1))
            x3 = x[3].contiguous().view(x[3].size(0), (-1))
            x4 = x[4].contiguous().view(x[4].size(0), (-1))
            x5 = x[5].contiguous().view(x[5].size(0), (-1))
            c0 = self.instance0(x0)
            c1 = self.instance1(x1)
            c2 = self.instance2(x2)
            c3 = self.instance3(x3)
            c4 = self.instance4(x4)
            c5 = self.instance5(x5)
            return (out0, (c0, c1, c2, c3, c4, c5))
        # Global-average-pool head.
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(x.size(0), (-1))
        out1 = x.view(x.size(0), (-1))
        # NOTE(review): 'center', 'out3', 'out4' are computed but unused; the
        # function returns (out2, x, out2, out2).
        center = out1.mean(0).unsqueeze(0).expand_as(out1)
        out2 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if self.has_embedding:
            x = self.feat(x)
            out3 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
            x = self.feat_bn(x)
        if self.norm:
            x = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        elif self.has_embedding:
            x = F.relu(x)
            out4 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if (self.dropout > 0):
            x = self.drop(x)
        if (self.num_classes > 0):
            x = self.classifier(x)
        return (out2, x, out2, out2)

    def reset_params(self):
        """He-init convs, unit-init batchnorm, small-normal-init linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if (m.bias is not None):
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if (m.bias is not None):
                    init.constant(m.bias, 0)
class Statistics():
    """Accumulates a stream of numeric values and reports statistics over
    the full history (mean and population standard deviation)."""

    def __init__(self, name='AVG'):
        self.name = name
        self.history = []  # every value ever seen
        self.sum = 0       # running sum (kept alongside history)
        self.cnt = 0       # number of updates

    def update(self, val):
        """Record one value."""
        self.history.append(val)
        self.sum += val
        self.cnt += 1

    def mean_std(self):
        """Return (mean, std) over the full history."""
        return (self.mean(), self.std())

    def mean(self):
        return np.mean(self.history)

    def std(self):
        return np.std(self.history)
def get_netG():
    """Fetch the pretrained celebAHQ-512 PGAN from the PyTorch hub (CPU)
    and return only its generator network."""
    pgan = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', 'PGAN', model_name='celebAHQ-512', pretrained=True, useGPU=False)
    return pgan.netG
def get_ipex_version():
    """Return intel-extension-for-pytorch's version string.

    The package is imported lazily on first call and the result cached in
    the module-level ``_ipex_version``.
    """
    global _ipex_version
    if _ipex_version is None:
        import intel_extension_for_pytorch as ipex
        _ipex_version = ipex.__version__
    return _ipex_version
def discount(x, gamma):
    """Discounted cumulative sums along axis 0.

    out[t] = sum_k gamma**k * x[t + k], computed via an IIR filter applied
    to the time-reversed signal.
    """
    assert x.ndim >= 1
    reversed_x = x[::(-1)]
    filtered = scipy.signal.lfilter([1], [1, -gamma], reversed_x, axis=0)
    return filtered[::(-1)]
@_optimizer('adafactor')
class FairseqAdafactor(FairseqOptimizer):
    """Fairseq wrapper registering the Adafactor optimizer.

    Fixes:
    - the bare ``_optimizer('adafactor')`` call before the class was a
      mangled decorator application; restored as ``@_optimizer(...)``
      (TODO confirm against the original registration decorator);
    - ``add_args`` takes no ``self`` and is called on the class, so it is a
      ``@staticmethod``;
    - ``__init__`` expands ``**self.optimizer_config``, which requires
      ``optimizer_config`` to be a ``@property`` returning a dict, not a
      bound method;
    - the eps pair string is parsed with ``ast.literal_eval`` instead of
      ``eval`` so arbitrary config strings cannot execute code.
    """

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = Adafactor(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add Adafactor-specific arguments to the parser."""
        parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar='E', help='epsilons for Adafactor optimizer')
        parser.add_argument('--clip-threshold', type=float, default=1.0, metavar='C', help='threshold for clipping update root mean square')
        parser.add_argument('--decay-rate', type=float, default=(-0.8), metavar='D', help='decay rate of the second moment estimator')
        parser.add_argument('--beta1', type=float, default=None, metavar='B', help='beta for first moment estimator. Optional')
        parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter.')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule')
        parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep.If false, external learning rate applied')

    @property
    def optimizer_config(self):
        """Keyword arguments forwarded to the Adafactor constructor."""
        import ast  # local import: safely parse the "(1e-30, 1e-3)" eps tuple
        return {
            'lr': self.args.lr[0],
            'eps': ast.literal_eval(self.args.adafactor_eps),
            'clip_threshold': self.args.clip_threshold,
            'beta1': self.args.beta1,
            'decay_rate': self.args.decay_rate,
            'scale_parameter': self.args.scale_parameter,
            'weight_decay': self.args.weight_decay,
            'relative_step': self.args.relative_step,
            'warmup_init': self.args.warmup_init,
        }
def dataloader_msrvtt_test(args, tokenizer):
    """Build the MSR-VTT evaluation dataset and its DataLoader.

    Evaluation order must be stable, so the loader neither shuffles nor
    drops the last partial batch. Returns (dataloader, dataset_length).
    """
    test_set = MSRVTT_DataLoader(
        jsonl_path=args.val_csv,
        train_jsonl=args.train_csv,
        ans2label_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
        frame_order=args.train_frame_order,
        slice_framepos=args.slice_framepos,
        use_num=args.num_labels,
    )
    loader = DataLoader(
        test_set,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return (loader, len(test_set))
def blind(output_size, dtype=np.float32):
    """Return a thunk building a 'blind' observation pipeline.

    The pipeline ignores its input entirely and always emits a zero tensor
    of ``output_size``, paired with a matching [-1, 1] Box space.
    """
    def _thunk(obs_space):
        def pipeline(x):
            # Deliberately ignores the observation.
            return torch.zeros(output_size)
        return (pipeline, spaces.Box((-1), 1, output_size, dtype))
    return _thunk
def sqnxt23v5_w1(**kwargs):
    """SqueezeNext '23v5' architecture with width scale 1.0.

    Extra keyword arguments are forwarded to ``get_squeezenext``.
    """
    return get_squeezenext(version='23v5', width_scale=1.0, model_name='sqnxt23v5_w1', **kwargs)
class ResidualDenseBlock(nn.Module):
    """Residual Dense Block (ESRGAN-style).

    Five 3x3 convs; each sees the block input concatenated with every
    previous conv's output. The last conv maps back to ``mid_channels`` and
    is added to the input with a 0.2 residual scale.
    """

    def __init__(self, mid_channels=64, growth_channels=32):
        super().__init__()
        # conv1..conv5: input channels grow by growth_channels per step;
        # conv5 projects back to mid_channels.
        for idx in range(1, 6):
            in_ch = mid_channels + (idx - 1) * growth_channels
            out_ch = mid_channels if idx == 5 else growth_channels
            self.add_module(f'conv{idx}', nn.Conv2d(in_ch, out_ch, 3, 1, 1))
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.init_weights()

    def init_weights(self):
        # Small-scale init (0.1) for stable residual training.
        for idx in range(1, 6):
            default_init_weights(getattr(self, f'conv{idx}'), 0.1)

    def forward(self, x):
        feat1 = self.lrelu(self.conv1(x))
        feat2 = self.lrelu(self.conv2(torch.cat((x, feat1), 1)))
        feat3 = self.lrelu(self.conv3(torch.cat((x, feat1, feat2), 1)))
        feat4 = self.lrelu(self.conv4(torch.cat((x, feat1, feat2, feat3), 1)))
        feat5 = self.conv5(torch.cat((x, feat1, feat2, feat3, feat4), 1))
        # Residual scaling stabilizes training.
        return feat5 * 0.2 + x
class SpatialDropout1D(KerasLayer):
    """Keras SpatialDropout1D wrapper: drops entire 1D feature maps with
    probability ``p`` instead of individual elements."""

    def __init__(self, p=0.5, input_shape=None, **kwargs):
        shape = list(input_shape) if input_shape else None
        super(SpatialDropout1D, self).__init__(None, float(p), shape, **kwargs)
def get_speed(vehicle):
    """Return the vehicle's speed in km/h.

    The velocity vector is in m/s; 3.6 converts m/s to km/h.
    """
    velocity = vehicle.get_velocity()
    speed_ms = math.sqrt(velocity.x ** 2 + velocity.y ** 2 + velocity.z ** 2)
    return 3.6 * speed_ms
def add_generic_args(parser, root_dir) -> None:
    """Register the generic training CLI options shared by all tasks.

    Fix: the ``--do_predict`` help text was a copy-paste of the training
    flag's ("Whether to run training."); it now describes prediction.
    """
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--n_tpu_cores', dest='tpu_cores', type=int)
    parser.add_argument('--max_grad_norm', dest='gradient_clip_val', default=1.0, type=float, help='Max gradient norm')
    # NOTE(review): default=True with store_true means do_train is always
    # True regardless of the flag -- kept for backward compatibility.
    parser.add_argument('--do_train', default=True, action='store_true', help='Whether to run training.')
    # NOTE(review): type=bool is a known argparse pitfall (bool('False') is
    # True); interface kept as-is since callers may pass an explicit value.
    parser.add_argument('--do_predict', default=False, type=bool, help='Whether to run predictions on the test set.')
    parser.add_argument('--gradient_accumulation_steps', dest='accumulate_grad_batches', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--seed', type=int, default=101, help='random seed for initialization')
    parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.')
class ResNet34Fc(nn.Module):
    """ImageNet-pretrained ResNet-34 trunk without the classifier head.

    Forward returns the flattened global-average-pooled features
    (shape [batch, 512]).
    """

    def __init__(self):
        super(ResNet34Fc, self).__init__()
        backbone = models.resnet34(pretrained=True)
        # Keep individual references so parameters register under the same
        # attribute names as before.
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.maxpool = backbone.maxpool
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4
        self.avgpool = backbone.avgpool

    def forward(self, x):
        stages = (self.conv1, self.bn1, self.relu, self.maxpool,
                  self.layer1, self.layer2, self.layer3, self.layer4,
                  self.avgpool)
        for stage in stages:
            x = stage(x)
        return x.view(x.size(0), (-1))
def train():
    """Fine-tune a causal LM on JSON data files with the HF Trainer.

    Parses (ModelArguments, TrainingArguments, Args) from the CLI, tokenizes
    the dataset in parallel, optionally splits off an eval set, trains, and
    saves trainer state, final weights, and tokenizer to
    ``training_args.output_dir``.
    """
    parser = HfArgumentParser((ModelArguments, TrainingArguments, Args))
    (model_args, training_args, args) = cast(tuple[(ModelArguments, TrainingArguments, Args)], parser.parse_args_into_dataclasses())
    dataset = load_dataset('json', data_files=args.datafile_paths, split='train')
    model_key = model_args.model_key
    # Fall back to the model key when no explicit checkpoint path is given.
    if ((model_name_or_path := model_args.model_name_or_path) is None):
        model_name_or_path = model_key
    tokenization_context = TokenizationContext.from_model_key(model_key, model_name_or_path)
    # Tokenize in parallel; drop raw columns and never reuse a stale cache.
    train_dataset = dataset.map(function=map_dataset, fn_kwargs=dict(args=args, context=tokenization_context), batched=True, num_proc=N_CORES, remove_columns=dataset.column_names, load_from_cache_file=False, desc='Running tokenizer on train dataset')
    msg = f"#Examples truncated: {sum(train_dataset['exceeding_length'])} / {len(train_dataset)}"
    print(msg)
    # No evaluation configured -> train on everything (shuffled);
    # otherwise hold out an eval split.
    if ((training_args.eval_steps is None) and (training_args.evaluation_strategy == 'no')):
        train_dataset = train_dataset.shuffle(seed=training_args.seed)
        eval_dataset = None
    else:
        print('Splitting dataset')
        split_dataset = train_dataset.train_test_split(test_size=args.eval_dataset_size, shuffle=True, seed=training_args.seed)
        train_dataset = split_dataset['train']
        eval_dataset = split_dataset['test']
    state = get_model_context(model_key, model_name_or_path, tokenization_context, inference_mode=False, use_flash_attention=args.use_flash_attention)
    print('Parallel mode:', training_args.parallel_mode)
    data_collator = get_data_collator(args, state.tokenization_context.pad_token_id)
    trainer = Trainer(model=state.model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator)
    trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
    # Persist everything needed to reload for inference later.
    trainer.save_state()
    trainer.save_model(training_args.output_dir)
    state.tokenization_context.tokenizer.save_pretrained(training_args.output_dir)
def compute_rank(tensor):
    """Numerical matrix rank of a (possibly GPU) torch tensor.

    Moves the tensor to CPU first, then uses numpy's SVD-based rank with
    tolerance 1e-4.
    """
    matrix = tensor.detach().cpu()
    return np.linalg.matrix_rank(matrix, tol=0.0001)
def fast_auc(actual, predicted):
    """AUC via rank statistics: rank the predictions once, then delegate
    the rank-based AUC computation to ``_auc``."""
    ranks = rankdata(predicted)
    return _auc(actual, ranks)
def get_sub_feed(input, place):
    """Build a paddle feed dict from detection-related entries of ``input``.

    Keeps bbox/image-info keys plus any key containing 'image'; each value
    is converted to a LoDTensor on ``place``, and bbox entries get their
    LoD restored from the stored per-image lengths.
    """
    selected = {}
    for key in ['bbox', 'im_info', 'im_id', 'im_shape', 'bbox_flip']:
        if key in input:
            selected[key] = input[key]
    for key in input:
        if 'image' in key:
            selected[key] = input[key]
    feed = {}
    for key, value in selected.items():
        tensor = fluid.LoDTensor()
        tensor.set(value[0], place)
        if 'bbox' in key:
            # Rebuild the level-of-detail info from the stored lengths.
            tensor.set_lod(length2lod(value[1][0]))
        feed[key] = tensor
    return feed
class MergeFeatureLabelFeatureTransformer(FeatureTransformer):
    """FeatureTransformer that merges feature and label columns.

    The merge logic lives in the backing implementation selected by the
    ``FeatureTransformer`` base (presumably JVM-side, per BigDL convention
    -- confirm); this class only forwards construction.
    """

    def __init__(self, bigdl_type='float'):
        # bigdl_type: numeric precision used by the backing implementation.
        super(MergeFeatureLabelFeatureTransformer, self).__init__(bigdl_type)
def slogdet_product(xs: PyTree) -> SLArray:
    """Return (sign, logabsdet) of the product of determinants of every
    array in the pytree.

    Signs multiply and log-magnitudes add across leaves.
    """
    per_leaf = jax.tree_map(jnp.linalg.slogdet, xs)
    (pairs, _) = jax.tree_util.tree_flatten(per_leaf, is_tuple_of_arrays)
    (sign_total, log_total) = pairs[0]
    for (sign, log) in pairs[1:]:
        sign_total = sign_total * sign
        log_total = log_total + log
    return (sign_total, log_total)
def merge_dict_list(dict_list):
    """Merge a sequence of dicts left-to-right.

    The FIRST occurrence of a key wins; later duplicates are ignored.
    """
    merged = {}
    for entry in dict_list:
        for key, value in entry.items():
            merged.setdefault(key, value)
    return merged
def main(_):
    """Entry point: ensure all output directories exist, then start training."""
    for directory in (FLAGS.checkpoint_dir, FLAGS.log_dir, FLAGS.train_samples_dir):
        mkdir_if_missing(directory)
    train_models.train()
def run(*args, **kwargs) -> Any:
    """Run ops in the default TF session; requires prior tfutil initialization."""
    assert_tf_initialized()
    session = tf.get_default_session()
    return session.run(*args, **kwargs)
def _split_runs_on_parameters(runs):
    """Break gate runs apart at parameterized gates.

    Returns only the non-parameterized segments of each run, in order;
    parameterized gates themselves are dropped.
    """
    def _has_params(node):
        return any(isinstance(p, Parameter) for p in node.op.params)

    segments = []
    for run in runs:
        for parameterized, gates in groupby(run, _has_params):
            if not parameterized:
                segments.append(list(gates))
    return segments
def test_anisotropic_hernquist_meanvr_directint():
    """Mean v_r of the constant-beta Hernquist DF should vanish for every
    tested anisotropy parameter."""
    pot = potential.HernquistPotential(amp=2.3, a=1.3)
    tol = 1e-08
    for beta in ((-0.7), (-0.5), (-0.4), 0.0, 0.3, 0.5):
        df = constantbetaHernquistdf(pot=pot, beta=beta)
        check_meanvr_directint(df, pot, tol, beta=beta, rmin=pot._scale / 10.0, rmax=pot._scale * 10.0, bins=31)
    return None
class IntentDetector():
    """Thin wrapper that classifies a user query's intent with an LLM."""

    def __init__(self):
        pass

    def intent_detection(self, model_name, query):
        """Return the model's intent label for ``query``.

        Uses near-deterministic decoding (tiny temperature, top_k=1) and a
        short generation budget so the answer is a stable, compact label.
        """
        params = {
            'model_name': model_name,
            'prompt': generate_intent_prompt(query),
            'temperature': 0.001,
            'top_k': 1,
            'max_new_tokens': 10,
        }
        return predict(**params)
def change_path(json_file_path, target_path):
    """Rewrite every entry's 'wav' path in a data manifest, in place.

    Each path is replaced by ``<target_path>/audio_16k/<basename>`` and the
    manifest is written back to the same file.
    """
    with open(json_file_path, 'r') as fp:
        entries = json.load(fp)['data']
    prefix = target_path + '/audio_16k/'
    for entry in entries:
        entry['wav'] = prefix + entry['wav'].split('/')[(-1)]
    with open(json_file_path, 'w') as f:
        json.dump({'data': entries}, f, indent=1)
def identify_meshes(dir_):
    """Classify a submission directory by which mesh files it contains.

    Returns ``(meshes, challenge, track)``: sorted mesh paths plus the
    challenge/track they imply. Priority order: challenge-1 track-1
    (*_normalized.npz), challenge-1 track-2 (fusion_textured.npz), then
    challenge-2 (model_*.obj). Everything empty/None when nothing matches.
    """
    candidates = (
        (sorted(dir_.glob('**/*_normalized.npz')), 1, 1),
        (sorted(dir_.glob('**/fusion_textured.npz')), 1, 2),
        (sorted(dir_.glob('**/model_*.obj')), 2, None),
    )
    for meshes, challenge, track in candidates:
        if meshes:
            return (meshes, challenge, track)
    return ([], None, None)
class ChatGLMForCausalLM(_BaseGGMLClass):
    """GGML-backed ChatGLM causal-LM entry point.

    Configuration-only subclass: the loading/conversion machinery lives in
    ``_BaseGGMLClass`` and reads these three class attributes.
    """
    # Python module providing the native (ggml) ChatGLM implementation.
    GGML_Module = 'bigdl.llm.ggml.model.chatglm'
    # Class name inside GGML_Module to instantiate.
    GGML_Model = 'ChatGLM'
    # HuggingFace auto class used when loading the original checkpoint.
    HF_Class = AutoModel
class ExampleThing(HyperBase):
    """Example HyperBase subclass demonstrating hyper-parameter registration."""

    def __init__(self, **hyper_params):
        super(ExampleThing, self).__init__(**hyper_params)
        # One required param, one plain default, and one lazily-computed
        # default (callable) carrying help text.
        self.register_hyper_param('hyper_a')
        self.register_hyper_param('hyper_b', default=23)
        self.register_hyper_param('hyper_c', default=(lambda: (2 * 21)), help='help')
        self.set_hyper_params(hyper_params)
class MobileNetV2_LandScape(nn.Module):
    """MobileNetV2 classifier variant: inverted-residual feature stack,
    global average pooling, then a linear head."""

    def __init__(self, num_classes=1000, width_mult=1.0):
        super(MobileNetV2_LandScape, self).__init__()
        # Per stage: (expansion t, output channels c, repeats n, first stride s).
        stage_cfg = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        in_ch = int(32 * width_mult)
        # Head width never shrinks below the nominal 1280.
        self.last_channel = int(1280 * max(1.0, width_mult))
        layers = []
        for t, c, n, s in stage_cfg:
            out_ch = int(c * width_mult)
            for block_idx in range(n):
                # Only the first block of a stage downsamples.
                layers.append(InvertedResidual(in_ch, out_ch, s if block_idx == 0 else 1, expand_ratio=t))
                in_ch = out_ch
        layers.append(ConvBNReLU(in_ch, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*layers)
        self.fc = nn.Linear(self.last_channel, num_classes)
        self._init_weights()

    def _init_weights(self):
        # Standard MobileNet init: kaiming convs, unit BN, small-std linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.features(x)
        # Global average pool over the spatial dimensions.
        x = x.mean([2, 3])
        return self.fc(x)
def get_input_output(graph_path, args):
    """Load a TensorFlow model from *graph_path* and summarize its I/O nodes.

    Three loading paths, selected by *args*:
      - ``args.use_nc``: wrap with neural_compressor's Model (optionally
        overriding output tensor names from ``args.output_name``);
      - ``args.is_meta``: load a checkpoint meta-graph and freeze it to a
        sibling ``*_freeze.pb`` file;
      - otherwise: treat *graph_path* as a frozen ``.pb`` graph.

    Returns:
        (graph_def, output_nodes) — the loaded GraphDef and the node summary
        dict produced by ``summarize_graph``.
    """
    # Placeholder dimension substituted for unknown/dynamic shapes.
    fix_dynamic_shape = 300
    if args.use_nc:
        from neural_compressor.model import Model
        model = Model(graph_path)
        if (args.output_name in [[], ['']]):
            raise AttributeError("Empty '--output_name', please specify a valid '--output_name'.")
        elif (args.output_name is not None):
            model.output_tensor_names = args.output_name
            graph_def = model.graph_def
            output_nodes = summarize_graph(graph_def, fix_dynamic_shape)
            # Trust the user-supplied output names over the summarized ones.
            output_nodes['outputs'] = args.output_name
        else:
            graph_def = model.graph_def
            output_nodes = summarize_graph(graph_def, fix_dynamic_shape)
    elif args.is_meta:
        graph_def = _load_meta(graph_path)
        output_nodes = summarize_graph(graph_def, fix_dynamic_shape)
        # Strip the trailing '.meta' (5 chars) to get the checkpoint prefix.
        graph_prefix = graph_path[:(- 5)]
        output_freeze_model_dir = (graph_prefix + '_freeze.pb')
        # NOTE(review): the frozen graph returned by freeze_graph is written to
        # disk but `output_graph_def` is never used — callers still receive the
        # un-frozen meta graph_def. Confirm whether `graph_def =
        # output_graph_def` was intended here.
        output_graph_def = freeze_graph(input_checkpoint=graph_prefix, output_graph=output_freeze_model_dir, output_node_names=output_nodes['outputs'])
        print('****** {} is a ckpt model, now save freezed model at {}'.format(graph_path, output_freeze_model_dir))
    else:
        graph_def = tf_v1.GraphDef()
        # NOTE(review): the summary is computed from the graph returned by
        # _load_pb, while the (presumably in-place populated) graph_def is what
        # gets returned — verify _load_pb fills graph_def.
        load_graph = _load_pb(graph_def, graph_file_name=graph_path)
        output_nodes = summarize_graph(load_graph, fix_dynamic_shape)
    return (graph_def, output_nodes)
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1, outputs_collections=None, scope=None):
    """Pre-activation (ResNet v2) bottleneck residual unit.

    Fix: the source line began with a garbled ``_arg_scope`` fused onto the
    ``def`` — a truncated ``@slim.add_arg_scope`` decorator (the canonical
    decoration for slim resnet_v2 bottleneck units), restored here so the
    function participates in ``slim.arg_scope`` argument injection.

    Args:
        inputs: 4-D input tensor, channels-last.
        depth: output channel count of the unit.
        depth_bottleneck: channel count of the inner 1x1/3x3 convs.
        stride: spatial stride of the unit (applied in conv2/shortcut).
        rate: dilation rate for the 3x3 conv.
        outputs_collections: collection(s) to which the output is added.
        scope: optional variable scope name.

    Returns:
        The unit's output tensor, registered in *outputs_collections*.
    """
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # v2: batch-norm + ReLU come BEFORE the convolutions.
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if (depth == depth_in):
            # Same width: identity shortcut, subsampled to match the stride.
            shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            # Width change: project the pre-activated input with a 1x1 conv
            # (no extra BN/ReLU on the shortcut path).
            shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None, activation_fn=None, scope='shortcut')
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
        # Final 1x1 is linear — the NEXT unit's preact supplies BN/ReLU.
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3')
        output = (shortcut + residual)
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
def FeatureDropout(x):
    """Zero out spatial positions whose channel-mean activation is high.

    The channel-mean "attention" map is thresholded at a random fraction
    (uniform in [0.7, 0.9]) of each sample's peak attention; positions at or
    above the threshold are dropped, the rest pass through unchanged.

    Args:
        x: 5-D tensor (N, C, D, H, W) — the threshold is broadcast via a
            five-dim view, so a 5-D input is assumed.

    Returns:
        Tensor of the same shape with high-attention positions zeroed.
    """
    batch = x.size(0)
    # Per-position attention: mean over the channel dimension.
    att = torch.mean(x, dim=1, keepdim=True)
    # Per-sample peak attention, flattened over all spatial positions.
    peak = torch.max(att.view(batch, (- 1)), dim=1, keepdim=True)[0]
    # Random cutoff somewhere between 70% and 90% of the peak.
    cutoff = (peak * np.random.uniform(0.7, 0.9)).view(batch, 1, 1, 1, 1).expand_as(att)
    keep = (att < cutoff).float()
    return x.mul(keep)
def fix_word_offset(row):
    """Best-effort repair of a 1-indexed verb offset within a sentence.

    If ``row.word_offset`` already points at ``row.verb`` (case-insensitive,
    1-indexed into ``row.raw_sentence``) it is returned unchanged. Otherwise
    the next two positions are tried, then a case-insensitive search over the
    whole sentence. Any IndexError falls back to the original offset.

    Args:
        row: record with ``raw_sentence`` (list of words), ``word_offset``
            (1-indexed int), ``verb`` (lowercase str) and ``id``.

    Returns:
        The (possibly corrected) 1-indexed word offset.
    """
    offset = row.word_offset
    try:
        words = row.raw_sentence
        # Already correct: the 1-indexed offset lands on the verb.
        if words[(offset - 1)].lower() == row.verb:
            return offset
        # Try shifting the offset forward by one or two positions.
        for shift in (1, 2):
            if words[(offset + shift - 1)].lower() == row.verb:
                print('Fixing word offset {}'.format(row.id))
                return (offset + shift)
        # Last resort: locate the verb anywhere in the sentence.
        lowered = [word.lower() for word in words]
        if row.verb in lowered:
            return (lowered.index(row.verb) + 1)
        return offset
    except IndexError:
        # Offset points past the sentence end — keep it as-is.
        return offset
def map_fn(fun, x):
    """Apply *fun* to each element of *x* and stack the resulting dicts.

    Each call to ``fun`` must return a dict mapping feature names to tensors
    with identical keys/shapes across elements. The per-element tensors are
    stacked along a new trailing dimension.

    Args:
        fun: callable mapping an element of *x* to a dict of tensors.
        x: iterable of inputs (must be non-empty).

    Returns:
        Dict with the same keys; each value has an extra last dim of len(x).
    """
    per_elem = [fun(elem) for elem in x]
    # Key set comes from the first element; all elements must agree.
    return {
        feat: torch.stack([d[feat] for d in per_elem], dim=(- 1))
        for feat in per_elem[0].keys()
    }
class DensePoseOutputsVisualizer(object):
    """Renders one channel (I, U or V) of DensePose chart predictions onto
    a BGR image, one mask per detected box.
    """

    def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, to_visualize=None, **kwargs):
        """Configure which output channel to draw and how to colorize it.

        Args:
            inplace: draw directly onto the input image.
            cmap: OpenCV colormap applied to the value matrix.
            alpha: blending weight of the overlay.
            to_visualize: 'I', 'U' or 'V' — which predictor output to render.
        """
        # NOTE(review): substring containment — besides 'I'/'U'/'V' this also
        # admits e.g. 'UV' (which would later KeyError) and rejects None with
        # a TypeError rather than this message. Confirm callers pass a single
        # character.
        assert (to_visualize in 'IUV'), 'can only visualize IUV'
        self.to_visualize = to_visualize
        if (self.to_visualize == 'I'):
            # Part labels are small ints; rescale to the full 0..255 range.
            val_scale = (255.0 / DensePoseDataRelative.N_PART_LABELS)
        else:
            # U/V values are scaled to 0..255 explicitly in visualize().
            val_scale = 1.0
        self.mask_visualizer = MatrixVisualizer(inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha)

    def visualize(self, image_bgr: Image, dp_output_with_bboxes: Tuple[(Optional[DensePoseChartPredictorOutput], Optional[Boxes])]) -> Image:
        """Overlay the selected DensePose channel for every detection.

        Args:
            image_bgr: target image (BGR).
            dp_output_with_bboxes: (predictor output, boxes in XYWH format);
                either may be None, in which case the image is returned as-is.

        Returns:
            The image with one overlay per detection drawn on it.
        """
        (densepose_output, bboxes_xywh) = dp_output_with_bboxes
        if ((densepose_output is None) or (bboxes_xywh is None)):
            return image_bgr
        assert isinstance(densepose_output, DensePoseChartPredictorOutput), 'DensePoseChartPredictorOutput expected, {} encountered'.format(type(densepose_output))
        # S: coarse (fg/bg) segmentation logits; I: per-part logits;
        # U/V: per-part chart coordinates. First dim is the detection index.
        S = densepose_output.coarse_segm
        I = densepose_output.fine_segm
        U = densepose_output.u
        V = densepose_output.v
        N = S.size(0)
        assert (N == I.size(0)), 'densepose outputs S {} and I {} should have equal first dim size'.format(S.size(), I.size())
        assert (N == U.size(0)), 'densepose outputs S {} and U {} should have equal first dim size'.format(S.size(), U.size())
        assert (N == V.size(0)), 'densepose outputs S {} and V {} should have equal first dim size'.format(S.size(), V.size())
        assert (N == len(bboxes_xywh)), 'number of bounding boxes {} should be equal to first dim size of outputs {}'.format(len(bboxes_xywh), N)
        for n in range(N):
            # Argmax over the channel dim gives hard labels per pixel.
            Sn = S[n].argmax(dim=0)
            # Zero out part labels wherever coarse segm says background.
            In = (I[n].argmax(dim=0) * (Sn > 0).long())
            segmentation = In.cpu().numpy().astype(np.uint8)
            # Binary foreground mask for the visualizer.
            mask = np.zeros(segmentation.shape, dtype=np.uint8)
            mask[(segmentation > 0)] = 1
            bbox_xywh = bboxes_xywh[n]
            if (self.to_visualize == 'I'):
                # Part indices themselves are the value matrix (scaled by
                # val_scale inside the MatrixVisualizer).
                vis = segmentation
            elif (self.to_visualize in 'UV'):
                U_or_Vn = {'U': U, 'V': V}[self.to_visualize][n].cpu().numpy().astype(np.float32)
                vis = np.zeros(segmentation.shape, dtype=np.float32)
                # Gather each part's U/V channel at the pixels labelled with
                # that part; clip chart coords to [0, 1] and scale to 0..255.
                for partId in range(U_or_Vn.shape[0]):
                    vis[(segmentation == partId)] = (U_or_Vn[partId][(segmentation == partId)].clip(0, 1) * 255)
            image_bgr = self.mask_visualizer.visualize(image_bgr, mask, vis, bbox_xywh)
        return image_bgr
class PassiveAggressiveComponentTest(BaseClassificationComponentTest):
    """Expected-score fixtures for the PassiveAggressive classification
    component, consumed by BaseClassificationComponentTest."""

    __test__ = True

    # Reference accuracies / call counts / iteration counts per scenario.
    res = {
        'default_iris': 0.92,
        'iris_n_calls': 5,
        'default_iris_iterative': 0.92,
        'iris_iterative_n_iter': 32,
        'default_iris_proba': 0.,
        'default_iris_sparse': 0.4,
        'default_digits': 0.,
        'digits_n_calls': 6,
        'default_digits_iterative': 0.,
        'digits_iterative_n_iter': 64,
        'default_digits_binary': 0.,
        'default_digits_multilabel': 0.,
        'default_digits_multilabel_proba': 1.0,
        'ignore_hps': ['max_iter'],
    }

    # Reference scikit-learn estimator and the wrapped component under test.
    sk_mod = sklearn.linear_model.PassiveAggressiveClassifier
    module = PassiveAggressive
    step_hyperparameter = {'name': 'max_iter', 'value': module.get_max_iter()}
def train(model_id, max_steps):
    """Train the template_ffd model identified by *model_id*.

    Args:
        model_id: identifier passed to ``get_builder``.
        max_steps: training step budget; when None, the builder's
            ``default_max_steps`` is used instead.
    """
    # Imports kept local so importing this module stays cheap.
    import tensorflow as tf
    from template_ffd.model import get_builder
    tf.logging.set_verbosity(tf.logging.INFO)
    builder = get_builder(model_id)
    builder.initialize_variables()
    steps = builder.default_max_steps if max_steps is None else max_steps
    builder.train(max_steps=steps)
def unpack_data_file(source_file_name, target_dir, start_idx):
    """Expand a pickled CIFAR-style batch into per-class PNG files.

    Each record's flat 3x32x32 buffer is converted to HWC and written to
    ``target_dir/<class>/<start_idx+i>_<class>.png``.

    Args:
        source_file_name: path of the batch file readable by ``load_file``.
        target_dir: root directory for the per-class subdirectories.
        start_idx: global index offset used when naming output files.

    Returns:
        Number of records written.
    """
    print('Unpacking {} to {}'.format(source_file_name, target_dir))
    payload = load_file(source_file_name)
    images, labels = payload['data'], payload['labels']
    for offset, (raw, label) in tqdm(enumerate(zip(images, labels)), total=len(images)):
        class_name = label_names[label]
        class_dir = os.path.join(target_dir, class_name)
        os.makedirs(class_dir, exist_ok=True)
        file_name = '{}_{}.png'.format((start_idx + offset), class_name)
        # CHW -> HWC for image writers.
        hwc = np.moveaxis(raw.reshape(3, 32, 32), 0, 2)
        matplotlib.image.imsave(os.path.join(class_dir, file_name), hwc)
    return len(images)
def solve_fbrfive4():
    """Solve the generic 5-point 4-bar design problem.

    The generic instance is known to have 36 isolated solutions; the check
    passes exactly when the solver finds all of them.

    Returns:
        0 on success (36 solutions found), 1 on failure.
    """
    print('\nsolving a generic 5-point 4-bar design problem ...', end='')
    solutions = solve(fbrfive4())
    ok = (len(solutions) == 36)
    print(' passed' if ok else ' failed')
    return int(not ok)
def l2re_loss(data, name, pred, solution):
    """Relative L2 error: ||pred - solution|| / ||solution||.

    Args:
        data: unused; kept for a uniform loss-function signature.
        name: unused; kept for a uniform loss-function signature.
        pred: predicted tensor.
        solution: reference tensor (must have nonzero norm).

    Returns:
        Scalar tensor with the relative L2 error.
    """
    residual_norm = torch.norm((pred - solution))
    reference_norm = torch.norm(solution)
    return residual_norm / reference_norm