code
stringlengths
101
5.91M
class LocalJobManagerTest(unittest.TestCase):
    """Unit tests for LocalJobManager with a single local worker node."""

    def test_local_job_manager(self):
        # Start a manager for one local worker.
        args = LocalJobArgs('local', 'default', 'test')
        job_mananger = LocalJobManager(args)  # NOTE(review): "mananger" typo kept as-is
        job_mananger.start()
        self.assertEqual(len(job_mananger._job_nodes[NodeType.WORKER]), 1)

        # Report resource usage and verify it is stored on the worker node.
        gpu_stats: list[GPUStats] = [GPUStats(index=0, total_memory_mb=24000, used_memory_mb=4000, gpu_utilization=55.5)]
        job_mananger.update_node_resource_usage(NodeType.WORKER, 0, 10, 10240, gpu_stats)
        worker = job_mananger._job_nodes[NodeType.WORKER][0]
        self.assertEqual(worker.used_resource.cpu, 10)
        self.assertEqual(worker.used_resource.memory, 10240)
        self.assertEqual(worker.used_resource.gpu_stats, gpu_stats)

        # Push a parallelism config and verify it round-trips unchanged.
        dataloader_config = DataLoaderConfig(1, 'test_dataloader', 2, 3, 4)
        optimizer_config = OptimizerConfig(1, 'test_optimizer', 2)
        paral_config = ParallelConfig(dataloader_config, optimizer_config)
        job_mananger.update_node_paral_config(NodeType.WORKER, 0, paral_config)
        worker = job_mananger._job_nodes[NodeType.WORKER][0]
        self.assertEqual(worker.paral_config, paral_config)
def load_imgnet(train):
    """Load the pickled ImageNet split and move the channel axis to the end.

    Args:
        train: when truthy, load the 'train' pickle, otherwise 'valid'.
    """
    import pickle
    split = 'train' if train else 'valid'
    with open('./natural_{}.pkl'.format(split), 'rb') as fin:
        batch = pickle.load(fin)
    # Move channels from axis 2 to the last axis (channels-last layout).
    return np.transpose(batch, axes=(0, 1, 3, 4, 2))
class SMACMap(lib.Map):
    """Base map definition for SMAC (StarCraft Multi-Agent Challenge) maps.

    NOTE(review): the ``download`` string literal was truncated in this file;
    restored to the canonical SMAC maps URL — confirm against upstream.
    """
    directory = 'SMAC_Maps'
    download = 'https://github.com/oxwhirl/smac#smac-maps'
    players = 2
    step_mul = 8
    game_steps_per_episode = 0
class swapRef():
    """Double-buffered holder: writing var1 immediately swaps it into var2.

    NOTE(review): var1/var2/lock are class-level attributes, so the lock and
    initial values are shared across all instances — confirm this singleton
    usage is intended.
    """
    var1: ImageMessage = None  # write slot (pre-swap)
    var2: ImageMessage = None  # read slot exposed via get_var2()
    lock: Lock = Lock()        # guards every access and the swap

    def set_var1(self, value):
        # Store the new value and immediately publish it via the swap.
        with self.lock:
            self.var1 = value
            self.swap()

    def get_var2(self):
        with self.lock:
            return self.var2

    def swap(self):
        # Caller must already hold self.lock (Lock is not reentrant).
        (self.var1, self.var2) = (self.var2, self.var1)
@flax.struct.dataclass
class FlaxSeq2SeqLMOutput(ModelOutput):
    """Output container for Flax seq2seq language models: LM logits, cached
    key/values, and encoder/decoder hidden states and attention maps.

    NOTE(review): the decorator line was truncated to ``.dataclass`` in this
    file; restored to ``@flax.struct.dataclass`` (the form used by
    transformers' Flax outputs) — confirm against upstream.
    """
    logits: jnp.ndarray = None
    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
    encoder_last_hidden_state: Optional[jnp.ndarray] = None
    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
class FlaxDownsample2D(nn.Module):
    """2x spatial downsampling via a stride-2 3x3 convolution (Flax).

    Pads one row/column at the bottom/right so a 'VALID' conv halves the
    spatial dims (asymmetric padding, matching the reference implementation).
    """
    in_channels: int                # output channel count of the conv
    dtype: jnp.dtype = jnp.float32  # computation dtype

    def setup(self):
        self.conv = nn.Conv(self.in_channels, kernel_size=(3, 3), strides=(2, 2), padding='VALID', dtype=self.dtype)

    def __call__(self, hidden_states):
        # Pad only the trailing edge of axes 1 and 2 (assumes NHWC layout).
        pad = ((0, 0), (0, 1), (0, 1), (0, 0))
        hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
@MODELS.register_module()
class GlobalContextHead(BaseModule):
    """Global context head: auxiliary multi-label classification branch over
    the last feature level, trained with BCE against the set of classes
    present in each image.

    NOTE(review): the decorator was truncated to ``_module()`` in this file;
    restored to ``@MODELS.register_module()`` — confirm the registry name
    against the surrounding codebase.
    """

    def __init__(self, num_convs: int=4, in_channels: int=256, conv_out_channels: int=256, num_classes: int=80, loss_weight: float=1.0, conv_cfg: OptConfigType=None, norm_cfg: OptConfigType=None, conv_to_res: bool=False, init_cfg: MultiConfig=dict(type='Normal', std=0.01, override=dict(name='fc'))) -> None:
        super().__init__(init_cfg=init_cfg)
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.conv_to_res = conv_to_res
        self.fp16_enabled = False
        if self.conv_to_res:
            # Residual blocks replace plain convs (two convs per block).
            num_res_blocks = (num_convs // 2)
            self.convs = ResLayer(SimplifiedBasicBlock, in_channels, self.conv_out_channels, num_res_blocks, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)
            self.num_convs = num_res_blocks
        else:
            self.convs = nn.ModuleList()
            for i in range(self.num_convs):
                in_channels = (self.in_channels if (i == 0) else conv_out_channels)
                self.convs.append(ConvModule(in_channels, conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(conv_out_channels, num_classes)
        self.criterion = nn.BCEWithLogitsLoss()

    def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
        """Run the convs on the last feature level.

        Returns:
            (multi-class logits of shape (N, num_classes), pooled feature map).
        """
        x = feats[(- 1)]
        for i in range(self.num_convs):
            x = self.convs[i](x)
        x = self.pool(x)
        mc_pred = x.reshape(x.size(0), (- 1))
        mc_pred = self.fc(mc_pred)
        return (mc_pred, x)

    def loss(self, pred: Tensor, labels: List[Tensor]) -> Tensor:
        """Multi-label BCE loss: targets are 1 for every class present in each image."""
        labels = [lbl.unique() for lbl in labels]
        targets = pred.new_zeros(pred.size())
        for (i, label) in enumerate(labels):
            targets[(i, label)] = 1.0
        loss = (self.loss_weight * self.criterion(pred, targets))
        return loss
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet from ``block``/``layers``.

    ``arch``, ``pretrained`` and ``progress`` are accepted for API
    compatibility but no pretrained weights are loaded here.
    """
    return ResNet(block, layers, **kwargs)
def create_dummy_saved_model(path):
    """Build a tiny checkpointable image model and save it as a SavedModel at ``path``."""

    class ImageModel(tf.train.Checkpoint):
        def __init__(self):
            super(ImageModel, self).__init__()
            self.v = tf.Variable(1.0, use_resource=True)

        # NOTE(review): the original decorator line was truncated to a bare
        # parenthesized ``(input_signature=[...])``; restored as @tf.function
        # — confirm against the original source.
        @tf.function(input_signature=[tf.TensorSpec(name='input', shape=[32, 224, 224, 3], dtype=tf.float32), tf.TensorSpec(name='training', shape=None, dtype=tf.bool)])
        def __call__(self, x, training):
            # Global average pool over H/W, shifted by the model variable.
            return (tf.reduce_mean(x, axis=[1, 2]) + self.v)

    with tf.Session() as sess:
        model = ImageModel()
        model.save_counter  # touch to force creation of the counter variable
        model.trainable_variables = [model.v]
        init = tf.initialize_all_variables()
        sess.run(init)
        tf.compat.v2.saved_model.save(model, path)
def _reshape_batch(tensors: Sequence[torch.Tensor], old_shape: torch.Size, new_shape: torch.Size): if (old_shape == new_shape): return tensors return [t.reshape((new_shape + t.shape[len(old_shape):])) for t in tensors]
def get_params(model_type, n_train, tree_type=None):
    """Return the hyperparameter search grid for the given model/tree type.

    Args:
        model_type: one of 'ngboost', 'pgbm', 'knn', 'bart', 'cbu',
            'constant', 'ibug'.
        n_train: training-set size, used to cap the KNN neighbor counts.
        tree_type: base learner; required for 'constant'/'ibug'/'knn'.

    Raises:
        ValueError: for an unknown model_type or tree_type.
    """
    if (model_type == 'ngboost'):
        params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000]}
    elif (model_type == 'pgbm'):
        params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000], 'max_leaves': [15, 31, 61, 91], 'learning_rate': [0.01, 0.1], 'min_data_in_leaf': [1, 20], 'max_bin': [255]}
    elif ((model_type == 'knn') and (tree_type == 'knn')):
        # Only neighbor counts that do not exceed the training-set size.
        k_list = [3, 5, 7, 11, 15, 31, 61, 91, 121, 151, 201, 301, 401, 501, 601, 701]
        params = {'n_neighbors': [k for k in k_list if (k <= n_train)]}
    elif (model_type == 'bart'):
        params = {'n_trees': [10, 50, 100, 200], 'n_chains': [5]}
    elif (model_type == 'cbu'):
        params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000], 'max_depth': [2, 3, 5, 7, None], 'learning_rate': [0.01, 0.1], 'min_data_in_leaf': [1, 20], 'max_bin': [255]}
    elif (model_type in ['constant', 'ibug', 'knn']):
        # These wrap a tree-based base learner; grid depends on its library.
        assert (tree_type is not None)
        if (tree_type == 'lgb'):
            params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000], 'num_leaves': [15, 31, 61, 91], 'learning_rate': [0.01, 0.1], 'min_child_samples': [1, 20], 'max_bin': [255]}
        elif (tree_type == 'xgb'):
            params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000], 'max_depth': [2, 3, 5, 7, None], 'learning_rate': [0.01, 0.1], 'min_child_weight': [1, 20], 'max_bin': [255]}
        elif (tree_type == 'cb'):
            params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000], 'max_depth': [2, 3, 5, 7, None], 'learning_rate': [0.01, 0.1], 'min_data_in_leaf': [1, 20], 'max_bin': [255]}
        elif (tree_type == 'ngboost'):
            params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000]}
        elif (tree_type == 'pgbm'):
            params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000], 'max_leaves': [15, 31, 61, 91], 'learning_rate': [0.01, 0.1], 'min_data_in_leaf': [1, 20], 'max_bin': [255]}
        elif (tree_type == 'skrf'):
            params = {'n_estimators': [10, 25, 50, 100, 250, 500, 1000, 2000], 'max_depth': [2, 3, 5, 7, None], 'min_samples_leaf': [1, 20]}
        else:
            raise ValueError('tree_type unknown: {}'.format(tree_type))
    else:
        raise ValueError('model_type unknown: {}'.format(model_type))
    return params
def transform_multiple_input_dataloader_to_inc_mode(model, dataloader):
    """Rewrap a multi-input dataloader's collate_fn into INC's expected form.

    Each batch becomes ``((input_1, ..., input_k), label_or_labels)`` where k
    is the model's forward-argument count. Returns the original dataloader
    unchanged when no transformation is needed.
    """
    (need_transformation, forward_args_len) = _need_dataloader_type_transformation(model, dataloader)
    if need_transformation:

        def tuple_collate_fn_wrapper(func, forward_args_len):
            # Wrap the existing collate_fn and split its output into model
            # inputs vs. label(s) based on the forward argument count.
            def collate_fn(batch):
                res = func(batch)
                if ((len(res) - forward_args_len) == 1):
                    # exactly one label element
                    return (tuple(res[:forward_args_len]), res[(- 1)])
                else:
                    # multiple label elements
                    return (tuple(res[:forward_args_len]), tuple(res[forward_args_len:]))
            return collate_fn

        # deepcopy so the caller's dataloader is left untouched
        new_dataloader = deepcopy(dataloader)
        new_dataloader.collate_fn = tuple_collate_fn_wrapper(new_dataloader.collate_fn, forward_args_len)
        return new_dataloader
    return dataloader
def unzip_file(file, dest):
    """Extract every member of the zip archive ``file`` into directory ``dest``."""
    with zipfile.ZipFile(file, 'r') as archive:
        archive.extractall(dest)
def test_factor():
    """Smoke-test factoring the witness set of a reducible hypersurface."""
    from phcpy.sets import witness_set_of_hypersurface
    # (x+1) * (x^2 + y^2 + 1): one linear and one quadratic factor.
    hyp = '(x+1)*(x^2 + y^2 + 1);'
    (wsys, wsols) = witness_set_of_hypersurface(2, hyp)
    fac = factor(1, wsys, wsols)
    print(fac)
def load(checkpoint_dir=None, model=None, layer_wise=False, history_cfg=None, **kwargs):
    """Rebuild and load an INC-quantized PyTorch model.

    ``checkpoint_dir`` may be a state-dict, a file, or a directory containing
    either a TorchScript model or weights + a tuning config; alternatively a
    ``history_cfg`` can be used to reconstruct the quantized graph without
    weights. Returns the quantized (possibly bf16-converted) model.
    """
    weigth_only = kwargs.get('weight_only', False)
    if weigth_only:
        return load_weight_only(checkpoint_dir, model, layer_wise=layer_wise)
    if (checkpoint_dir is not None):
        if isinstance(checkpoint_dir, dict):
            # caller passed an in-memory state dict directly
            stat_dict = checkpoint_dir
        elif os.path.isfile(checkpoint_dir):
            weights_file = checkpoint_dir
            # Prefer TorchScript (IPEX backend); fall back to a plain state dict.
            try:
                stat_dict = torch.jit.load(weights_file)
                logger.info('torch.jit.load is used to recovery the int8 model quantized by INC IPEX backend')
            except:
                stat_dict = torch.load(weights_file)
        elif os.path.isdir(checkpoint_dir):
            try:
                weights_file = os.path.join(os.path.abspath(os.path.expanduser(checkpoint_dir)), 'best_model.pt')
                try:
                    stat_dict = torch.jit.load(weights_file)
                    logger.info('torch.jit.load is used to recovery the int8 model quantized by INC IPEX backend')
                except:
                    stat_dict = torch.load(weights_file)
            except:
                # Legacy layout: separate YAML tuning config + weights file.
                tune_cfg_file = os.path.join(os.path.abspath(os.path.expanduser(checkpoint_dir)), 'best_configure.yaml')
                weights_file = os.path.join(os.path.abspath(os.path.expanduser(checkpoint_dir)), 'best_model_weights.pt')
                stat_dict = torch.load(weights_file)
                with open(tune_cfg_file, 'r') as f:
                    tune_cfg = yaml.safe_load(f)
                stat_dict['best_configure'] = tune_cfg
        else:
            # NOTE(review): the '{}' placeholder is never filled in — confirm.
            logger.error('Unexpected checkpoint type:{}. Only file dir/path or state_dict is acceptable')
        if (not isinstance(stat_dict, torch.jit._script.RecursiveScriptModule)):
            assert ('best_configure' in stat_dict), 'No best_configure found in the model file, please use the int8 model file generated by INC.'
            tune_cfg = stat_dict.pop('best_configure')
    else:
        # No checkpoint: rebuild the quantized graph from recorded history only.
        assert (history_cfg is not None), 'Need chieckpoint_dir or history_cfg to rebuild int8 model'
        tune_cfg = history_cfg
        stat_dict = None
    version = get_torch_version()
    example_inputs = None
    if isinstance(stat_dict, torch.jit._script.RecursiveScriptModule):
        # TorchScript path: freeze and return directly.
        q_model = torch.jit.freeze(stat_dict.eval())
        logger.info('Finish load the model quantized by INC IPEX backend.')
        return q_model
    if (('is_oneshot' in tune_cfg) and tune_cfg['is_oneshot']):
        return _load_int8_orchestration(model, tune_cfg, stat_dict, example_inputs, **kwargs)
    model.eval()
    approach_quant_mode = None
    if (tune_cfg['approach'] == 'post_training_dynamic_quant'):
        approach_quant_mode = 'dynamic'
    elif (tune_cfg['approach'] == 'post_training_static_quant'):
        approach_quant_mode = 'static'
    recipe_cfgs = tune_cfg.get('recipe_cfgs', None)
    # SmoothQuant (unfolded) path: wrap linears and load weights directly.
    if (recipe_cfgs and recipe_cfgs.get('smooth_quant', False) and (not recipe_cfgs['smooth_quant_args']['folding']) and (approach_quant_mode != 'dynamic')):
        from ..adaptor.torch_utils.model_wrapper import _wrapper_qdq_linear, _wrapper_sq_linear
        model = _wrapper_sq_linear(model, recipe_cfgs['smoothquant_op_info']['sq_linear'])
        model = _wrapper_qdq_linear(model, recipe_cfgs['smoothquant_op_info']['qdq_linear'])
        model.load_state_dict(stat_dict)
        return model
    # Layer-wise quantization path.
    if (recipe_cfgs and recipe_cfgs.get('layer_wise_quant', False) and (approach_quant_mode != 'dynamic')):
        from ..adaptor.torch_utils.model_wrapper import _wrap_lwq_layer
        op_cfgs = _cfg_to_qconfig(tune_cfg, tune_cfg['approach'])
        fx_op_cfgs = _cfgs_to_fx_cfgs(op_cfgs, tune_cfg['approach'])
        model = _wrap_lwq_layer(model, recipe_cfgs['lwq_layers'], fx_op_cfgs)
        model.load_state_dict(stat_dict)
        return model
    # Backfill per-op quant_mode from the overall approach.
    for (_, op_cfg) in tune_cfg['op'].items():
        if ('quant_mode' not in op_cfg['activation']):
            op_cfg['activation']['quant_mode'] = approach_quant_mode
    # Pick the static/dynamic module mapping for the installed torch version.
    if (tune_cfg['approach'] != 'post_training_dynamic_quant'):
        if (version.release < Version('1.7.0').release):
            q_mapping = tq.default_mappings.DEFAULT_MODULE_MAPPING
        elif (version.release < Version('1.8.0').release):
            q_mapping = tq.quantization_mappings.get_static_quant_module_mappings()
        else:
            q_mapping = tq.quantization_mappings.get_default_static_quant_module_mappings()
    elif (version.release < Version('1.7.0').release):
        q_mapping = tq.default_mappings.DEFAULT_DYNAMIC_MODULE_MAPPING
    elif (version.release < Version('1.8.0').release):
        q_mapping = tq.quantization_mappings.get_dynamic_quant_module_mappings()
    else:
        q_mapping = tq.quantization_mappings.get_default_dynamic_quant_module_mappings()
    if (tune_cfg['framework'] == 'pytorch_fx'):
        # FX graph-mode quantization path.
        assert (version.release >= Version('1.8.0').release), 'Please use PyTroch 1.8 or higher version with pytorch_fx backend'
        from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
        if (kwargs is None):
            kwargs = {}
        prepare_custom_config_dict = kwargs.get('prepare_custom_config_dict', None)
        convert_custom_config_dict = kwargs.get('convert_custom_config_dict', None)
        op_cfgs = _cfg_to_qconfig(tune_cfg, tune_cfg['approach'])
        fx_op_cfgs = _cfgs_to_fx_cfgs(op_cfgs, tune_cfg['approach'])
        if (not tune_cfg['fx_sub_module_list']):
            tmp_model = model
            if (tune_cfg['approach'] == 'quant_aware_training'):
                model.train()
                # prepare_* keyword names changed after torch 1.12.1
                if (version.release > Version('1.12.1').release):
                    model = prepare_qat_fx(model, fx_op_cfgs, prepare_custom_config=prepare_custom_config_dict, example_inputs=example_inputs)
                else:
                    model = prepare_qat_fx(model, fx_op_cfgs, prepare_custom_config_dict=prepare_custom_config_dict)
            elif (version.release > Version('1.12.1').release):
                model = prepare_fx(model, fx_op_cfgs, prepare_custom_config=prepare_custom_config_dict, example_inputs=example_inputs)
            else:
                model = prepare_fx(model, fx_op_cfgs, prepare_custom_config_dict=prepare_custom_config_dict)
            if (version.release > Version('1.12.1').release):
                model = convert_fx(model, convert_custom_config=convert_custom_config_dict)
            else:
                model = convert_fx(model, convert_custom_config_dict=convert_custom_config_dict)
            # carry non-module attributes over from the original model
            util.append_attr(model, tmp_model)
            del tmp_model
        else:
            # Only specific submodules are FX-quantized.
            sub_module_list = tune_cfg['fx_sub_module_list']
            if (tune_cfg['approach'] == 'quant_aware_training'):
                model.train()
                PyTorch_FXAdaptor.prepare_sub_graph(sub_module_list, fx_op_cfgs, model, prefix='', is_qat=True, example_inputs=example_inputs)
            else:
                PyTorch_FXAdaptor.prepare_sub_graph(sub_module_list, fx_op_cfgs, model, prefix='', example_inputs=example_inputs)
            PyTorch_FXAdaptor.convert_sub_graph(sub_module_list, model, prefix='')
    else:
        # Eager-mode quantization path.
        if (tune_cfg['approach'] == 'post_training_dynamic_quant'):
            op_cfgs = _cfg_to_qconfig(tune_cfg, tune_cfg['approach'])
        else:
            op_cfgs = _cfg_to_qconfig(tune_cfg)
        _propagate_qconfig(model, op_cfgs, approach=tune_cfg['approach'])
        if (not any(((hasattr(m, 'qconfig') and m.qconfig) for m in model.modules()))):
            logger.warn('None of the submodule got qconfig applied. Make sure you passed correct configuration through `qconfig_dict` or by assigning the `.qconfig` attribute directly on submodules')
        if (tune_cfg['approach'] != 'post_training_dynamic_quant'):
            # add_observer_ was made private in torch 2.0
            if (version.release < Version('2.0.0').release):
                from torch.quantization.quantize import add_observer_
            else:
                from torch.quantization.quantize import _add_observer_ as add_observer_
            add_observer_(model)
        model = convert(model, mapping=q_mapping, inplace=True)
    # Optional bf16 conversion for the recorded op list.
    bf16_ops_list = (tune_cfg['bf16_ops_list'] if ('bf16_ops_list' in tune_cfg.keys()) else [])
    if ((len(bf16_ops_list) > 0) and (version >= Version('1.11.0-rc1'))):
        from ..adaptor.torch_utils.bf16_convert import Convert
        model = Convert(model, tune_cfg)
    if (not is_int8_model(model)):
        logger.warning('The loaded model is not a int8 model.')
    if ((checkpoint_dir is None) and (history_cfg is not None)):
        # history-only rebuild: restore activation scales/zero-points
        _set_activation_scale_zeropoint(model, history_cfg)
    else:
        try:
            model.load_state_dict(stat_dict)
        except:
            # tolerate missing keys but never unexpected ones
            mismatch_log = model.load_state_dict(stat_dict, strict=False)
            assert (len(mismatch_log.unexpected_keys) == 0), 'Loading state_dict failed: {}'.format(mismatch_log)
    util.get_embedding_contiguous(model)
    return model
class MockBatchNorm1d(_MockBatchNorm):
    """Mock BatchNorm1d: only validates that the input is 2D or 3D."""

    def _check_input_dim(self, input):
        dims = input.dim()
        if dims not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(dims))
def has_hf_hub(necessary=False):
    """Return whether huggingface_hub support is available.

    Raises RuntimeError when ``necessary`` is True but the package is missing.
    """
    available = hf_hub_url is not None
    if necessary and not available:
        raise RuntimeError('Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
    return available
class TcnCheckpointer(Checkpointer):
    """Checkpointer that snapshots the config and normalizes raw state dicts
    into the ``{'model': ...}`` layout the base loader expects."""

    def __init__(self, cfg, model, optimizer=None, scheduler=None, save_dir='', save_to_disk=None, logger=None):
        super(TcnCheckpointer, self).__init__(model, optimizer, scheduler, save_dir, save_to_disk, logger)
        # clone so later mutations of the caller's cfg do not leak in
        self.cfg = cfg.clone()

    def _load_file(self, f):
        loaded = super(TcnCheckpointer, self)._load_file(f)
        if ('model' not in loaded):
            # bare state dict -> wrap it under the 'model' key
            loaded = dict(model=loaded)
        return loaded
def mask_to_test_readable_only_shape(mask: Image) -> Dict:
    """Return only the array shape of ``mask``, as ``{'shape': (...)}``."""
    return {'shape': np.array(mask).shape}
class LinearEps(EpsProposal):
    """Epsilon schedule decaying linearly from ``max`` down to ``min`` over T steps."""

    def __init__(self, T, max, min):
        # NOTE(review): `max`/`min` shadow builtins; names kept for caller compatibility.
        super(LinearEps, self).__init__(T)
        self.eps_vals = np.linspace(max, min, T)  # precomputed schedule, length T

    def __call__(self, t):
        # t is the step index, expected 0 <= t < T
        return self.eps_vals[t]
def psnr(rgbs: torch.Tensor, target_rgbs: torch.Tensor) -> float:
    """Peak signal-to-noise ratio in dB, computed as -10*log10(MSE)
    (i.e. assuming values scaled so the peak is 1.0)."""
    err = rgbs - target_rgbs
    mse = (err ** 2).mean()
    return (-10) * torch.log10(mse).item()
def test_linearPotential_method_value():
    """Check that a toVertical'd potential with units returns Quantities that
    match the unitless potential scaled by the appropriate vo/ro factors."""
    from galpy.potential import PlummerPotential
    from galpy.util import conversion
    (ro, vo) = (8.0, 220.0)
    pot = PlummerPotential(normalize=True, ro=ro, vo=vo).toVertical(1.1)
    potu = PlummerPotential(normalize=True).toVertical(1.1)
    # __call__ in km^2/s^2 must equal the unitless value times vo^2
    assert (numpy.fabs((pot(1.1).to(((units.km ** 2) / (units.s ** 2))).value - (potu(1.1) * (vo ** 2.0)))) < (10.0 ** (- 8.0))), 'Potential method __call__ does not return the correct value as Quantity'
    # force in km/s^2 (scaled by 1e13) must match the conversion-factor form
    assert (numpy.fabs(((pot.force(1.1).to((units.km / (units.s ** 2))).value * (10.0 ** 13.0)) - (potu.force(1.1) * conversion.force_in_10m13kms2(vo, ro)))) < (10.0 ** (- 4.0))), 'Potential method force does not return the correct value as Quantity'
    return None
def se_resnet18(pretrained=False, **kwargs):
    """Construct an SE-ResNet-18; ``pretrained`` is accepted but no weights
    are loaded here."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
class Pct(nn.Module):
    """Point Cloud Transformer (PCT) classifier.

    Pipeline: two pointwise convs -> two sample_and_group stages (512 then 256
    points) with Local_op aggregation -> transformer stack -> global max-pool
    -> 3-layer MLP head producing ``output_channels`` logits.
    """

    def __init__(self, args, output_channels=40):
        super(Pct, self).__init__()
        self.args = args
        # pointwise embedding convs (3 -> 64 -> 64)
        self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(64)
        # local neighborhood feature aggregation at the two grouping stages
        self.gather_local_0 = Local_op(in_channels=128, out_channels=128)
        self.gather_local_1 = Local_op(in_channels=256, out_channels=256)
        self.pt_last = Point_Transformer_Last(args)
        # 1280 = transformer output (1024) concatenated with feature_1 (256)
        self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False), nn.BatchNorm1d(1024), nn.LeakyReLU(negative_slope=0.2))
        # classification head
        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=args.dropout)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=args.dropout)
        self.linear3 = nn.Linear(256, output_channels)

    def forward(self, x):
        # x appears to be (batch, 3, num_points) given conv1's input size
        # — TODO confirm with the caller.
        xyz = x.permute(0, 2, 1)
        (batch_size, _, _) = x.size()
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = x.permute(0, 2, 1)
        # first grouping stage: 512 centers, radius 0.15, 32 neighbors
        (new_xyz, new_feature) = sample_and_group(npoint=512, radius=0.15, nsample=32, xyz=xyz, points=x)
        feature_0 = self.gather_local_0(new_feature)
        feature = feature_0.permute(0, 2, 1)
        # second grouping stage: 256 centers, radius 0.2
        (new_xyz, new_feature) = sample_and_group(npoint=256, radius=0.2, nsample=32, xyz=new_xyz, points=feature)
        feature_1 = self.gather_local_1(new_feature)
        x = self.pt_last(feature_1, new_xyz)
        # concat transformer output with its input features, then fuse
        x = torch.cat([x, feature_1], dim=1)
        x = self.conv_fuse(x)
        x = F.adaptive_max_pool1d(x, 1).view(batch_size, (- 1))
        x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
        x = self.dp1(x)
        x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
        x = self.dp2(x)
        x = self.linear3(x)
        return x
def weights_init(m):
    """DCGAN-style initializer: N(0, 0.02) for conv weights; N(1, 0.02)
    weight and zero bias for batch-norm layers; other modules untouched."""
    kind = m.__class__.__name__
    if 'Conv' in kind:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in kind:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
def modify_uppercase_phrase(s):
    """Title-case a string that is entirely uppercase; return others unchanged."""
    return s.title() if s == s.upper() else s
def create_supervised_evaluator_with_mask_new_eval(model, metrics, device=None):
    """Build an ignite evaluation Engine that feeds (data, masks) through the
    model and passes pids/ambi/camids along for the attached metrics."""
    if device:
        model.to(device)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            (data, pids, ambi, camids, masks) = batch
            # move inputs to the device only when a GPU is available
            data = (data.to(device) if (torch.cuda.device_count() >= 1) else data)
            feat = model(data, masks)
            return (feat, pids, ambi, camids)

    engine = Engine(_inference)
    for (name, metric) in metrics.items():
        metric.attach(engine, name)
    return engine
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
    """Base transformer layer assembled from an ``operation_order`` of
    'self_attn' / 'cross_attn' / 'norm' / 'ffn' operations.

    NOTE(review): the decorator was truncated to ``_LAYER.register_module()``
    in this file; restored to ``@TRANSFORMER_LAYER.register_module()`` (the
    mmcv registry) — confirm against the surrounding codebase.
    """

    def __init__(self, attn_cfgs=None, ffn_cfgs=dict(type='FFN', embed_dims=256, feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True)), operation_order=None, norm_cfg=dict(type='LN'), init_cfg=None, batch_first=False, **kwargs):
        # Map deprecated flat kwargs onto the ffn_cfgs dict.
        deprecated_args = dict(feedforward_channels='feedforward_channels', ffn_dropout='ffn_drop', ffn_num_fcs='num_fcs')
        for (ori_name, new_name) in deprecated_args.items():
            if (ori_name in kwargs):
                warnings.warn(f'The arguments `{ori_name}` in BaseTransformerLayer has been deprecated, now you should set `{new_name}` and other FFN related arguments to a dict named `ffn_cfgs`. ', DeprecationWarning)
                ffn_cfgs[new_name] = kwargs[ori_name]
        super(BaseTransformerLayer, self).__init__(init_cfg)
        self.batch_first = batch_first
        # operation_order may only contain the four supported operation names
        assert ((set(operation_order) & set(['self_attn', 'norm', 'ffn', 'cross_attn'])) == set(operation_order)), f"The operation_order of {self.__class__.__name__} should contains all four operation type {['self_attn', 'norm', 'ffn', 'cross_attn']}"
        num_attn = (operation_order.count('self_attn') + operation_order.count('cross_attn'))
        if isinstance(attn_cfgs, dict):
            # a single cfg is replicated for every attention operation
            attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
        else:
            assert (num_attn == len(attn_cfgs)), f'The length of attn_cfg {num_attn} is not consistent with the number of attentionin operation_order {operation_order}.'
        self.num_attn = num_attn
        self.operation_order = operation_order
        self.norm_cfg = norm_cfg
        self.pre_norm = (operation_order[0] == 'norm')
        self.attentions = ModuleList()
        index = 0
        # Build one attention module per 'self_attn'/'cross_attn' entry.
        for operation_name in operation_order:
            if (operation_name in ['self_attn', 'cross_attn']):
                if ('batch_first' in attn_cfgs[index]):
                    assert (self.batch_first == attn_cfgs[index]['batch_first'])
                else:
                    attn_cfgs[index]['batch_first'] = self.batch_first
                attention = build_attention(attn_cfgs[index])
                attention.operation_name = operation_name
                self.attentions.append(attention)
                index += 1
        self.embed_dims = self.attentions[0].embed_dims
        self.ffns = ModuleList()
        num_ffns = operation_order.count('ffn')
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = ConfigDict(ffn_cfgs)
        if isinstance(ffn_cfgs, dict):
            ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
        assert (len(ffn_cfgs) == num_ffns)
        for ffn_index in range(num_ffns):
            if ('embed_dims' not in ffn_cfgs[ffn_index]):
                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
            else:
                assert (ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims)
            self.ffns.append(build_feedforward_network(ffn_cfgs[ffn_index], dict(type='FFN')))
        self.norms = ModuleList()
        num_norms = operation_order.count('norm')
        for _ in range(num_norms):
            self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])

    def forward(self, query, key=None, value=None, query_pos=None, key_pos=None, attn_masks=None, query_key_padding_mask=None, key_padding_mask=None, **kwargs):
        """Run the operations in ``operation_order`` and return the updated query."""
        norm_index = 0
        attn_index = 0
        ffn_index = 0
        identity = query
        if (attn_masks is None):
            attn_masks = [None for _ in range(self.num_attn)]
        elif isinstance(attn_masks, torch.Tensor):
            # a single mask is replicated for every attention operation
            attn_masks = [copy.deepcopy(attn_masks) for _ in range(self.num_attn)]
            warnings.warn(f'Use same attn_mask in all attentions in {self.__class__.__name__} ')
        else:
            assert (len(attn_masks) == self.num_attn), f'The length of attn_masks {len(attn_masks)} must be equal to the number of attention in operation_order {self.num_attn}'
        for layer in self.operation_order:
            if (layer == 'self_attn'):
                temp_key = temp_value = query
                query = self.attentions[attn_index](query, temp_key, temp_value, (identity if self.pre_norm else None), query_pos=query_pos, key_pos=query_pos, attn_mask=attn_masks[attn_index], key_padding_mask=query_key_padding_mask, **kwargs)
                attn_index += 1
                identity = query
            elif (layer == 'norm'):
                query = self.norms[norm_index](query)
                norm_index += 1
            elif (layer == 'cross_attn'):
                query = self.attentions[attn_index](query, key, value, (identity if self.pre_norm else None), query_pos=query_pos, key_pos=key_pos, attn_mask=attn_masks[attn_index], key_padding_mask=key_padding_mask, **kwargs)
                attn_index += 1
                identity = query
            elif (layer == 'ffn'):
                query = self.ffns[ffn_index](query, (identity if self.pre_norm else None))
                ffn_index += 1
        return query
class FlaubertForQuestionAnswering():
    """Import-time placeholder that raises a helpful error when PyTorch is
    not installed (both construction and from_pretrained fail fast)."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def read_datafile(filename):
    """Read a spans CSV file and return a list of (span_list, text) pairs.

    Each row's 'spans' column is a Python-literal list of character offsets;
    it is repaired against the row's text via fix_spans before being returned.
    """
    data = []
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        # removed: unused `count = 0` accumulator from the original
        for row in reader:
            fixed = fix_spans.fix_spans(ast.literal_eval(row['spans']), row['text'])
            data.append((fixed, row['text']))
    return data
def create_dictionary(dataroot, task='vqa'):
    """Build a token Dictionary from VQA question files or Flickr30k phrases.

    Args:
        dataroot: root directory containing the dataset files.
        task: 'vqa' (JSON question files) or 'flickr' (pickled image ids).

    Fixes the original's leaked file handles by opening the JSON question
    files in a context manager instead of ``json.load(open(...))``.
    """
    dictionary = Dictionary()
    if (task == 'vqa'):
        files = ['v2_OpenEnded_mscoco_train2014_questions.json', 'v2_OpenEnded_mscoco_val2014_questions.json', 'v2_OpenEnded_mscoco_test2015_questions.json', 'v2_OpenEnded_mscoco_test-dev2015_questions.json']
        for path in files:
            question_path = os.path.join(dataroot, path)
            with open(question_path) as f:
                qs = json.load(f)['questions']
            for q in qs:
                dictionary.tokenize(q['question'], True)
    elif (task == 'flickr'):
        files = ['train_ids.pkl', 'val_ids.pkl', 'test_ids.pkl']
        sentence_dir = os.path.join(dataroot, 'Flickr30kEntities/Sentences')
        for path in files:
            ids_file = os.path.join(dataroot, path)
            with open(ids_file, 'rb') as f:
                imgids = cPickle.load(f)
            for image_id in imgids:
                question_path = os.path.join(sentence_dir, ('%d.txt' % image_id))
                phrases = get_sent_data(question_path)
                for phrase in phrases:
                    dictionary.tokenize(phrase, True)
    return dictionary
class ViTForMaskedImageModeling(metaclass=DummyObject):
    """Import-time stand-in that errors on use unless torch is installed."""
    _backends = ['torch']  # backends that must be present to instantiate

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def prefixFilename(name: str) -> str:
    """Resolve ``name`` against the registered ``_filePrefixes`` stack.

    CWDPath wrappers bypass prefixing entirely; empty names and absolute
    paths are returned untouched. An absolute prefix anywhere in the stack
    discards everything accumulated before it.
    """
    if isinstance(name, CWDPath):
        return name.f  # already anchored; no prefixing
    if ((len(name) == 0) or (name[0] == '/')):
        return name
    prefixed: List[str] = []
    for s in _filePrefixes:
        if ((len(s) != 0) and (s[0] == '/')):
            # absolute prefix: reset everything collected so far
            prefixed[:] = [s]
        else:
            prefixed.append(s)
    prefixed.append(name)
    return ''.join(prefixed)
def load_data_from_file(filename):
    """Read raw game records from ``filename`` and convert them to training data."""
    return convert_to_cheating_data(read_game_data_from_file(filename))
def run_job(num_jobs, job_id, dirname, iter_no, model_file, lr, frame_shift, egs_dir, num_archives, num_archives_processed, minibatch_size, job_cmd):
    """Launch one parallel training job via ``job_cmd`` and return its exit code.

    The archive index cycles through ``num_archives``; the L2 factor is split
    evenly across ``num_jobs``.

    NOTE(review): the ``minibatch_size`` parameter is ignored — the command
    line hard-codes '128,64'. Confirm whether that is intentional.
    """
    log_file = '{}/log/train.{}.{}.log'.format(dirname, iter_no, job_id)
    process_out = subprocess.run([*job_cmd.split(), log_file, model_file, '--dir', dirname, '--mode', 'training', '--lr', str(lr), '--frame-shift', str(frame_shift), '--egs', 'ark:{}/cegs.{}.ark'.format(egs_dir, ((num_archives_processed % num_archives) + 1)), '--l2-regularize-factor', str((1.0 / num_jobs)), '--minibatch-size', '128,64', '--new-model', os.path.join(dirname, '{}.{}.pt'.format(iter_no, job_id)), os.path.join(dirname, '{}.pt'.format(iter_no))])
    return process_out.returncode
def convert_kilt_to_fairseq(dataset):
    """Convert a KILT dataset into parallel (source, target) lists for fairseq.

    Targets are ' | '-joined titles of provenance pages with bleu_score > 0.5;
    documents with no qualifying provenance are skipped. Template questions in
    a document's meta are emitted as extra sources sharing the same target.
    """
    source = []
    target = []
    for doc in tqdm(dataset, desc='Processing'):
        title_set = set((prov['title'] for out in doc['output'] if ('provenance' in out) for prov in out['provenance'] if (prov.get('bleu_score', 1) > 0.5)))
        # idiomatic emptiness check (was: list(title_set) == [])
        if not title_set:
            continue
        title = ' | '.join(title_set)
        source.append(create_input(doc, max_length=384, start_delimiter='[START_ENT]', end_delimiter='[END_ENT]'))
        target.append(title)
        if (('meta' in doc) and ('template_questions' in doc['meta'])):
            for template_question in doc['meta']['template_questions']:
                source.append(template_question)
                target.append(title)
    return (source, target)
def parse_args():
    """Parse command-line arguments for the character-similarity script.

    Returns an argparse.Namespace with ``multiargs`` (the characters given
    positionally) and ``ted`` (whether to use tree edit distance).
    """
    usage = '\n1. You can compute character similarity by:\npython char_sim.py \n\n2. You can use ted in computing character similarity by:\npython char_sim.py -t\n\n'
    parser = argparse.ArgumentParser(description='A script to compute Chinese character (Kanji) similarity', usage=usage)
    parser.add_argument('multiargs', nargs='*', type=str, default=None, help='Chinese characters in question')
    # fixed help-text typo ("distence") and added the missing separator
    parser.add_argument('--ted', '-t', action='store_true', default=False, help='True=to use tree edit distance (TED); False=to use string edit distance')
    args = parser.parse_args()
    return args
def pytest_configure(config):
    """Register the custom environment-gating pytest markers."""
    markers = (
        'is_pipeline_test: mark test to run only when pipeline are tested',
        'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested',
        'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested',
        'is_staging_test: mark test to run only in the staging environment',
    )
    for marker in markers:
        config.addinivalue_line('markers', marker)
@pytest.mark.parametrize('loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, EIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
    """Every IoU-family loss must reduce to 0 when all sample weights are 0.

    NOTE(review): the decorator was truncated to ``.parametrize(...)`` in
    this file; restored to ``@pytest.mark.parametrize`` — confirm upstream.
    """
    pred = torch.rand((10, 4))
    target = torch.rand((10, 4))
    weight = torch.zeros(10)
    loss = loss_class()(pred, target, weight)
    assert (loss == 0.0)
class M2M100Model(metaclass=DummyObject):
    """Import-time stand-in that errors on use unless torch is installed."""
    _backends = ['torch']  # backends that must be present to instantiate

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_digits_cosine_greedi_ll_object():
    """Regression test: a mixture (facility location + graph cut) selection
    under a GreeDi optimizer with lazy sub-optimizers reproduces the recorded
    ranking and gains on the digits data."""
    model1 = FacilityLocationSelection(100)
    model2 = GraphCutSelection(100)
    model = MixtureSelection(100, [model1, model2], [1.0, 0.3], metric='cosine', optimizer=GreeDi(optimizer1='lazy', optimizer2='lazy', random_state=0))
    model.fit(X_digits)
    # only the first 30 selections are compared against the stored values
    assert_array_equal(model.ranking[:30], digits_cosine_greedi_ranking[:30])
    assert_array_almost_equal(model.gains[:30], digits_cosine_greedi_gains[:30], 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
def save_tensor_img(path, tensor):
    """Convert ``tensor`` to a PIL image and write it to ``path``."""
    transforms.ToPILImage()(tensor).save(path)
def test_digits_cosine_lazy_sparse():
    """Facility-location selection on a precomputed sparse cosine matrix with
    the lazy optimizer must reproduce the recorded ranking and gains."""
    model = FacilityLocationSelection(100, 'precomputed', optimizer='lazy')
    model.fit(X_digits_cosine_sparse)
    assert_array_equal(model.ranking, digits_cosine_ranking)
    assert_array_almost_equal(model.gains, digits_cosine_gains, 4)
def process_tensorboard_in_callbacks(callbacks, mode='train', rank=None):
    """Redirect a TensorBoard callback's log_dir to a local path and mirror it remotely.

    If `callbacks` contains a tf.keras TensorBoard callback, its log_dir is
    replaced with a local path and a copy-callback is appended that uploads the
    local directory back to the original location (only from rank 0 when a rank
    is given). Returns the replacement local log dir, or None when no
    TensorBoard callback is found.
    """
    import tensorflow as tf

    class EpochCopyCallback(tf.keras.callbacks.Callback):
        # Uploads the local log dir at the end of every epoch
        # (used when TensorBoard's update_freq == 'epoch').
        def __init__(self, local_dir, remote_dir, rank=None):
            super(EpochCopyCallback, self).__init__()
            self.local_dir = local_dir
            self.remote_dir = remote_dir
            self.rank = rank

        def on_epoch_end(self, epoch, logs=None):
            if (self.rank is not None):
                if (self.rank == 0):
                    # only the chief worker uploads, to avoid concurrent writes
                    put_local_dir_tree_to_remote(self.local_dir, self.remote_dir)

    class TrainBatchCopyCallback(tf.keras.callbacks.Callback):
        # Uploads every `freq` training batches (batch-level update_freq, fit mode).
        def __init__(self, local_dir, remote_dir, freq, rank=None):
            super(TrainBatchCopyCallback, self).__init__()
            self.local_dir = local_dir
            self.remote_dir = remote_dir
            self.freq = freq
            self.rank = rank

        def on_train_batch_end(self, batch, logs=None):
            if (self.rank is not None):
                if (self.rank == 0):
                    if ((batch % self.freq) == 0):
                        put_local_dir_tree_to_remote(self.local_dir, self.remote_dir)

    class BatchCopyCallback(tf.keras.callbacks.Callback):
        # Uploads every `freq` test batches (batch-level update_freq, non-fit modes).
        def __init__(self, local_dir, remote_dir, freq, rank=None):
            super(BatchCopyCallback, self).__init__()
            self.local_dir = local_dir
            self.remote_dir = remote_dir
            self.freq = freq
            self.rank = rank

        def on_test_batch_end(self, batch, logs=None):
            if (self.rank is not None):
                if (self.rank == 0):
                    if ((batch % self.freq) == 0):
                        put_local_dir_tree_to_remote(self.local_dir, self.remote_dir)

    tensorboard = get_specific_object_from_callbacks(tf.keras.callbacks.TensorBoard, callbacks)
    if tensorboard:
        original_log_dir = tensorboard.log_dir
        replaced_log_dir = get_replaced_path(original_log_dir)
        # point TensorBoard at the local dir; a copy callback mirrors it back
        tensorboard.log_dir = replaced_log_dir
        if (tensorboard.update_freq == 'epoch'):
            copy_callback = EpochCopyCallback(replaced_log_dir, original_log_dir, rank)
        else:
            # numeric update_freq: throttle uploads to at least every 10 batches
            update_freq = (tensorboard.update_freq if (tensorboard.update_freq > 10) else 10)
            if (mode == 'fit'):
                copy_callback = TrainBatchCopyCallback(replaced_log_dir, original_log_dir, update_freq, rank)
            else:
                copy_callback = BatchCopyCallback(replaced_log_dir, original_log_dir, update_freq, rank)
        callbacks.append(copy_callback)
        return replaced_log_dir
    return None
class GATConv(nn.Module):
    """Graph attention convolution layer (single head).

    NOTE(review): the layer construction in __init__ is elided in the source
    ('...'); forward() assumes self.theta, self.atten_src and self.atten_dst
    are linear layers created there -- confirm against the full file.
    """

    def __init__(self):
        super().__init__()
        ...
        self.reset_parameters()

    def forward(self, X: torch.Tensor, g: Graph) -> torch.Tensor:
        # project node features
        X = self.theta(X)
        # per-node attention terms for source and destination roles
        x_for_src = self.atten_src(X)
        x_for_dst = self.atten_dst(X)
        # edge score: source term gathered at edge sources + dest term at edge dests
        e_atten_score = (x_for_src[g.e_src] + x_for_dst[g.e_dst])
        e_atten_score = F.leaky_relu(e_atten_score).squeeze()
        # message passing with per-destination softmax-normalized attention weights
        X = g.v2v(X, aggr='softmax_then_sum', e_weight=e_atten_score)
        X = F.elu(X)
        return X
def restrictdict(d, keys):
    """Return the sub-dict of *d* restricted to the keys also present in *keys*."""
    shared_keys = [k for k in d.keys() if (k in keys)]
    return subdict(d, shared_keys)
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 truncated into feature stages (FID-style extractor).

    output_blocks selects which stages to return:
      0: 64-ch early convs, 1: 192-ch, 2: 768-ch Mixed 5x/6x, 3: 2048-ch pooled.
    """

    # index of the last (pooled, 2048-dim) block
    DEFAULT_BLOCK_INDEX = 3
    # map of feature dimensionality -> block index
    BLOCK_INDEX_BY_DIM = {64: 0, 192: 1, 768: 2, 2048: 3}

    def __init__(self, output_blocks=[DEFAULT_BLOCK_INDEX], resize_input=True, normalize_input=True, requires_grad=False):
        # NOTE(review): mutable default list kept as-is; it is only read, never mutated.
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert (self.last_needed_block <= 3), 'Last possible output block index is 3'
        self.blocks = nn.ModuleList()
        inception = models.inception_v3(pretrained=True)
        # Block 0: input -> 64-channel features
        block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
        self.blocks.append(nn.Sequential(*block0))
        # Deeper blocks are built only when actually needed.
        if (self.last_needed_block >= 1):
            # Block 1: 64 -> 192 channels
            block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
            self.blocks.append(nn.Sequential(*block1))
        if (self.last_needed_block >= 2):
            # Block 2: Mixed 5x/6x stages -> 768 channels
            block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]
            self.blocks.append(nn.Sequential(*block2))
        if (self.last_needed_block >= 3):
            # Block 3: Mixed 7x stages + global average pool -> 2048 channels
            block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]
            self.blocks.append(nn.Sequential(*block3))
        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Run the network, collecting the requested intermediate activations.

        Returns a list of tensors, one per index in self.output_blocks, in order.
        When normalize_input is True, inputs are expected in [0, 1].
        """
        outp = []
        x = inp
        if self.resize_input:
            # Inception expects 299x299 inputs
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)
        if self.normalize_input:
            # map [0, 1] -> [-1, 1]
            x = ((2 * x) - 1)
        for (idx, block) in enumerate(self.blocks):
            x = block(x)
            if (idx in self.output_blocks):
                outp.append(x)
            if (idx == self.last_needed_block):
                # skip unused deeper blocks
                break
        return outp
def lora_reassign_weights(model, state_dict, r, lora_alpha, fan_in_fan_out=False, merge=True):
    """Merge (or unmerge) LoRA deltas from *state_dict* into *model*'s projection weights.

    Handles fused projections (qkv_proj, gate_up_proj) by applying each LoRA
    pair to its shard, and plain projections (o_proj, down_proj) whole.
    The delta is scaled by lora_alpha / r and added when merge=True,
    subtracted when merge=False. Sets model.is_merged accordingly.

    Raises AssertionError if the model is already in the requested state or if
    any LoRA tensor in state_dict went unused.
    """
    is_merged = getattr(model, 'is_merged', False)
    assert is_merged != merge, f'{is_merged} != {merge}: if is_merged, then must be unmerge; if not is_merged, then must merge'
    named_params = [(n, p) for (n, p) in model.named_parameters()]
    scaling = lora_alpha / r
    # strip the PEFT wrapper prefix so keys line up with model parameter names
    state_dict = {k.replace('base_model.model.', ''): v for (k, v) in state_dict.items()}
    replaced = set()
    merged_names = {'qkv_proj': ['q_proj', 'k_proj', 'v_proj'], 'gate_up_proj': ['gate_proj', 'up_proj']}
    non_merged_names = ['o_proj', 'down_proj']
    for (name, param) in named_params:
        param.requires_grad = False
        if '_proj.weight' not in name:
            continue
        for (wn, wn_series) in merged_names.items():
            if name.endswith(f'{wn}.weight'):
                # fused weight: apply each constituent's LoRA pair to its shard
                for (stride_id, att_weight_name) in enumerate(wn_series):
                    lora_a = name.replace(f'{wn}.weight', f'{att_weight_name}.lora_A.weight')
                    lora_b = name.replace(f'{wn}.weight', f'{att_weight_name}.lora_B.weight')
                    shard_size = param.shape[0] // len(wn_series)
                    if lora_a in state_dict:
                        assert lora_b in state_dict, f'{lora_b} not in state_dict'
                        assert state_dict[lora_b].shape[1] == r, f'r={r!r} != {state_dict[lora_b].shape}'
                        # BUG FIX: the matmul operator was lost in the source;
                        # the LoRA delta is B @ A, optionally transposed, times scaling.
                        matrix = transpose(state_dict[lora_b] @ state_dict[lora_a], fan_in_fan_out) * scaling
                        assert param.data[shard_size * stride_id:shard_size * (stride_id + 1)].shape == matrix.shape
                        if merge:
                            param.data[shard_size * stride_id:shard_size * (stride_id + 1)] += matrix
                        else:
                            param.data[shard_size * stride_id:shard_size * (stride_id + 1)] -= matrix
                        replaced.add(lora_a)
                        replaced.add(lora_b)
        for wn in non_merged_names:
            if name.endswith(f'{wn}.weight'):
                lora_a = name.replace(f'{wn}.weight', f'{wn}.lora_A.weight')
                lora_b = name.replace(f'{wn}.weight', f'{wn}.lora_B.weight')
                if lora_a in state_dict:
                    assert lora_b in state_dict
                    # BUG FIX: same missing-matmul repair as above.
                    matrix = transpose(state_dict[lora_b] @ state_dict[lora_a], fan_in_fan_out) * scaling
                    assert param.data.shape == matrix.shape, f'invalid shape: {name} {param.data.shape} != {matrix.shape}'
                    if merge:
                        param.data += matrix
                    else:
                        param.data -= matrix
                    replaced.add(lora_a)
                    replaced.add(lora_b)
    no_replaced = [k for k in state_dict.keys() if k not in replaced]
    assert len(no_replaced) == 0, f'some lora states not loaded, check again!: {no_replaced}'
    model.is_merged = merge
def get_args_parser():
    """Build the argument parser for the transformer detector (DETR-style) trainer."""
    p = argparse.ArgumentParser('Set transformer detector', add_help=False)
    # --- optimization ---
    p.add_argument('--lr', default=0.0001, type=float)
    p.add_argument('--lr_backbone', default=1e-05, type=float)
    p.add_argument('--batch_size', default=2, type=int)
    p.add_argument('--weight_decay', default=0.0001, type=float)
    p.add_argument('--epochs', default=300, type=int)
    p.add_argument('--lr_drop', default=200, type=int)
    p.add_argument('--clip_max_norm', default=0.1, type=float, help='gradient clipping max norm')
    p.add_argument('--frozen_weights', type=str, default=None, help='Path to the pretrained model. If set, only the mask head will be trained')
    # --- backbone ---
    p.add_argument('--backbone', default='resnet50', type=str, help='Name of the convolutional backbone to use')
    p.add_argument('--dilation', action='store_true', help='If true, we replace stride with dilation in the last convolutional block (DC5)')
    p.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), help='Type of positional embedding to use on top of the image features')
    # --- transformer ---
    p.add_argument('--enc_layers', default=6, type=int, help='Number of encoding layers in the transformer')
    p.add_argument('--dec_layers', default=6, type=int, help='Number of decoding layers in the transformer')
    p.add_argument('--dim_feedforward', default=2048, type=int, help='Intermediate size of the feedforward layers in the transformer blocks')
    p.add_argument('--hidden_dim', default=256, type=int, help='Size of the embeddings (dimension of the transformer)')
    p.add_argument('--dropout', default=0.1, type=float, help='Dropout applied in the transformer')
    p.add_argument('--nheads', default=8, type=int, help="Number of attention heads inside the transformer's attentions")
    p.add_argument('--num_queries', default=100, type=int, help='Number of query slots')
    p.add_argument('--pre_norm', action='store_true')
    # --- segmentation ---
    p.add_argument('--masks', action='store_true', help='Train segmentation head if the flag is provided')
    # --- losses & matcher ---
    p.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', help='Disables auxiliary decoding losses (loss at each layer)')
    p.add_argument('--set_cost_class', default=1, type=float, help='Class coefficient in the matching cost')
    p.add_argument('--set_cost_bbox', default=5, type=float, help='L1 box coefficient in the matching cost')
    p.add_argument('--set_cost_giou', default=2, type=float, help='giou box coefficient in the matching cost')
    p.add_argument('--mask_loss_coef', default=1, type=float)
    p.add_argument('--dice_loss_coef', default=1, type=float)
    p.add_argument('--bbox_loss_coef', default=5, type=float)
    p.add_argument('--giou_loss_coef', default=2, type=float)
    p.add_argument('--eos_coef', default=0.1, type=float, help='Relative classification weight of the no-object class')
    # --- dataset paths ---
    p.add_argument('--coco_path', type=str, default=None)
    p.add_argument('--coco_panoptic_path', type=str)
    p.add_argument('--remove_difficult', action='store_true')
    # --- runtime ---
    p.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    p.add_argument('--device', default='cuda', help='device to use for training / testing')
    p.add_argument('--seed', default=42, type=int)
    p.add_argument('--resume', default='', help='resume from checkpoint')
    p.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    p.add_argument('--eval', action='store_true')
    p.add_argument('--num_workers', default=2, type=int)
    # --- distributed ---
    p.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    p.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # --- task-specific (paintskills) ---
    p.add_argument('--num_classes', default=(- 1), type=int, help='Number of target classes')
    p.add_argument('--num_colors', default=(- 1), type=int, help='Number of target colors')
    p.add_argument('--color_loss_coef', default=5, type=float)
    p.add_argument('--dataset_file', default='skill')
    p.add_argument('--skill_name', type=str, default='object', choices=['object', 'count', 'spatial'])
    p.add_argument('--paintskills_dir', type=str, default=None)
    return p
def _read_score_file(filename):
    """Parse a '<score> <label>' file; return (scores, labels, records, count, num_real, num_fake)."""
    scores = []
    labels = []
    records = []
    count = 0.0
    num_real = 0.0
    num_fake = 0.0
    with open(filename, 'r') as fh:
        lines = fh.readlines()
    for line in lines:
        count += 1
        tokens = line.split()
        score = float(tokens[0])
        label = float(tokens[1])
        scores.append(score)
        labels.append(label)
        records.append({'map_score': score, 'label': label})
        if label == 1:
            num_real += 1
        else:
            num_fake += 1
    return scores, labels, records, count, num_real, num_fake


def _threshold_metrics(records, threshold, count, num_real, num_fake):
    """Compute (ACC, APCER, BPCER, ACER) of *records* at a fixed decision *threshold*."""
    # type1: real samples rejected; type2: fake samples accepted
    type1 = len([s for s in records if (s['map_score'] <= threshold) and (s['label'] == 1)])
    type2 = len([s for s in records if (s['map_score'] > threshold) and (s['label'] == 0)])
    acc = 1 - ((type1 + type2) / count)
    apcer = type2 / num_fake
    bpcer = type1 / num_real
    return acc, apcer, bpcer, (apcer + bpcer) / 2.0


def performances(map_score_val_filename, map_score_test_filename):
    """Compute anti-spoofing metrics on validation and test score files.

    The decision threshold is chosen at the validation EER and applied to the
    test set; metrics at the test set's own EER threshold are also reported.

    Returns (val_threshold, best_test_threshold, val_ACC, val_ACER,
             test_ACC, test_APCER, test_BPCER, test_ACER, test_threshold_ACER).
    """
    # --- validation: choose the operating threshold at the EER point ---
    (val_scores, val_labels, val_data, count, num_real, num_fake) = _read_score_file(map_score_val_filename)
    (fpr, tpr, threshold) = roc_curve(val_labels, val_scores, pos_label=1)
    (val_err, val_threshold) = get_err_threhold(fpr, tpr, threshold)
    (val_ACC, val_APCER, val_BPCER, val_ACER) = _threshold_metrics(val_data, val_threshold, count, num_real, num_fake)
    # --- test set, evaluated at the validation threshold ---
    (test_scores, test_labels, test_data, count, num_real, num_fake) = _read_score_file(map_score_test_filename)
    (test_ACC, test_APCER, test_BPCER, test_ACER) = _threshold_metrics(test_data, val_threshold, count, num_real, num_fake)
    # --- test set, evaluated at its own EER threshold ---
    (fpr_test, tpr_test, threshold_test) = roc_curve(test_labels, test_scores, pos_label=1)
    (err_test, best_test_threshold) = get_err_threhold(fpr_test, tpr_test, threshold_test)
    (test_threshold_ACC, test_threshold_APCER, test_threshold_BPCER, test_threshold_ACER) = _threshold_metrics(test_data, best_test_threshold, count, num_real, num_fake)
    return (val_threshold, best_test_threshold, val_ACC, val_ACER, test_ACC, test_APCER, test_BPCER, test_ACER, test_threshold_ACER)
def get_data(sess, data_dir, shards, rank, pmap, fmap, n_batch_train, n_batch_test, n_batch_init, resolution, rnd_crop):
    """Build train/valid TFRecord iterators and an initialization batch.

    Returns (train_iterator, valid_iterator, data_init).
    """
    log2_res = int(np.log2(resolution))
    # resolution must be an exact power of two
    assert resolution == (2 ** log2_res)
    train_file = get_tfr_file(data_dir, 'train', log2_res)
    valid_file = get_tfr_file(data_dir, 'validation', log2_res)
    train_itr = input_fn(train_file, shards, rank, pmap, fmap, n_batch_train, resolution, rnd_crop, True)
    valid_itr = input_fn(valid_file, shards, rank, pmap, fmap, n_batch_test, resolution, rnd_crop, False)
    data_init = make_batch(sess, train_itr, n_batch_train, n_batch_init)
    return (train_itr, valid_itr, data_init)
class LocallyConnected2D(ZooKerasLayer):
    """Zoo Keras wrapper for a 2-D locally-connected layer (unshared conv weights)."""

    def __init__(self, nb_filter, nb_row, nb_col, activation=None, border_mode='valid',
                 subsample=(1, 1), dim_ordering='th', W_regularizer=None,
                 b_regularizer=None, bias=True, input_shape=None, **kwargs):
        # the backend expects the shape as a list (or None when unspecified)
        shape = list(input_shape) if input_shape else None
        super(LocallyConnected2D, self).__init__(
            None, nb_filter, nb_row, nb_col, activation, border_mode, subsample,
            dim_ordering, W_regularizer, b_regularizer, bias, shape, **kwargs)
class PostHostConstConverter(GraphRewriterBase):
    """Convert qualifying quantization-range Const nodes into HostConst ops."""

    # NOTE(review): decorator name truncated in the source ('_elapsed_time');
    # restored as the codebase's usual timing decorator -- verify against the
    # original file.
    @dump_elapsed_time('Pass PostHostConstConverter')
    def do_transformation(self):
        # escape hatch: conversion can be disabled via environment variable
        if (os.environ.get('DISABLE_HOSTCONST') == '1'):
            return self.model
        output_graph_def = graph_pb2.GraphDef()
        for node in self.model.node:
            new_node = node_def_pb2.NodeDef()
            new_node.CopyFrom(node)
            # clear the device so placement is recomputed for the new op type
            new_node.device = ''
            # dtype enum 1 = float32, 3 = int32; only min/max range scalars qualify
            if ((node.op == 'Const') and (node.attr['dtype'].type in [1, 3]) and (node.name.endswith('_min') or node.name.endswith('_max') or node.name.endswith('_max_only') or node.name.endswith('_min_only'))):
                new_node.op = 'HostConst'
            output_graph_def.node.extend([new_node])
        return output_graph_def
def generate_and_save_features(args: Args):
    """Compute molecular features for every SMILES in args.data_path and save them.

    Features are checkpointed to '<save_path>_temp/<i>.npz' every
    args.save_frequency molecules so an interrupted run can resume; on success
    the combined array is written to args.save_path and the temp dir removed.
    """
    makedirs(args.save_path, isfile=True)
    smiles = get_smiles(path=args.data_path, smiles_column=args.smiles_column)
    features_generator = get_features_generator(args.features_generator)
    temp_save_dir = (args.save_path + '_temp')
    if args.restart:
        # restart: discard any previous output and checkpoints
        if os.path.exists(args.save_path):
            os.remove(args.save_path)
        if os.path.exists(temp_save_dir):
            shutil.rmtree(temp_save_dir)
    else:
        if os.path.exists(args.save_path):
            raise ValueError(f'"{args.save_path}" already exists and args.restart is False.')
        if os.path.exists(temp_save_dir):
            # resume from previously checkpointed features
            (features, temp_num) = load_temp(temp_save_dir)
    if (not os.path.exists(temp_save_dir)):
        makedirs(temp_save_dir)
        (features, temp_num) = ([], 0)
    # skip molecules that were already featurized in a previous run
    smiles = smiles[len(features):]
    if args.sequential:
        features_map = map(features_generator, smiles)
    else:
        features_map = Pool().imap(features_generator, smiles)
    temp_features = []
    for (i, feats) in tqdm(enumerate(features_map), total=len(smiles)):
        temp_features.append(feats)
        # checkpoint every save_frequency molecules, and once at the very end
        if (((i > 0) and (((i + 1) % args.save_frequency) == 0)) or (i == (len(smiles) - 1))):
            save_features(os.path.join(temp_save_dir, f'{temp_num}.npz'), temp_features)
            features.extend(temp_features)
            temp_features = []
            temp_num += 1
    try:
        save_features(args.save_path, features)
        shutil.rmtree(temp_save_dir)
    except OverflowError:
        # keep the per-chunk files when the combined array cannot be written
        print('Features array is too large to save as a single file. Instead keeping features as a directory of files.')
def load_energy_latency2_data(HOSTS):
    """Load and normalize the energy/latency2 scheduling dataset for *HOSTS* hosts.

    Returns (dataset, dataset_size, (max_ips_container, max_energy, max_response))
    where each dataset entry is (feature_matrix, target_tensor); features per host
    are [host_cpu, container_ips, one-hot allocation..., energy, response].
    """
    dataset_path = ('datasets/energy_latency2_' + str(HOSTS)) + '_scheduling.csv'
    data = (pd.read_csv(dataset_path, header=None) if os.path.exists(dataset_path) else pd.read_csv(('scheduler/BaGTI/' + dataset_path), header=None))
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    data = data.values.astype(float)
    max_ips_container = max(data.max(0)[HOSTS:(2 * HOSTS)])
    max_energy = data.max(0)[(3 * HOSTS)]
    max_response = data.max(0)[((3 * HOSTS) + 1)]
    dataset = []
    print('Dataset size', data.shape[0])
    for i in range(data.shape[0]):
        (cpuH, cpuC, alloc) = ([], [], [])
        for j in range(HOSTS):
            cpuH.append((data[i][j] / 100))  # host CPU percentage -> fraction
            cpuC.append((data[i][(j + HOSTS)] / max_ips_container))  # normalized container IPS
            oneHot = ([0] * HOSTS)
            # allocation column holds a host index, or a negative value for "unallocated"
            if (int(data[i][(j + (2 * HOSTS))]) >= 0):
                oneHot[int(data[i][(j + (2 * HOSTS))])] = 1
            alloc.append(oneHot)
        cpuH = np.array([cpuH]).transpose()
        cpuC = np.array([cpuC]).transpose()
        alloc = np.array(alloc)
        # broadcast the scalar (energy, response) pair across every host row
        pred_vals = np.broadcast_to(np.array([(data[i][(3 * HOSTS)] / max_energy), (data[i][((3 * HOSTS) + 1)] / max_response)]), (HOSTS, 2))
        dataset.append((np.concatenate((cpuH, cpuC, alloc, pred_vals), axis=1), torch.Tensor([((data[i][(- 2)] - data.min(0)[(- 2)]) / (data.max(0)[(- 2)] - data.min(0)[(- 2)])), (max(0, data[i][(- 1)]) / data.max(0)[(- 1)])])))
    return (dataset, len(dataset), (max_ips_container, max_energy, max_response))
def filter_invoice_items(args: argparse.Namespace, rows: typing.List) -> typing.Dict:
    """Filter invoice rows by date range and charge/payment type.

    Returns a dict with the filtered 'rows', a human-readable 'header_text'
    and a 'pdf_filename' derived from the invoice number, date range and
    charge/credit selector.
    """
    try:
        import vast_pdf
        import dateutil
        from dateutil import parser
    except ImportError:
        print("\nWARNING: The 'vast_pdf' library is not present. This library is used to print invoices in PDF format. If\n you do not need this feature you can ignore this message. To get the library you should download the vast-python\n github repository. Just do ':vast-ai/vast-python.git' and then 'cd vast-python'. Once in that\n directory you can run 'vast.py' and it will have access to 'vast_pdf.py'. The library depends on a Python\n package called Borb to make the PDF files. To install this package do 'pip3 install borb'.\n")
    try:
        vast_pdf
    except NameError:
        # stand-in object so the invoice_number attribute below always exists
        vast_pdf = Object()
        vast_pdf.invoice_number = (- 1)
    selector_flag = ''
    # BUG FIX: the source collapsed these into one invalid annotated assignment
    end_timestamp: float = 0
    start_timestamp: float = 0
    start_date_txt = ''
    end_date_txt = ''
    if args.end_date:
        try:
            end_date = dateutil.parser.parse(str(args.end_date))
            end_date_txt = end_date.isoformat()
            end_timestamp = time.mktime(end_date.timetuple())
        except ValueError:
            print('Warning: Invalid end date format! Ignoring end date!')
    if args.start_date:
        try:
            start_date = dateutil.parser.parse(str(args.start_date))
            start_date_txt = start_date.isoformat()
            start_timestamp = time.mktime(start_date.timetuple())
        except ValueError:
            print('Warning: Invalid start date format! Ignoring start date!')
    if args.only_charges:
        type_txt = 'Only showing charges.'
        selector_flag = 'only_charges'

        def type_filter_fn(row):
            return (True if (row['type'] == 'charge') else False)
    elif args.only_credits:
        type_txt = 'Only showing credits.'
        selector_flag = 'only_credits'

        def type_filter_fn(row):
            return (True if (row['type'] == 'payment') else False)
    else:
        type_txt = ''

        def type_filter_fn(row):
            return True
    if args.end_date:
        if args.start_date:
            header_text = f'Invoice items after {start_date_txt} and before {end_date_txt}.'
        else:
            header_text = f'Invoice items before {end_date_txt}.'
    elif args.start_date:
        header_text = f'Invoice items after {start_date_txt}.'
    else:
        header_text = ' '
    header_text = ((header_text + ' ') + type_txt)
    # keep rows inside the time window, of the requested type, with a nonzero amount
    rows = list(filter((lambda row: ((end_timestamp >= row['timestamp'] >= start_timestamp) and type_filter_fn(row) and (float(row['amount']) != 0))), rows))
    if start_date_txt:
        start_date_txt = ('S:' + start_date_txt)
    if end_date_txt:
        end_date_txt = ('E:' + end_date_txt)
    # BUG FIX: 'invoice_number' was an undefined name in the source; the only
    # invoice number in scope is vast_pdf.invoice_number -- verify.
    pdf_filename_fields = list(filter((lambda fld: (False if (fld == '') else True)), [str(vast_pdf.invoice_number), start_date_txt, end_date_txt, selector_flag]))
    filename = (('invoice_' + '-'.join(pdf_filename_fields)) + '.pdf')
    return {'rows': rows, 'header_text': header_text, 'pdf_filename': filename}
def getEntityEmbeddings(model_name, kge_model, hops):
    """Map entity names to their KGE embedding vectors.

    Entity ids are read from the model's entity_ids.del file (the 'half'
    variant when 'half' appears in *hops*). Returns {entity_name: embedding}.
    """
    e = {}
    model_dir = f'../../pretrained_models/embeddings/{model_name}'
    entity_dict = f'{model_dir}_fbwq_full/entity_ids.del'
    if ('half' in hops):
        entity_dict = f'{model_dir}_fbwq_half/entity_ids.del'
        print('Loading half entity_ids.del')
    embedder = kge_model._entity_embedder
    # BUG FIX: the file handle was opened without a with-statement and would
    # leak if the loop raised; the context manager guarantees closure.
    with open(entity_dict, 'r') as f:
        for line in f:
            # strip the trailing newline, then split "id<TAB>name"
            line = line[:(- 1)].split('\t')
            ent_id = int(line[0])
            ent_name = line[1]
            e[ent_name] = embedder._embeddings(torch.LongTensor([ent_id]))[0]
    return e
def tensorboard_cb(log_path):
    """Create a TensorBoard callback writing graphs and images to *log_path* once per epoch."""
    return tf.keras.callbacks.TensorBoard(
        log_dir=log_path, histogram_freq=0, write_graph=True, write_images=True,
        update_freq='epoch', profile_batch=2, embeddings_freq=0, embeddings_metadata=None)
# NOTE(review): the decorator name was truncated in the source ('_model');
# restored as timm's @register_model -- verify against the original file.
@register_model
def tf_efficientnet_b1_ap(pretrained=False, **kwargs):
    """EfficientNet-B1 (TF AdvProp port): TF batch-norm epsilon and 'same' padding."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnet('tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model
def main():
    """CLI entry point: shuffle a point-cloud file into a new file."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input', help='Input file', required=True)
    arg_parser.add_argument('--output', help='Output file', required=True)
    parsed = arg_parser.parse_args()
    shuffle_pc(parsed.input, parsed.output)
def softplus_inverse(t: torch.Tensor, beta: float=1.0, threshold: float=20) -> torch.Tensor:
    """Inverse of softplus: returns x such that softplus(x, beta, threshold) == t.

    Above `threshold` softplus behaves as the identity, so those entries are
    passed through unchanged.

    BUG FIX: the original aliased the input (`res = t`) and then wrote into it,
    mutating the caller's tensor in place; cloning leaves the input untouched.
    """
    below_thresh = ((beta * t) < threshold)
    res = t.clone()
    # clamp_min guards log(0) when expm1 underflows for very small inputs
    res[below_thresh] = (torch.log(torch.expm1((beta * t[below_thresh])).clamp_min(1e-323)) / beta)
    return res
def log_scalar(key: str, value: float, weight: float=1, priority: int=10, round: Optional[int]=None):
    """Record *value* under *key* in every active aggregator, creating the meter on first use."""
    for aggregator in get_active_aggregators():
        if key not in aggregator:
            # lazily register an averaging meter with the requested rounding/priority
            aggregator.add_meter(key, AverageMeter(round=round), priority)
        aggregator[key].update(value, weight)
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute *k* to *v* on the quantizer submodule *quantizer* of *mod*.

    *name* is only used in the warning emitted when *mod* has no such quantizer.
    """
    quantizer_mod = getattr(mod, quantizer, None)
    if (quantizer_mod is not None):
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        # BUG FIX: Logger.warn is a deprecated alias; warning() is the
        # documented method.
        logger.warning(f'{name} has no {quantizer}')
def concat_datasets(datasets):
    """Collapse per-split dataset lists into single dataset objects.

    Non-train splits must contain exactly one dataset and are unwrapped.
    For 'train', map-style datasets are concatenated and webdataset pipelines
    chained; the split becomes a single dataset, or a (concat, chained) tuple
    when both kinds are present. Mutates and returns *datasets*.
    """
    for split_name in datasets:
        if (split_name != 'train'):
            assert (len(datasets[split_name]) == 1), 'Do not support multiple {} datasets.'.format(split_name)
            datasets[split_name] = datasets[split_name][0]
        else:
            (iterable_datasets, map_datasets) = ([], [])
            for dataset in datasets[split_name]:
                if isinstance(dataset, wds.DataPipeline):
                    # webdataset pipelines cannot be ConcatDataset'ed; chain them instead
                    logging.info("Dataset {} is IterableDataset, can't be concatenated.".format(dataset))
                    iterable_datasets.append(dataset)
                elif isinstance(dataset, IterableDataset):
                    raise NotImplementedError('Do not support concatenation of generic IterableDataset.')
                else:
                    map_datasets.append(dataset)
            # chain iterable pipelines; concatenate map-style datasets
            chained_datasets = (ChainDataset(iterable_datasets) if (len(iterable_datasets) > 0) else None)
            concat_datasets = (ConcatDataset(map_datasets) if (len(map_datasets) > 0) else None)
            train_datasets = (concat_datasets, chained_datasets)
            train_datasets = tuple([x for x in train_datasets if (x is not None)])
            # unwrap the tuple when only one kind of dataset is present
            train_datasets = (train_datasets[0] if (len(train_datasets) == 1) else train_datasets)
            datasets[split_name] = train_datasets
    return datasets
# NOTE(review): the decorator name was truncated in the source ('_module');
# restored as @register_module (mmdet-style pipeline registration) -- verify
# against the original file.
@register_module
class DefaultFormatBundle(object):
    """Format detection results into DataContainers with the standard fields."""

    def __call__(self, results):
        if ('img' in results):
            img = results['img']
            if (len(img.shape) < 3):
                # grayscale: add a trailing channel axis
                img = np.expand_dims(img, (- 1))
            # HWC -> CHW, contiguous for tensor conversion
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels', 'gt_coefs', 'gt_resized_masks']:
            if (key not in results):
                continue
            results[key] = DC(to_tensor(results[key]))
        if ('gt_masks' in results):
            # masks stay on CPU (variable-sized structures)
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if ('gt_semantic_seg' in results):
            results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][(None, ...)]), stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__
class Qreg(Node):
    """AST node representing an OPENQASM quantum-register declaration."""

    def __init__(self, children):
        super().__init__('qreg', children, None)
        self.id = children[0]
        # mirror the identifier's metadata on this node for convenient access
        self.name = self.id.name
        self.line = self.id.line
        self.file = self.id.file
        self.index = self.id.index

    def to_string(self, indent):
        """Print an indented textual representation of this node."""
        print(indent * ' ', 'qreg')
        self.children[0].to_string(indent + 3)

    def qasm(self, prec=15):
        """Return the OPENQASM source text for this declaration."""
        return 'qreg ' + self.id.qasm(prec) + ';'
def test_actionAngleTorus_hessian_symm():
    """The torus Hessian computed with nosym=True should still be symmetric to ~3%."""
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import MWPotential2014
    torus = actionAngleTorus(pot=MWPotential2014)
    jr, jphi, jz = (0.075, 1.1, 0.05)
    hess = torus.hessianFreqs(jr, jphi, jz, tol=0.0001, nosym=True)[0]
    rel_asymmetry = numpy.fabs((hess - hess.T) / hess)
    assert numpy.all(rel_asymmetry < 0.03), 'actionAngleTorus Hessian is not symmetric'
    return None
class AtorchCrossEntropyLoss(nn.Module):
    """Cross-entropy module wrapping the fused `cross_entropy_loss` kernel.

    Supports label smoothing, LSE regularization, an ignore index, in-place
    backward and an optional process group; requires CUDA tensors in forward.
    """

    def __init__(self, ignore_index=(- 100), reduction='mean', label_smoothing=0.0,
                 lse_square_scale=0.0, inplace_backward=False, process_group=None):
        super().__init__()
        if reduction not in ('mean', 'none', 'sum'):
            raise NotImplementedError("Only support reduction = 'mean' or 'none' or 'sum'")
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.label_smoothing = label_smoothing
        self.lse_square_scale = lse_square_scale
        self.inplace_backward = inplace_backward
        self.process_group = process_group

    def forward(self, input, target):
        assert input.is_cuda and target.is_cuda, 'Only support CUDA tensors'
        loss = cross_entropy_loss(
            input, target,
            label_smoothing=self.label_smoothing,
            lse_square_scale=self.lse_square_scale,
            ignored_index=self.ignore_index,
            inplace_backward=self.inplace_backward,
            process_group=self.process_group)
        if self.reduction == 'mean':
            # average only over non-ignored targets
            return loss.sum() / (target != self.ignore_index).sum()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
def save_strategy(strategy, filename):
    """Serialize *strategy* with pickle and write the bytes to *filename*."""
    with open(filename, 'wb') as fp:
        fp.write(pickle.dumps(strategy))
class BeforeAfterDatasetBatches(Dataset):
    """Dataset over pre-built batches: normalizes input variables, binarizes the target."""

    def __init__(self, batches, input_vars, target, mean_std_dict=None):
        print(' INIT CALLED ')
        self.batches = batches
        self.target = target
        self.input_vars = input_vars
        # per-variable normalization constants shaped for channel-wise broadcasting
        self.mean = np.stack([mean_std_dict[v]['mean'] for v in input_vars]).reshape((-1, 1, 1))
        self.std = np.stack([mean_std_dict[v]['std'] for v in input_vars]).reshape((-1, 1, 1))

    def __len__(self):
        return len(self.batches)

    def __getitem__(self, idx):
        sample = self.batches[idx]
        stacked = np.stack([sample[v].values for v in self.input_vars])
        normalized = (stacked - self.mean) / self.std
        labels = sample[self.target].values
        # NaNs are treated as zeros both in inputs and labels
        normalized = np.nan_to_num(normalized, nan=0)
        labels = np.nan_to_num(labels, nan=0)
        return (normalized, labels > 0)
def densenet161(pretrained=False, **kwargs):
    """DenseNet-161, optionally initialized with pretrained ImageNet weights.

    NOTE: extra **kwargs are accepted but not forwarded (kept for API parity).
    """
    net = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24))
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['densenet161']))
    return net
class DABModule(nn.Module):
    """Depth-wise asymmetric bottleneck block.

    Two parallel depthwise branches -- a plain (3x1, 1x3) pair and a dilated
    (3x1, 1x3) pair -- run inside a half-width bottleneck; their sum is
    projected back and added to the input (residual connection).
    """

    def __init__(self, nIn, d=1, kSize=3, dkSize=3):
        super().__init__()
        half = nIn // 2
        self.bn_relu_1 = BNPReLU(nIn)
        self.conv3x3 = Conv(nIn, half, kSize, 1, padding=1, bn_acti=True)
        # plain depthwise asymmetric pair
        self.dconv3x1 = Conv(half, half, (dkSize, 1), 1, padding=(1, 0), groups=half, bn_acti=True)
        self.dconv1x3 = Conv(half, half, (1, dkSize), 1, padding=(0, 1), groups=half, bn_acti=True)
        # dilated depthwise asymmetric pair
        self.ddconv3x1 = Conv(half, half, (dkSize, 1), 1, padding=(1 * d, 0), dilation=(d, 1), groups=half, bn_acti=True)
        self.ddconv1x3 = Conv(half, half, (1, dkSize), 1, padding=(0, 1 * d), dilation=(1, d), groups=half, bn_acti=True)
        self.bn_relu_2 = BNPReLU(half)
        self.conv1x1 = Conv(half, nIn, 1, 1, padding=0, bn_acti=False)

    def forward(self, input):
        x = self.bn_relu_1(input)
        x = self.conv3x3(x)
        plain_branch = self.dconv1x3(self.dconv3x1(x))
        dilated_branch = self.ddconv1x3(self.ddconv3x1(x))
        x = self.bn_relu_2(plain_branch + dilated_branch)
        x = self.conv1x1(x)
        return x + input
# NOTE(review): the Flask route decorator was truncated in the source; restored
# assuming the application object is named `app` -- verify against the file.
@app.route('/typing', methods=['GET', 'POST'])
def typing():
    """Run tokenize -> NER -> entity-typing over `sentence` and return a JSON result."""
    sentence = request.values.get('sentence')
    words = tokenize_toolkit.run(sentence)
    ner_result = ner_toolkit.run(words)
    et_result = et_toolkit.run(ner_result)
    return jsonify({'words': words, 'et_result': [{'mention': words[entity['start']:entity['end']], 'start': entity['start'], 'end': entity['end'], 'type': entity['types']} for entity in et_result]})
class BuddyRobotCamera(CameraBase):
    """Camera mounted on one robot's end effector, aimed at a target robot or a fixed point."""

    def __init__(self, robot_camera: Robot, target: Union[(List, Robot)], **kwargs):
        self.robot_camera = robot_camera
        # world position of the camera robot's end-effector link
        position = pyb.getLinkState(self.robot_camera, self.robot_camera.end_effector_link_id)[4]
        self.robot_target = None
        # BUG FIX: the original compared `type(target) is List` against
        # typing.List, which is never True for an actual list, so any list
        # target raised ValueError; isinstance against the runtime types is
        # the correct (and subclass-friendly) check.
        if isinstance(target, Robot):
            self.robot_target = target
            target = pyb.getLinkState(self.robot_target, self.robot_target.end_effector_link_id)[4]
        elif isinstance(target, list):
            pass  # a fixed world coordinate; used as-is
        else:
            raise ValueError(f'target is of type: {type(target)}, supported types are {Robot} and {List}')
        super().__init__(position=position, target=target, **kwargs)

    def _adapt_to_environment(self):
        """Refresh camera pose and target from the current simulation state."""
        (self.pos, camera_orientation) = pyb.getLinkState(self.robot_camera, self.robot_camera.end_effector_link_id)[4:6]
        self.target = pyb.getLinkState(self.robot_target, self.robot_target.end_effector_link_id)[4]
        # align the camera's up vector with the end-effector orientation
        self.camera_args['up_vector'] = directionalVectorsFromQuaternion(camera_orientation)[0]
        return super()._adapt_to_environment()
class MSDBaseBlock(nn.Module):
    """MSDNet base block: optional 1x1 bottleneck followed by a (possibly strided) 3x3 conv."""

    def __init__(self, in_channels, out_channels, stride, use_bottleneck, bottleneck_factor):
        super(MSDBaseBlock, self).__init__()
        self.use_bottleneck = use_bottleneck
        if use_bottleneck:
            # bottleneck width is capped at the input width
            mid_channels = min(in_channels, bottleneck_factor * out_channels)
            self.bn_conv = conv1x1_block(in_channels=in_channels, out_channels=mid_channels)
        else:
            mid_channels = in_channels
        self.conv = conv3x3_block(in_channels=mid_channels, out_channels=out_channels, stride=stride)

    def forward(self, x):
        if self.use_bottleneck:
            x = self.bn_conv(x)
        return self.conv(x)
class Brightness(object):
    """Randomly perturb image brightness by lerping toward an all-zero image."""

    def __init__(self, var):
        self.var = var

    def __call__(self, img):
        zeros = img.new().resize_as_(img).zero_()
        # alpha in [-var, var]: positive darkens, negative brightens
        alpha = random.uniform(-self.var, self.var)
        return img.lerp(zeros, alpha)
class ResidualConvUnit(nn.Module):
    """Residual unit: two 3x3 convs with pre-activation ReLUs and an identity skip.

    NOTE: the ReLU is in-place, so the first activation mutates the incoming
    tensor (matching the original implementation).
    """

    def __init__(self, features):
        super().__init__()
        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv1(self.relu(x))
        out = self.conv2(self.relu(out))
        # x was relu'd in place above, so the skip uses the activated input
        return out + x
class SwitchOptimizer(GraphRewriterBase):
    """Fold Switch nodes whose predicate is a constant True, rewiring consumers
    of the true output (':1') directly to the Switch's data input."""

    # NOTE(review): decorator name truncated in the source ('_elapsed_time');
    # restored as the codebase's usual timing decorator -- verify against the
    # original file.
    @dump_elapsed_time('Pass SwitchOptimizer')
    def do_transformation(self):
        cur_graph = GraphAnalyzer()
        cur_graph.graph = self.model
        graph_info = cur_graph.parse_graph()
        target_nodes = cur_graph.query_fusion_pattern_nodes([['Switch']])
        for node_combination in target_nodes:
            switch_node = graph_info[node_combination[0]].node
            pred_node = graph_info[switch_node.input[1]].node
            # only fold when the predicate is a constant (or default) True value
            if (((pred_node.op == 'Const') and tensor_util.MakeNdarray(graph_info[pred_node.name].node.attr['value'].tensor)) or ((pred_node.op == 'PlaceholderWithDefault') and tensor_util.MakeNdarray(graph_info[pred_node.input[0]].node.attr['value'].tensor))):
                condition = []
                # record which branch (:1 true / :0 false) each consumer reads
                for output in graph_info[node_combination[0]].outputs:
                    successor_node = graph_info[output].node
                    for (index, value) in enumerate(successor_node.input):
                        if (value == (node_combination[0] + ':1')):
                            condition.append(True)
                        elif (value == (node_combination[0] + ':0')):
                            condition.append(False)
                # skip the Switch if any consumer depends on the false branch
                if (not all(condition)):
                    continue
                for output in graph_info[node_combination[0]].outputs:
                    successor_node = graph_info[output].node
                    replace_index = None
                    for (index, value) in enumerate(successor_node.input):
                        if (value == (node_combination[0] + ':1')):
                            replace_index = index
                            break
                    # NOTE(review): truthiness check also bails when the match
                    # is at input index 0 -- possibly intended to be
                    # `replace_index is None`; left as-is to preserve behavior.
                    if (not replace_index):
                        break
                    # bypass the Switch: consumer reads the data input directly
                    successor_node.input[replace_index] = switch_node.input[0]
                    switch_node_outputs = list(graph_info[node_combination[0]].outputs)
                    # after rewiring the last consumer, drop the Switch node itself
                    if (switch_node_outputs.index(output) == (len(switch_node_outputs) - 1)):
                        cur_graph.remove_node_with_single_input_output(node_combination[0])
            else:
                continue
        return cur_graph.dump_graph()
def test_graph_gnp_fast():
    """Sanity check: edge density of graph_Gnp_fast(n, 0.5) should be ~0.5."""
    num_vertices = 100
    graph = graph_Gnp_fast(num_vertices, 0.5)
    # A simple undirected graph on n vertices has at most n*(n-1)/2 edges.
    possible_edges = num_vertices * (num_vertices - 1) // 2
    # Allow 5% relative tolerance around the expected edge density.
    assert pytest.approx(graph.num_e / possible_edges, 0.05) == 0.5
def init_tf_weights(pfile, spath, model):
    """Load model weights from a params file and persist them as a TF checkpoint.

    Args:
        pfile: path to the params file understood by ``model.load_weights``.
        spath: directory the checkpoint is written to (created if missing).
        model: object exposing ``load_weights(session, params_file)``.
    """
    if not tf.gfile.Exists(spath):
        tf.gfile.MakeDirs(spath)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        model.load_weights(sess, pfile)
        # Checkpoint files land under <spath>/vnect_tf*
        saver.save(sess, os.path.join(spath, 'vnect_tf'))
def move_optimizer_to_cuda(optimizer):
    """Move an optimizer's internal state tensors onto each parameter's GPU.

    Useful after loading a CPU checkpoint into an optimizer whose parameters
    already live on CUDA; parameters still on CPU are left untouched.
    """
    for group in optimizer.param_groups:
        for p in group['params']:
            if not p.is_cuda:
                continue
            state = optimizer.state[p]
            for key in state.keys():
                value = state[key]
                if isinstance(value, torch.Tensor):
                    # Keep each state tensor on the same device as its parameter.
                    state[key] = value.cuda(device=p.get_device())
def torch_unique_consecutive(input, **kwargs):
    """Meta-device shim for ``torch.unique_consecutive``.

    Runs the real op on a CPU zeros stand-in (only shape/dtype matter for
    tracing) and moves every produced tensor onto the ``meta`` device.

    Args:
        input: tensor whose shape/dtype drive the stand-in computation.
        **kwargs: forwarded to ``torch.unique_consecutive``
            (e.g. ``return_inverse``, ``return_counts``).

    Returns:
        A meta tensor, or a tuple of meta tensors when extra outputs are
        requested.
    """
    output = torch.unique_consecutive(torch.zeros_like(input, device='cpu'), **kwargs)
    if isinstance(output, torch.Tensor):
        return output.to('meta')
    # BUG FIX: the original called ``map(output, fn)`` with the arguments
    # swapped, raising ``TypeError: 'tuple' object is not callable`` whenever
    # return_inverse/return_counts produced a tuple.
    return tuple(t.to('meta') for t in output)
def mutate(chrom, mchance, msigma):
    """Return a mutated copy of a chromosome.

    Each gene mutates independently with probability 1/mchance; a mutated
    gene gets Gaussian noise (mean 0, std msigma) added to it.
    """
    mutated = []
    for gene in chrom:
        # randrange(mchance) hits 0 with probability 1/mchance.
        if random.randrange(mchance) == 0:
            mutated.append(gene + random.gauss(0, msigma))
        else:
            mutated.append(gene)
    return mutated
class NoAugment(nn.Module):
    """Identity augmentation: returns the input unchanged.

    Placeholder so augmented and non-augmented pipelines share one interface.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # Deliberate no-op.
        return input
def main():
    """End-to-end summarization driver (HF Seq2Seq fine-tune / eval / predict)
    with optional Intel Neural Compressor tuning and benchmarking hooks.

    Parses arguments, loads dataset/model/tokenizer, tokenizes, then either
    quantizes (``--tune``), benchmarks, or runs train/eval/predict.
    """
    # --- Argument parsing: a single JSON file path or regular CLI flags. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # --- Logging setup: rank-aware verbosity across our logger, datasets, transformers. ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    # T5 checkpoints expect a task prefix (e.g. "summarize: "); warn when missing.
    if ((data_args.source_prefix is None) and (model_args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'])):
        logger.warning("You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with `--source_prefix 'summarize: ' `")
    # --- Checkpoint detection: resume or refuse to overwrite a non-empty dir. ---
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. \nTo avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # --- Dataset loading: hub dataset by name, or local train/validation/test files. ---
    if (data_args.dataset_name is not None):
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
            # The file extension (csv/json) selects the datasets loader.
            extension = data_args.train_file.split('.')[(- 1)]
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
            extension = data_args.validation_file.split('.')[(- 1)]
        if (data_args.test_file is not None):
            data_files['test'] = data_args.test_file
            extension = data_args.test_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # --- Model / tokenizer construction. ---
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model.resize_token_embeddings(len(tokenizer))
    if (model.config.decoder_start_token_id is None):
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    # Grow the position embeddings when the requested source length exceeds the model's.
    if (hasattr(model.config, 'max_position_embeddings') and (model.config.max_position_embeddings < data_args.max_source_length)):
        if (model_args.resize_position_embeddings is None):
            logger.warning(f"Increasing the model's number of position embedding vectors from {model.config.max_position_embeddings} to {data_args.max_source_length}.")
            model.resize_position_embeddings(data_args.max_source_length)
        elif model_args.resize_position_embeddings:
            model.resize_position_embeddings(data_args.max_source_length)
        else:
            raise ValueError(f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has {model.config.max_position_embeddings} position encodings. Consider either reducing `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the model's position encodings by passing `--resize_position_embeddings`.")
    prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
    # --- Column resolution: pick the split whose columns we will preprocess. ---
    if training_args.do_train:
        column_names = raw_datasets['train'].column_names
    elif training_args.do_eval:
        column_names = raw_datasets['validation'].column_names
    elif training_args.do_predict:
        column_names = raw_datasets['test'].column_names
    else:
        logger.info('There is nothing to do. \nPlease pass `do_train`, `do_eval` and/or `do_predict`.')
        return
    dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
    if (data_args.text_column is None):
        text_column = (dataset_columns[0] if (dataset_columns is not None) else column_names[0])
    else:
        text_column = data_args.text_column
        if (text_column not in column_names):
            raise ValueError(f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}")
    if (data_args.summary_column is None):
        summary_column = (dataset_columns[1] if (dataset_columns is not None) else column_names[1])
    else:
        summary_column = data_args.summary_column
        if (summary_column not in column_names):
            raise ValueError(f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}")
    max_target_length = data_args.max_target_length
    padding = ('max_length' if data_args.pad_to_max_length else False)
    if ((training_args.label_smoothing_factor > 0) and (not hasattr(model, 'prepare_decoder_input_ids_from_labels'))):
        logger.warning(f'label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for`{model.__class__.__name__}`. \nThis will lead to loss being calculated twice and will take up more memory')

    def preprocess_function(examples):
        """Tokenize (prefix + text) inputs and summary targets; mask pad labels."""
        (inputs, targets) = ([], [])
        # Drop rows where either side is missing.
        for i in range(len(examples[text_column])):
            if ((examples[text_column][i] is not None) and (examples[summary_column][i] is not None)):
                inputs.append(examples[text_column][i])
                targets.append(examples[summary_column][i])
        inputs = [(prefix + inp) for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
        # Replace pad token ids by -100 so they are ignored by the loss.
        if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
            labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
        model_inputs['labels'] = labels['input_ids']
        return model_inputs

    # --- Per-split preprocessing (optionally truncated via max_*_samples). ---
    if training_args.do_train:
        if ('train' not in raw_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if (data_args.max_train_samples is not None):
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if ('validation' not in raw_datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if (data_args.max_eval_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
    if training_args.do_predict:
        max_target_length = data_args.val_max_target_length
        if ('test' not in raw_datasets):
            raise ValueError('--do_predict requires a test dataset')
        predict_dataset = raw_datasets['test']
        if (data_args.max_predict_samples is not None):
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
    # --- Collator + ROUGE metric. ---
    label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
    data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
    metric = load_metric('rouge')

    def postprocess_text(preds, labels):
        """Strip and sentence-split text; rougeLSum expects newline-separated sentences."""
        preds = [pred.strip() for pred in preds]
        labels = [label.strip() for label in labels]
        preds = ['\n'.join(nltk.sent_tokenize(pred)) for pred in preds]
        labels = ['\n'.join(nltk.sent_tokenize(label)) for label in labels]
        return (preds, labels)

    def compute_metrics(eval_preds):
        """Decode predictions/labels and compute ROUGE (mid f-measure, %), plus gen length."""
        (preds, labels) = eval_preds
        if isinstance(preds, tuple):
            preds = preds[0]
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        if data_args.ignore_pad_token_for_loss:
            # -100 cannot be decoded; map it back to the pad token first.
            labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
        result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
        result = {key: (value.mid.fmeasure * 100) for (key, value) in result.items()}
        prediction_lens = [np.count_nonzero((pred != tokenizer.pad_token_id)) for pred in preds]
        result['gen_len'] = np.mean(prediction_lens)
        result = {k: round(v, 4) for (k, v) in result.items()}
        return result

    trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
    max_length = (training_args.generation_max_length if (training_args.generation_max_length is not None) else data_args.val_max_target_length)
    num_beams = (data_args.num_beams if (data_args.num_beams is not None) else training_args.generation_num_beams)
    # --- Neural Compressor needs a dataloader exposing .batch_size; wrap when absent. ---
    eval_dataloader = trainer.get_eval_dataloader()
    if (eval_dataloader.batch_size is None):
        def _build_inc_dataloader(dataloader):
            class INCDataLoader():
                # Delegate iteration to the wrapped dataloader.
                __iter__ = dataloader.__iter__
                def __init__(self) -> None:
                    self.dataloader = dataloader
                    self.batch_size = dataloader.total_batch_size
            return INCDataLoader()
        eval_dataloader = _build_inc_dataloader(eval_dataloader)
    batch_size = eval_dataloader.batch_size

    def eval_func_for_nc(model):
        """Evaluation callback for Neural Compressor: returns eval_rougeLsum.

        NOTE(review): ``math.exp(eval_rougeLsum)`` is labeled 'perplexity' —
        unusual for a ROUGE score; confirm this is intentional.
        """
        trainer.model = model
        results = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix='eval')
        perplexity = math.exp(results['eval_rougeLsum'])
        results = {'perplexity': perplexity, 'eval_rougeLsum': results['eval_rougeLsum'], 'eval_samples_per_second': results['eval_samples_per_second']}
        sum_task_metrics_keys = ['perplexity', 'eval_rougeLsum']
        for key in sum_task_metrics_keys:
            if (key in results.keys()):
                logger.info('Finally Eval {}:{}'.format(key, results[key]))
                if (key == 'eval_rougeLsum'):
                    eval_rougeLsum = results[key]
                    break
        print(('Accuracy: %.5f' % eval_rougeLsum))
        print(('Throughput: %.3f samples/sec' % results['eval_samples_per_second']))
        print(('Latency: %.3f ms' % ((1 * 1000) / results['eval_samples_per_second'])))
        print(('Batch size = %d' % training_args.per_device_eval_batch_size))
        return eval_rougeLsum

    # --- INC post-training dynamic quantization path (exits when done). ---
    if model_args.tune:
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor import quantization
        conf = PostTrainingQuantConfig(approach='dynamic')
        q_model = quantization.fit(model, conf, calib_dataloader=eval_dataloader, eval_func=eval_func_for_nc)
        q_model.save(training_args.output_dir)
        exit(0)
    # --- Benchmark / accuracy measurement path (optionally on the int8 model). ---
    if (model_args.performance or model_args.accuracy):
        if model_args.int8:
            from neural_compressor.utils.pytorch import load
            new_model = load(os.path.abspath(os.path.expanduser(training_args.output_dir)), model)
        else:
            new_model = model
        if model_args.performance:
            from neural_compressor.config import BenchmarkConfig
            from neural_compressor import benchmark
            b_conf = BenchmarkConfig(warmup=5, iteration=100, cores_per_instance=4, num_of_instance=1)
            benchmark.fit(new_model, b_conf, b_dataloader=eval_dataloader)
        else:
            eval_func_for_nc(new_model)
        exit(0)
    # --- Standard training path. ---
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # --- Evaluation / prediction. ---
    results = {}
    max_length = (training_args.generation_max_length if (training_args.generation_max_length is not None) else data_args.val_max_target_length)
    num_beams = (data_args.num_beams if (data_args.num_beams is not None) else training_args.generation_num_beams)
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix='eval')
        max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predict_results = trainer.predict(predict_dataset, metric_key_prefix='predict', max_length=max_length, num_beams=num_beams)
        metrics = predict_results.metrics
        max_predict_samples = (data_args.max_predict_samples if (data_args.max_predict_samples is not None) else len(predict_dataset))
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)
        if trainer.is_world_process_zero():
            if training_args.predict_with_generate:
                predictions = tokenizer.batch_decode(predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                predictions = [pred.strip() for pred in predictions]
                output_prediction_file = os.path.join(training_args.output_dir, 'generated_predictions.txt')
                with open(output_prediction_file, 'w') as writer:
                    writer.write('\n'.join(predictions))
    # --- Model card / hub upload metadata. ---
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'summarization'}
    if (data_args.dataset_name is not None):
        kwargs['dataset_tags'] = data_args.dataset_name
        if (data_args.dataset_config_name is not None):
            kwargs['dataset_args'] = data_args.dataset_config_name
            kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
        else:
            kwargs['dataset'] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
    return results
def set_exec_schedule(line_: str) -> None:
    """Parse a `%flow schedule <mode>` magic line and apply the chosen schedule.

    Recognized prefixes are 'liveness', 'dag' and 'hybrid'; anything else
    prints a usage message and leaves the current setting untouched.
    """
    usage = f"Usage: %flow schedule [{'|'.join((schedule.value for schedule in ExecutionSchedule))}]"
    prefix_to_schedule = (
        ('liveness', ExecutionSchedule.LIVENESS_BASED),
        ('dag', ExecutionSchedule.DAG_BASED),
        ('hybrid', ExecutionSchedule.HYBRID_DAG_LIVENESS_BASED),
    )
    for prefix, chosen in prefix_to_schedule:
        if line_.startswith(prefix):
            flow().mut_settings.exec_schedule = chosen
            return
    warn(usage)
def draw_fig_1(cnndm_peg, xsum_peg, cnndm_bart, xsum_bart):
    """Render Figure 1 as a 2x2 grid (CNN/DM top, XSum bottom; PEGASUS left,
    BART right) and save it to 'x_entropy-y_bigram-grid.pdf'."""
    figure = plt.figure(figsize=(FIG_SIZE_x, ysize_fig1))
    grid = gridspec.GridSpec(ncols=2, nrows=2, figure=figure)
    # Top row: CNN/DM panels (larger y-range); legend only on the BART panel.
    plot_fig1_single(figure, grid[0, 0], dir=dir_datadrive, spec_name=cnndm_peg, SEPS=10, x_ticklabel_vis=False, y_ticklabel_vis=False, ylim=3000, title='PEGASUS')
    plot_fig1_single(figure, grid[0, 1], dir=dir_datadrive, spec_name=cnndm_bart, SEPS=10, y_ticklabel_vis=False, x_ticklabel_vis=False, ylim=3000, showLegend=True, title='BART')
    # Bottom row: XSum panels (smaller y-range).
    plot_fig1_single(figure, grid[1, 0], dir=dir_datadrive, spec_name=xsum_peg, SEPS=10, y_ticklabel_vis=False, ylim=1200)
    plot_fig1_single(figure, grid[1, 1], dir=dir_datadrive, spec_name=xsum_bart, SEPS=10, y_namelabel_vis=False, y_ticklabel_vis=False, ylim=1200)
    figure.tight_layout()
    plt.savefig(f'x_entropy-y_bigram-grid.pdf', dpi=dpi)
    plt.show()
    plt.close()
def print_ante_distance_stats(results):
    """Print non-zero antecedent-distance counts, grouped by distance then category.

    Expects results['ante_dist_stats'] to map distance -> {category: count}.
    """
    stats = results['ante_dist_stats']
    for distance in sorted(stats):
        print('ante distance {0} :'.format(distance))
        per_category = stats[distance]
        for category in sorted(per_category):
            count = per_category[category]
            # Zero counts are suppressed.
            if count:
                print('{} {} '.format(category, count))
# NOTE(review): the extracted source showed `_cleanup` with no leading `@`;
# restored here as a decorator (same pattern as other stripped decorators in
# this file) — confirm against the original source.
@_cleanup
class Sinusoidal(GOModel):
    """Sinusoidal global-optimization benchmark: f(x) = -(cos(x) + sin(3x))."""

    # Search domain: one dimension over [0, 2*pi].
    bounds = [[0, (2 * np.pi)]]
    # Reference optimum location — presumably used by the framework/tests; confirm.
    xmax = 3.

    def _f(x):
        # NOTE(review): no `self` parameter — the GOModel framework appears to
        # call this as an unbound function; confirm against GOModel's contract.
        return (- np.ravel((np.cos(x) + np.sin((3 * x)))))
def gen_svm_nodearray(xi, feature_max=None, isKernel=None):
    """Convert one sample into a ctypes ``svm_node`` array for libsvm.

    Args:
        xi: the sample, as a dict {index: value} or a list/tuple of values.
            For list/tuple input in non-kernel mode, a 0 is prepended so that
            feature indices become 1-based, as libsvm expects.
        feature_max: if given (int), features with index > feature_max are dropped.
        isKernel: truthy for precomputed-kernel data; keeps zero-valued entries
            and skips the 1-based index shift.

    Returns:
        (ret, max_idx): the -1-terminated svm_node array and the largest
        feature index used (0 when the sample is empty).
    """
    if isinstance(xi, dict):
        index_range = xi.keys()
    elif isinstance(xi, (list, tuple)):
        if (not isKernel):
            # Shift to 1-based indexing for ordinary (non-kernel) data.
            xi = ([0] + xi)
        index_range = range(len(xi))
    else:
        raise TypeError('xi should be a dictionary, list or tuple')
    # NOTE(review): `if feature_max` skips the filter when feature_max == 0 —
    # presumably intentional (0 means "no cap"); confirm.
    if feature_max:
        assert isinstance(feature_max, int)
        index_range = list(filter((lambda j: (j <= feature_max)), index_range))
    if (not isKernel):
        # Zero-valued features are implicit in libsvm's sparse format.
        index_range = list(filter((lambda j: (xi[j] != 0)), index_range))
    index_range = sorted(index_range)
    # Allocate len+1 nodes; the trailing node with index -1 terminates the array.
    ret = (svm_node * (len(index_range) + 1))()
    ret[(- 1)].index = (- 1)
    for (idx, j) in enumerate(index_range):
        ret[idx].index = j
        ret[idx].value = xi[j]
    max_idx = 0
    if index_range:
        max_idx = index_range[(- 1)]
    return (ret, max_idx)
def main():
    """FUNSD token-classification driver (LayoutLM-style) with ONNX Runtime
    quantization/benchmarking via Intel Neural Compressor.

    Parses arguments, loads the FUNSD dataset and a token-classification model,
    aligns word-level labels/bboxes to subword tokens, then trains and/or
    evaluates; evaluation can quantize or benchmark an exported ONNX model.
    """
    # --- Argument parsing: a single JSON file path or regular CLI flags. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # --- Checkpoint detection: resume or refuse to overwrite a non-empty dir. ---
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif (last_checkpoint is not None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # --- Logging setup: verbose only on the main process. ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    # Load the FUNSD dataset via its local loading script.
    datasets = load_dataset(os.path.abspath(funsd.__file__))
    if training_args.do_train:
        column_names = datasets['train'].column_names
        features = datasets['train'].features
    else:
        column_names = datasets['test'].column_names
        features = datasets['test'].features
    text_column_name = ('tokens' if ('tokens' in column_names) else column_names[0])
    label_column_name = (f'{data_args.task_name}_tags' if (f'{data_args.task_name}_tags' in column_names) else column_names[1])
    remove_columns = column_names

    def get_label_list(labels):
        """Collect the sorted set of unique labels across all samples."""
        unique_labels = set()
        for label in labels:
            unique_labels = (unique_labels | set(label))
        label_list = list(unique_labels)
        label_list.sort()
        return label_list

    # ClassLabel features already encode labels as ids; otherwise build the map.
    if isinstance(features[label_column_name].feature, ClassLabel):
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(datasets['train'][label_column_name])
        label_to_id = {l: i for (i, l) in enumerate(label_list)}
    num_labels = len(label_list)
    # --- Model / tokenizer construction. ---
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    # word_ids() below requires a fast tokenizer.
    if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
        raise ValueError('This example script only works for models that have a fast tokenizer. \nCheckout the big table of models at to find the model types that meet this requirement')
    padding = ('max_length' if data_args.pad_to_max_length else False)

    def tokenize_and_align_labels(examples):
        """Tokenize word-split inputs and align per-word labels/bboxes to subwords.

        Special tokens get label -100 and a zero bbox; only the first subword of
        each word keeps the label unless ``label_all_tokens`` is set.
        """
        tokenized_inputs = tokenizer(examples[text_column_name], padding=padding, truncation=True, return_overflowing_tokens=True, is_split_into_words=True)
        labels = []
        bboxes = []
        images = []
        for batch_index in range(len(tokenized_inputs['input_ids'])):
            word_ids = tokenized_inputs.word_ids(batch_index=batch_index)
            # Overflowing windows map back to their originating sample.
            org_batch_index = tokenized_inputs['overflow_to_sample_mapping'][batch_index]
            label = examples[label_column_name][org_batch_index]
            bbox = examples['bboxes'][org_batch_index]
            image = examples['image'][org_batch_index]
            previous_word_idx = None
            label_ids = []
            bbox_inputs = []
            for word_idx in word_ids:
                if (word_idx is None):
                    label_ids.append((- 100))
                    bbox_inputs.append([0, 0, 0, 0])
                elif (word_idx != previous_word_idx):
                    label_ids.append(label_to_id[label[word_idx]])
                    bbox_inputs.append(bbox[word_idx])
                else:
                    label_ids.append((label_to_id[label[word_idx]] if data_args.label_all_tokens else (- 100)))
                    bbox_inputs.append(bbox[word_idx])
                previous_word_idx = word_idx
            labels.append(label_ids)
            bboxes.append(bbox_inputs)
            images.append(image)
        tokenized_inputs['labels'] = labels
        tokenized_inputs['bbox'] = bboxes
        tokenized_inputs['image'] = images
        return tokenized_inputs

    # --- Per-split preprocessing. ---
    if training_args.do_train:
        if ('train' not in datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if (data_args.max_train_samples is not None):
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    if training_args.do_eval:
        if ('test' not in datasets):
            raise ValueError('--do_eval requires a test dataset')
        test_dataset = datasets['test']
        if (data_args.max_test_samples is not None):
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
        test_dataset = test_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    data_collator = DataCollatorForKeyValueExtraction(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None), padding=padding, max_length=512)
    metric = load_metric('seqeval')

    def compute_metrics(p):
        """seqeval metrics over non-ignored (-100) positions; optionally per-entity."""
        (predictions, labels) = p
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Flatten nested per-entity dicts into "<entity>_<metric>" keys.
            final_results = {}
            for (key, value) in results.items():
                if isinstance(value, dict):
                    for (n, v) in value.items():
                        final_results[f'{key}_{n}'] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}

    trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(test_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    # --- Training. ---
    if training_args.do_train:
        checkpoint = (last_checkpoint if last_checkpoint else None)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # --- Evaluation over the exported ONNX model (quantize and/or benchmark). ---
    if training_args.do_eval:
        from model import ORTModel

        def eval_func(model):
            """Run the ONNX model over the test set and return its F1."""
            logger.info('*** Evaluate ***')
            ort_model = ORTModel(model, compute_metrics=compute_metrics)
            outputs = ort_model.evaluation_loop(test_dataset)
            return outputs.metrics['f1']

        if model_args.tune:
            # Graph-level BERT fusion first; fall back to the raw model when the
            # optimized graph fails to load in the installed onnxruntime.
            from onnxruntime.transformers import optimizer
            from onnxruntime.transformers.fusion_options import FusionOptions
            opt_options = FusionOptions('bert')
            model_optimizer = optimizer.optimize_model(model_args.input_model, 'bert', num_heads=12, hidden_size=768, optimization_options=opt_options)
            onnx_model = model_optimizer.model
            try:
                onnxruntime.InferenceSession(onnx_model.SerializeToString(), providers=onnxruntime.get_available_providers())
            except Exception as e:
                logger.warning('Optimized model is invalid: {}. '.format(e))
                logger.warning('Model optimizer will be skipped. Try to upgrade onnxruntime to avoid this error')
                onnx_model = onnx.load(model_args.input_model)
            from neural_compressor import quantization, PostTrainingQuantConfig
            from neural_compressor.utils.constant import FP32
            calib_dataset = IncDataset(test_dataset, onnx_model)
            # Ops kept in FP32 during static quantization (regex-matched names).
            fp32_op_names = ['Attention_(0|3|4)', '/layoutlm/embeddings/Add(_(2|4|6)|)', '.*?_position_embeddings.*?']
            config = PostTrainingQuantConfig(approach='static', quant_format=model_args.quant_format, op_name_dict={op_name: FP32 for op_name in fp32_op_names})
            q_model = quantization.fit(onnx_model, config, eval_func=eval_func, calib_dataloader=DataLoader(framework='onnxruntime', dataset=calib_dataset, batch_size=1))
            q_model.save(model_args.save_path)
        if model_args.benchmark:
            onnx_model = onnx.load(model_args.input_model)
            if (model_args.mode == 'performance'):
                from neural_compressor.benchmark import fit
                from neural_compressor.config import BenchmarkConfig
                b_dataset = IncDataset(test_dataset, onnx_model)
                conf = BenchmarkConfig(iteration=100, cores_per_instance=28, num_of_instance=1)
                b_dataloader = DataLoader(framework='onnxruntime', dataset=b_dataset, batch_size=model_args.batch_size)
                fit(onnx_model, conf, b_dataloader=b_dataloader)
            elif (model_args.mode == 'accuracy'):
                eval_f1 = eval_func(onnx_model)
                print(('Batch size = %d' % model_args.batch_size))
                print(('Accuracy: %.5f' % eval_f1))
class SplitUnlabeledWrapper(_SSLDatasetWrapper):
    """Wraps a dataset and partitions its samples into labeled/unlabeled groups.

    A sample counts as labeled when its name starts with any prefix in
    ``sublabeled_prefix``. The wrapped dataset's sample list is reordered so
    labeled samples come first; with ``ignore_unlabeled`` the unlabeled ones
    are dropped entirely.
    """

    def __init__(self, dataset, sublabeled_prefix, ignore_unlabeled=False):
        super(SplitUnlabeledWrapper, self).__init__()
        self.dataset = dataset
        self.sublabeled_prefix = sublabeled_prefix
        self.ignore_unlabeled = ignore_unlabeled
        self._split_labeled()

    def __len__(self):
        return self.dataset.__len__()

    def __getitem__(self, idx):
        return self.dataset.__getitem__(idx)

    def _split_labeled(self):
        """Partition sample names, rewrite the wrapped dataset's lists, and
        record labeled/unlabeled index ranges."""
        labeled, unlabeled = [], []
        for name in self.dataset.sample_list:
            if any(name.startswith(p) for p in self.sublabeled_prefix):
                labeled.append(name)
            else:
                unlabeled.append(name)
        n_labeled, n_unlabeled = len(labeled), len(unlabeled)
        assert (n_labeled + n_unlabeled) == len(self.dataset.sample_list)
        if self.ignore_unlabeled:
            self.dataset.sample_list = labeled
            self.dataset.idxs = list(range(len(self.dataset.sample_list)))
            # Intentionally aliases the dataset's idxs list, as before.
            self.labeled_idxs = self.dataset.idxs
            self.unlabeled_idxs = []
        else:
            # Labeled samples first, then unlabeled.
            self.dataset.sample_list = labeled + unlabeled
            self.dataset.idxs = list(range(len(self.dataset.sample_list)))
            self.labeled_idxs = list(range(n_labeled))
            self.unlabeled_idxs = [n_labeled + i for i in range(n_unlabeled)]
def scanLineForCreate(suite, lineNo, line):
    """Register a 'create' fixture for the suite when the line matches create_re."""
    match = create_re.search(line)
    if match:
        addSuiteCreateDestroy(suite, 'create', lineNo)
class EnvAgent():
    """A single rail agent: episode-constant schedule data plus run-time state."""
    # NOTE(review): the attrib() fields below come from the `attr` library, and
    # from_schedule / load_legacy_static_agent take `cls` — the class-level
    # `@attrs` decorator and the `@classmethod` decorators appear to have been
    # lost in extraction; confirm against the original file.

    # Episode-constant properties set at construction.
    initial_position = attrib(type=Tuple[(int, int)])
    initial_direction = attrib(type=Grid4TransitionsEnum)
    direction = attrib(type=Grid4TransitionsEnum)
    target = attrib(type=Tuple[(int, int)])
    moving = attrib(default=False, type=bool)
    # Speed bookkeeping: fraction of the current cell traversed, the agent's
    # speed, and the action to apply when the agent exits the cell.
    speed_data = attrib(default=Factory((lambda : dict({'position_fraction': 0.0, 'speed': 1.0, 'transition_action_on_cellexit': 0}))))
    # Malfunction bookkeeping: active-malfunction countdown, rate, schedule of
    # the next one, total count, and whether the agent was moving when it broke.
    malfunction_data = attrib(default=Factory((lambda : dict({'malfunction': 0, 'malfunction_rate': 0, 'next_malfunction': 0, 'nr_malfunctions': 0, 'moving_before_malfunction': False}))))
    handle = attrib(default=None)
    # Dynamic per-step state.
    status = attrib(default=RailAgentStatus.READY_TO_DEPART, type=RailAgentStatus)
    position = attrib(default=None, type=Optional[Tuple[(int, int)]])
    old_direction = attrib(default=None)
    old_position = attrib(default=None)

    def reset(self):
        """Return the agent to its pre-departure state.

        Clears position/old state, restores the initial direction, marks the
        agent READY_TO_DEPART and zeroes the speed and malfunction counters.
        """
        self.position = None
        self.direction = self.initial_direction
        self.status = RailAgentStatus.READY_TO_DEPART
        self.old_position = None
        self.old_direction = None
        self.moving = False
        self.speed_data['position_fraction'] = 0.0
        self.speed_data['transition_action_on_cellexit'] = 0.0
        self.malfunction_data['malfunction'] = 0
        self.malfunction_data['nr_malfunctions'] = 0
        self.malfunction_data['moving_before_malfunction'] = False

    def to_agent(self) -> Agent:
        """Export this agent's full state as an ``Agent`` record."""
        return Agent(initial_position=self.initial_position, initial_direction=self.initial_direction, direction=self.direction, target=self.target, moving=self.moving, speed_data=self.speed_data, malfunction_data=self.malfunction_data, handle=self.handle, status=self.status, position=self.position, old_direction=self.old_direction, old_position=self.old_position)

    def from_schedule(cls, schedule: Schedule):
        """Build one EnvAgent per entry of *schedule*.

        Missing per-agent speeds default to 1.0 and missing malfunction rates
        default to 0.0.
        """
        speed_datas = []
        for i in range(len(schedule.agent_positions)):
            speed_datas.append({'position_fraction': 0.0, 'speed': (schedule.agent_speeds[i] if (schedule.agent_speeds is not None) else 1.0), 'transition_action_on_cellexit': 0})
        malfunction_datas = []
        for i in range(len(schedule.agent_positions)):
            malfunction_datas.append({'malfunction': 0, 'malfunction_rate': (schedule.agent_malfunction_rates[i] if (schedule.agent_malfunction_rates is not None) else 0.0), 'next_malfunction': 0, 'nr_malfunctions': 0})
        # agent_directions is deliberately passed twice: once as the fixed
        # initial_direction and once as the current direction.
        return list(starmap(EnvAgent, zip(schedule.agent_positions, schedule.agent_directions, schedule.agent_directions, schedule.agent_targets, ([False] * len(schedule.agent_positions)), speed_datas, malfunction_datas, range(len(schedule.agent_positions)))))

    def load_legacy_static_agent(cls, static_agents_data: Tuple):
        """Rebuild EnvAgents from the legacy static-agent tuple format.

        Entries with at least 6 fields carry their own speed/malfunction dicts;
        shorter entries receive default dicts.
        """
        agents = []
        for (i, static_agent) in enumerate(static_agents_data):
            if (len(static_agent) >= 6):
                agent = EnvAgent(initial_position=static_agent[0], initial_direction=static_agent[1], direction=static_agent[1], target=static_agent[2], moving=static_agent[3], speed_data=static_agent[4], malfunction_data=static_agent[5], handle=i)
            else:
                # NOTE(review): key 'transition_action_on_cell_exit' differs from
                # 'transition_action_on_cellexit' used everywhere else in this
                # class — looks like a typo; confirm before changing.
                agent = EnvAgent(initial_position=static_agent[0], initial_direction=static_agent[1], direction=static_agent[1], target=static_agent[2], moving=False, speed_data={'speed': 1.0, 'position_fraction': 0.0, 'transition_action_on_cell_exit': 0.0}, malfunction_data={'malfunction': 0, 'nr_malfunctions': 0, 'moving_before_malfunction': False}, handle=i)
            agents.append(agent)
        return agents
class Hypothesis(object):
    """A beam-search hypothesis: decoder state, emitted sequence, and score."""

    def __init__(self, state_dict, seq, score):
        self.state_dict = state_dict
        self.seq = seq
        self.score = score

    def is_completed(self):
        """True once the most recently emitted token is EOS."""
        return self.seq[-1] == EOS

    def __len__(self):
        """Length of the emitted sequence."""
        return len(self.seq)
def update(rate=None):
    """Refresh the Tk root window, optionally throttled to *rate* updates/sec."""
    global _update_lasttime
    if rate:
        now = time.time()
        # Time still owed to honour the requested update interval.
        remaining = 1 / rate - (now - _update_lasttime)
        if remaining > 0:
            time.sleep(remaining)
            _update_lasttime = now + remaining
        else:
            # Already behind schedule; just record the current moment.
            _update_lasttime = now
    _root.update()
class autoencoder_vgg1(nn.Module):
    """Small convolutional autoencoder: 3-channel image -> 64-channel code -> reconstruction."""

    def __init__(self):
        super(autoencoder_vgg1, self).__init__()
        # Two stride-2 convs halve the spatial size twice, then a 7x7 conv
        # collapses the remaining 7x7 map into the latent code.
        encoder_layers = [
            nn.Conv2d(3, 16, 3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, 7),
        ]
        # Mirror of the encoder; Tanh keeps the output in [-1, 1].
        decoder_layers = [
            nn.ConvTranspose2d(64, 32, 7),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 16, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 3, 3, stride=2, padding=1, output_padding=1),
            nn.Tanh(),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        """Return ``(code, reconstruction)`` for input batch *x*."""
        code = self.encoder(x)
        reconstruction = self.decoder(code)
        return (code, reconstruction)
def destroy_parallel_group(destroy_rpc=True):
    """Tear down all parallel process groups and reset the shared distributed context."""
    ctx = _DistributedContext
    # Shut down the pipeline RPC network first, if it was ever brought up.
    if destroy_rpc and ctx.PIPE_RPC_INIT == 1:
        _destroy_pippy_rpc_network()
    groups_and_ranks = ctx.PARALLEL_GROUPS_AND_RANKS
    if groups_and_ranks is not None:
        for group_rank_pairs in groups_and_ranks.values():
            for group, _ranks in group_rank_pairs:
                dist.destroy_process_group(group)
    # Clear every piece of cached parallel-topology state.
    ctx.PARALLEL_GROUP_SIZE = None
    ctx.PARALLEL_RANK = None
    ctx.PARALLEL_GROUP = None
    ctx.PARALLEL_GROUPS_AND_RANKS = None
    ctx.PARALLEL_CONFIG = None
    ctx.PARALLEL_INSTANCE_NUM = None
    ctx.PARALLEL_INSTANCE_INDEX = None
def main():
    """cpplint entry point: lint each file and exit non-zero when any error was found."""
    filenames = ParseArguments(sys.argv[1:])
    # Route stderr through a UTF-8 codec with 'replace' so lint messages never
    # crash on undecodable characters.
    sys.stderr = codecs.StreamReaderWriter(sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace')
    lint_state = _cpplint_state
    lint_state.ResetErrorCounts()
    for name in filenames:
        ProcessFile(name, lint_state.verbose_level)
    lint_state.PrintErrorCounts()
    sys.exit(1 if lint_state.error_count > 0 else 0)
# NOTE(review): the decorator below appeared as a bare ".parametrize(...)" —
# the "@pytest.mark" prefix was evidently lost in extraction; restored here.
@pytest.mark.parametrize('argv,expected', [('', {}), ('run', {'COMMAND': 'run'}), ('with 1 2', {'with': True, 'UPDATE': ['1', '2']}), ('evaluate', {'COMMAND': 'evaluate'}), ('help', {'help': True}), ('help evaluate', {'help': True, 'COMMAND': 'evaluate'}), ('-h', {'--help': True}), ('--help', {'--help': True}), ('-m foo', {'--mongo_db': 'foo'}), ('--mongo_db=bar', {'--mongo_db': 'bar'}), ('-l 10', {'--loglevel': '10'}), ('--loglevel=30', {'--loglevel': '30'}), ('--force', {'--force': True})])
def test_parse_individual_arguments(argv, expected):
    """Each CLI fragment updates exactly its own key(s) on top of the defaults."""
    args = parse_args((['test_prog.py'] + shlex.split(argv)), print_help=False)
    # Baseline: parsing with no extra arguments, overlaid with the expected keys.
    plain = parse_args(['test_prog.py'], print_help=False)
    plain.update(expected)
    assert (args == plain)